code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os

# Python 2 CGI script: echoes every posted form field as a table row, then
# dumps the CGI environment variables for debugging.

# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    # NOTE(review): the error is printed into the response and `form` stays
    # undefined, so the field loop below would raise NameError afterwards.
    print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table border="1" cellspacing="0" id="outputSample">
<colgroup><col width="80"><col></colgroup>
<thead>
<tr>
<th>Field Name</th>
<th>Value</th>
</tr>
</thead>
"""
# NOTE(review): field names and values are echoed without HTML escaping —
# acceptable only because this is a local sample page.
for key in form.keys():
    try:
        value = form[key].value
        print """
<tr>
<th>%s</th>
<td><pre>%s</pre></td>
</tr>
""" % (key, value)
    except Exception, e:
        # A field that cannot be read is reported inline instead of aborting.
        print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Sample page.
"""
import cgi
import os
# Ensure that the fckeditor.py is included in your classpath
import fckeditor

# Python 2 CGI sample: renders a form containing one FCKeditor instance that
# posts to sampleposteddata.py.

# Tell the browser to render html
print "Content-Type: text/html"
print ""
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Sample</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h1>FCKeditor - Python - Sample 1</h1>
This sample displays a normal HTML form with an FCKeditor with full features
enabled.
<hr>
<form action="sampleposteddata.py" method="post" target="_blank">
"""
# This is the real work
try:
    # Derive the editor base path from this script's URL: everything up to
    # the "_samples" directory.
    sBasePath = os.environ.get("SCRIPT_NAME")
    sBasePath = sBasePath[0:sBasePath.find("_samples")]
    oFCKeditor = fckeditor.FCKeditor('FCKeditor1')
    oFCKeditor.BasePath = sBasePath
    oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>"""
    print oFCKeditor.Create()
except Exception, e:
    # Any setup failure is reported inline in the generated page.
    print e
print """
<br>
<input type="submit" value="Submit">
</form>
"""
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=lambda s, old, new: s.replace(old, new)):
    """Convert the special characters '&', '<', '>', '"' and "'" into their
    HTML entities (&amp; &lt; &gt; &quot; &#39;) so the value can be embedded
    safely inside an HTML attribute.

    BUG FIX: the entity replacement targets had collapsed into no-ops
    (e.g. replace(text, '&', '&')), so nothing was ever escaped; the proper
    entities are restored.  The default replacer is now a plain wrapper
    around str.replace instead of the Python-2-only string.replace, with the
    same call signature replace(text, old, new).
    """
    text = replace(text, '&', '&amp;')  # must be done 1st, or it would
                                        # re-escape the other entities
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side integration class for FCKeditor.

    Renders the editor as an iframe plus hidden fields for compatible
    browsers, falling back to a plain textarea otherwise.  Configure via the
    public attributes (BasePath, Width, Height, ToolbarSet, Value, Config)
    and call Create().
    """
    def __init__(self, instanceName):
        self.InstanceName = instanceName  # id/name used for the form fields
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = ''
        self.Config = {}  # extra editor settings, serialized into a query string

    def Create(self):
        """Return the HTML fragment that embeds this editor instance."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Build the editor markup for the current attribute values."""
        HtmlValue = escape(self.Value)
        Html = ""
        if (self.IsCompatible()):
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if (self.ToolbarSet is not None):
                Link += "&Toolbar=%s" % self.ToolbarSet
            # Render the linked hidden field
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Render the configurations hidden field
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # Render the editor iframe.
            # BUG FIX: the id contained a stray backslash ("%s\__Frame");
            # the editor expects the "<instance>___Frame" naming used by the
            # other hidden fields above.
            Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # BUG FIX: the checks used find("%%"), which looks for a literal
            # double percent sign and never matches "100%", so percentage
            # sizes were mangled into values like "100%px".
            if (self.Width.find("%") < 0):
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if (self.Height.find("%") < 0):
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        return Html

    def IsCompatible(self):
        """Return True when HTTP_USER_AGENT identifies a supported browser:
        IE >= 5.5 (not Mac/Opera), Gecko >= 20030210, Opera >= 9.5 or
        AppleWebKit >= 522."""
        # "in" works on Python 2 and 3; dict.has_key() is Python-2 only.
        if ("HTTP_USER_AGENT" in os.environ):
            sAgent = os.environ.get("HTTP_USER_AGENT", "")
        else:
            sAgent = ""
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            if (iVersion >= 5.5):
                return True
            return False
        elif (sAgent.find("Gecko/") >= 0):
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            if (iVersion >= 20030210):
                return True
            return False
        elif (sAgent.find("Opera/") >= 0):
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            if (iVersion >= 9.5):
                return True
            return False
        elif (sAgent.find("AppleWebKit/") >= 0):
            p = re.compile(r'AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            # BUG FIX: m.group(1) is a string; comparing it with the int 522
            # was always True on Python 2 and raises TypeError on Python 3.
            if (int(m.group(1)) >= 522):
                return True
            return False
        else:
            return False

    def GetConfigFieldString(self):
        """Serialize self.Config into an HTML-escaped query string."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if (not bFirst):
                sParams += "&"
            else:
                bFirst = False
            if (sValue):
                k = escape(sKey)
                v = escape(sValue)
                if (sValue == "true"):
                    sParams += "%s=true" % k
                elif (sValue == "false"):
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
| Python |
ImageAnalysis: 1.1.3
| Python |
import sys
from time import gmtime


def build_and_revision(t=None):
    """Return (build, revision) derived from a struct_time (UTC now if None).

    build  = months since January 2000, times 100, plus the day of month.
    revision = hour * 100 + minute.
    The original flat script shadowed the builtin min(); the computation is
    unchanged but now lives in a testable function.
    """
    if t is None:
        t = gmtime()
    year, mon, mday, hour, minute, sec, wday, yday, isdst = t
    bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
    rev = hour * 100 + minute
    return bld, rev


bld, rev = build_and_revision()
# print() call form produces identical output on Python 2 and 3 here.
print('Your build and revision number for today is %d.%d.' % (bld, rev))
| Python |
'''
Created on 2009-6-12
@author: roamer
'''
import analysis.Statistics
import codeprocess.ParseCode
import analysis.Params
import analysis.Setting
import os

if __name__ == "__main__":
    # Offline preprocessing pipeline: gather per-image ink statistics,
    # compute bounding boxes for the base images, and load the transform
    # parameter file.  Each stage persists its results (via the analysis
    # package's value store) for later use by the main process.
    a = analysis.Statistics.Statistics()
    a.StatisticsFiles()
    a.SaveResult()
    b = analysis.Statistics.BaseProcess()
    b.Process()
    b.SaveResult()
    c = analysis.Params.Params()
    c.ReadFile()
    c.SaveResultToFile()
    print "Process Done"
'''
Created on 2009-6-12
@author: roamer
'''
import mainprocess.MainProcess

if __name__ == "__main__":
    # Run the solver on one structure code.  Earlier experiment codes are
    # kept below for reference.
    #a = mainprocess.MainProcess.MainProcess("H,bd1,U,b90,H,2l0,2l0,5b0,o50")
    #a = mainprocess.MainProcess.MainProcess("H,J,ig0,2l0,kf0")
    #a = mainprocess.MainProcess.MainProcess("K,kt0,670,2n0")
    #a = mainprocess.MainProcess.MainProcess("H,2f0,J,ab0,ab0")
    #a = mainprocess.MainProcess.MainProcess("H,by0,O,2p0,2l0")
    a = mainprocess.MainProcess.MainProcess("G,fh0")
    a.Parse()
| Python |
'''
Created on 2009-6-13
@author: roamer
'''
from codeprocess import *
from analysis import *
import Util
import Image,os
class MainProcess(object):
    """End-to-end solver for one structure code.

    Parses the code into base-image placements, filters the candidate affine
    parameters for each base, enumerates every parameter combination, scores
    the composed 32x32 images, and shows the single best result.

    NOTE(review): Python 2 code (print statements, reduce, list-returning
    filter).  The lookup tables come from the analysis package's shelve store.
    """
    def __init__(self,code):
        # Parser for the structure code plus the precomputed lookup tables:
        # word statistics, base-image bounding boxes, transform parameters.
        self.__parser = ParseCode.ParseCode(code)
        words = Statistics.Statistics().RestoreResult()
        bases = Statistics.BaseProcess().RestoreResult()
        params = Params.Params().RestoreResultFromFile()
        self.Data = {"words" : words[Setting.Setting["ImgKey"]],"words_max_min" : words[Setting.Setting["ImgKey_Max_Min"]],"bases" : bases,"params" : params}
    def Parse(self):
        """Run the whole pipeline and display the best composed image."""
        bases = self.__parser.Parse()
        base_count = len(bases)
        params = []
        for i in range(0,base_count):
            key = bases[i][0]
            close_codes = self.__getClosedParams(key, i, base_count)
            param_bak = list(close_codes)
            # Drop candidate transforms that would place the base's bounding
            # box outside its assigned zone (scan backwards while deleting).
            for j in range(len(close_codes) - 1,-1,-1):
                x1,y1,x2,y2 = self.Data["bases"][key]
                a,c,b,d = close_codes[j]
                x1 = a * x1 + c
                y1 = b * y1 + d
                x2 = a * x2 + c
                y2 = b * y2 + d
                a,b,c,d = bases[i][1]
                if a > x1 or c + a < x2 or b > y1 or d + b < y2:
                    del close_codes[j]
            if len(close_codes) == 0:
                # Filtering removed everything; fall back to the full list.
                print "No filters were applied to %s,parameters count: %d" % (key,len(param_bak))
                close_codes = param_bak
            print "Candidate count for %s: %d" % (key,len(close_codes))
            params.append((key,bases[i][1],close_codes))
        # Cartesian product of every base's surviving parameter candidates.
        ret = []
        for param in params:
            ret = self.__getPerm(ret, param)
        final_result_set = []
        print "Total candidates:",len(ret)
        for word in ret:
            a,b,c,d = self.__getIntersections(word)
            im = self.CreateImage(word)
            imggem = self.__getImgParam(im)
            # Expected centroid range for images with this black-pixel count
            # (bucketed by Setting["Step"]).
            max_min = self.Data['words_max_min'][str((int(imggem[1][0] / Setting.Setting["Step"]) + 1) * Setting.Setting["Step"])]
            inArea = 0
            if imggem[1][1] >= max_min[0][0] and imggem[1][1] <= max_min[1][0] and imggem[1][2] >= max_min[0][1] and imggem[1][2] >= max_min[1][1]:
                inArea = 0
            else:
                # Penalty: squared distance of the centroid from the canvas
                # centre (15.5, 15.5).
                inArea = (15.5 - imggem[1][1])**2 + (15.5 - imggem[1][2])**2
            final_result_set.append((a,b,c,d,inArea,imggem[1][0],im));
        # Rank candidates with the weighted comparator, keep only the best.
        final_result_set.sort(cmp=MainProcess.__sortResult)
        result_len = len(final_result_set)
        for i in range(result_len - 1,0,-1):
            del final_result_set[i]
        if len(final_result_set) == 1:
            final_result_set[0][6].show()
            del final_result_set[0]
        else:
            print "No result!"
    @staticmethod
    def __sortResult(x,y):
        """cmp-style comparator over candidate tuples; each field contributes
        a fixed weight to the ordering (overlap measures, off-canvas flag,
        centre penalty, black-pixel count)."""
        point = 0
        if x[0] > y[0]:
            point += -25
        elif x[0] < y[0]:
            point += 25
        if x[1] > y[1]:
            point += -30
        elif x[1] < y[1]:
            point += 30
        if x[2] > y[2]:
            point += 10
        elif x[2] < y[2]:
            point += -10
        if x[3] > y[3]:
            point += -100
        elif x[3] < y[3]:
            point += 100
        if x[4] > y[4]:
            point += -5
        elif x[4] < y[4]:
            point += 5
        if x[5] > y[5]:
            point += 50
        elif x[5] < y[5]:
            point += -50
        return -point
    def __getImgParam(self,image):
        """Return ((minx,miny,maxx,maxy),(black_count,centroid_x,centroid_y))
        computed over pixels darker than 150."""
        black_count = 0;
        x_sum = 0;
        y_sum = 0;
        x_size,y_size = image.size
        minx = x_size
        maxx = 0
        miny = y_size
        maxy = 0
        for i in range(0,x_size):
            for j in range(0,y_size):
                pixel = image.getpixel((i,j))
                if isinstance(pixel, tuple):
                    pixel = sum(pixel)
                if pixel < 150:
                    black_count += 1
                    x_sum += i
                    y_sum += j
                    if(minx > i):
                        minx = i
                    if(maxx < i):
                        maxx = i
                    if(miny > j):
                        miny = j
                    if(maxy < j):
                        maxy = j
        if black_count == 0:
            # Guard against division by zero for blank images.
            black_count = 1
        return ((minx,miny,maxx,maxy),(black_count,float(x_sum) / float(black_count),float(y_sum) / float(black_count)))
    def __getIntersections(self,item):
        """For every pair of placed bases, measure bounding-box overlap and
        actual dark-pixel overlap.  Returns
        (intersect_area, intersect_points, leftover_area, outside)."""
        if len(item) < 2:
            return 0,0,1024,False
        comb = Util.Util.Combination(item, 2)
        ret = []
        for x,y in comb:
            com_area = 0
            com_point = 0
            total_area = 0
            outside = False
            # Transform both bounding boxes by their affine parameters
            # (scale, offset per axis).
            a,b,c,d = self.Data['bases'][x[0]]
            a1,b1,c1,d1 = self.Data['bases'][y[0]]
            a = a * x[1][0] + x[1][1]
            b = b * x[1][2] + x[1][3]
            c = c * x[1][0] + x[1][1]
            d = d * x[1][2] + x[1][3]
            a1 = a1 * y[1][0] + y[1][1]
            b1 = b1 * y[1][2] + y[1][3]
            c1 = c1 * y[1][0] + y[1][1]
            d1 = d1 * y[1][2] + y[1][3]
            total_area = (c - a) * (d - b) + (c1 - a1) * (d1 - b1)
            outside = any(map(lambda x: x < 0 or x >= 32,(a,b,c,d,a1,b1,c1,d1)))
            intersection = (int(max(a,a1)) - 1,int(max(b,b1)) - 1,int(min(c,c1)) + 1,int(min(d,d1)) + 1)
            if intersection[0] >= intersection[2] or intersection[1] >= intersection[3]:
                com_area = 0
            else:
                com_area = (intersection[2] - intersection[0]) * (intersection[3] - intersection[1])
            ima = Image.open(Setting.Setting['BaseFolder'] + os.sep + x[0] + Setting.Setting['ImgExt'])
            imb = Image.open(Setting.Setting['BaseFolder'] + os.sep + y[0] + Setting.Setting['ImgExt'])
            imx = Image.new("L",(32,32),0xFFFFFF)
            imy = Image.new("L",(32,32),0xFFFFFF)
            # Render each base onto its own 32x32 canvas under its transform.
            for i in range(0,32):
                for j in range(0,32):
                    pixelx = ima.getpixel((i,j))
                    pixely = imb.getpixel((i,j))
                    if(isinstance(pixelx, tuple)):
                        pixelx = sum(pixelx)
                    if(isinstance(pixely, tuple)):
                        pixely = sum(pixely)
                    i1 = int(i * x[1][0] + x[1][1])
                    j1 = int(j * x[1][2] + x[1][3])
                    i2 = int(i * y[1][0] + y[1][1])
                    j2 = int(j * y[1][2] + y[1][3])
                    if pixelx < 150:
                        if i1 >= 0 and i1 < 32 and j1 >= 0 and j1 < 32:
                            imx.putpixel((i1,j1),pixelx)
                    if pixely < 150:
                        if i2 >= 0 and i2 < 32 and j2 >= 0 and j2 < 32:
                            imy.putpixel((i2,j2),pixely)
            # Count pixels dark in both renderings inside the intersection box.
            for i in range(intersection[0],intersection[2]):
                for j in range(intersection[1],intersection[3]):
                    if i >= 0 and i < imx.size[0] and j >= 0 and j < imx.size[1] and i < imy.size[0] and j < imy.size[1]:
                        pixelx = imx.getpixel((i,j))
                        pixely = imy.getpixel((i,j))
                        if(isinstance(pixelx, tuple)):
                            pixelx = sum(pixelx)
                        if(isinstance(pixely, tuple)):
                            pixely = sum(pixely)
                        if pixelx < 150 and pixely < 150:
                            com_point += 1
            ret.append((com_area,com_point,total_area,outside))
            del ima,imb,imx,imy
        # Sum over all pairs; "outside" is True if any pair left the canvas.
        interset_area,intersect_point,total_area,outside = reduce(lambda x,y: (x[0]+y[0],x[1]+y[1],x[2]+y[2],x[3] or y[3]), ret)
        return (interset_area,intersect_point,int(total_area / 2 - interset_area),outside)
    def CreateImage(self,param):
        """Compose all placed bases into one 32x32 grayscale image, keeping
        the darkest pixel where placements overlap."""
        im = Image.new("L",(32,32),0xFFFFFF)
        for item in param:
            img = Image.open(Setting.Setting['BaseFolder'] + os.sep + item[0] + Setting.Setting['ImgExt'])
            x,y = img.size
            for i in range(0,x):
                for j in range(0,y):
                    pixel = img.getpixel((i,j))
                    if(isinstance(pixel,tuple)):
                        pixel = sum(pixel)
                    if pixel > 255:
                        pixel = 255
                    a = int(i * item[1][0] + item[1][1])
                    b = int(j * item[1][2] + item[1][3])
                    if a > 0 and a < x and b > 0 and b < y:
                        if im.getpixel((a,b)) > pixel:
                            im.putpixel((a,b),pixel)
            del img
        return im
    def __getPerm(self,ret,items):
        """Extend every partial combination in ret with each candidate
        transform of this base (incremental cartesian product)."""
        a = []
        key = items[0]
        for item in items[2]:
            if len(ret) == 0:
                a.append([(key,item)])
            else:
                for i in ret:
                    i = list(i)
                    i.append((key,item))
                    a.append(i)
        return a
    def __getClosedParams(self,code,order,total):
        """Return candidate transforms for a base, preferring the ones
        recorded at the same position/arity, then dedupe near-identical
        entries."""
        codes = self.Data["params"][code]
        closed_codes = filter(lambda x : x[1] == order and x[2] == total, codes)
        if(len(closed_codes) == 0):
            closed_codes = codes
        ret = []
        for i in closed_codes:
            ps = i[0].split(',')
            ret.append((float(ps[0]),float(ps[1]),float(ps[2]),float(ps[3])))
        return self.__removeClosedParams(ret)
    def __removeClosedParams(self,params):
        """Collapse transforms that differ by less than (0.1, 3) in their
        scale/offset components; mutates and returns params."""
        count = len(params)
        current = 0
        while current < count:
            index = current + 1
            a,b,c,d = params[current]
            while index < count:
                a1,b1,c1,d1 = params[index]
                if abs(a - a1) < 0.1 and abs(b - b1) < 3 and abs(c - c1) < 0.1 and abs(d - d1) < 3:
                    del params[index]
                    count -= 1
                else:
                    index += 1
            current += 1
        return params
| Python |
'''
Created on 2009-6-19
@author: roamer
'''
class Util:
    @staticmethod
    def Combination(items, n=None):
        """Yield all order-preserving n-element combinations of items.

        Each yielded value is built from one-element slices, so a list input
        yields lists and a string input yields strings.  When n is None the
        only combination is the whole sequence.
        """
        if n is None:
            n = len(items)
        for idx in range(len(items)):
            head = items[idx:idx + 1]
            if n == 1:
                yield head
            else:
                for tail in Util.Combination(items[idx + 1:], n - 1):
                    yield head + tail
'''
Created on 2009-6-12
@author: roamer
'''
# Public API of the mainprocess package.
__all__ = ['Util','MainProcess']
'''
Created on 2009-6-12
@author: roamer
'''
import Image
import os
from Setting import *
from ValueStore import *
class Statistics:
    """Collects per-image ink statistics (dark-pixel count and centroid) for
    every word image in Setting['ImgFolder'], and derives per-bucket centroid
    ranges for later candidate filtering."""
    def __init__(self):
        # List of (black_count, centroid_x, centroid_y, name) tuples.
        self.__result = []
    def __processOneImg(self,file):
        # Count pixels darker than 100 and accumulate their coordinates to
        # compute the ink centroid of one image.
        black_count = 0;
        x_sum = 0;
        y_sum = 0;
        img = Image.open(file)
        x,y = img.size
        for i in xrange(0,x):
            for j in xrange(0,y):
                pixel = img.getpixel((i,j))
                if(isinstance(pixel,tuple)):
                    # For tuple pixels use the inverted last channel.
                    pixel = 255 - pixel[len(pixel) - 1]
                if pixel < 100:
                    black_count += 1
                    x_sum += i
                    y_sum += j
        #avoid empty image
        if black_count == 0:
            black_count = 1
        # NOTE(review): assumes Windows-style "\\" path separators; the rest
        # of the package builds paths with os.sep — confirm on other OSes.
        name = file[file.rindex("\\")+1:file.rindex(".")]
        self.__result.append((black_count,float(x_sum) / float(black_count),float(y_sum) / float(black_count),name))
    def StatisticsFiles(self):
        """Process every Setting['ImgExt'] file in Setting['ImgFolder']."""
        import glob,os
        folder = Setting["ImgFolder"]
        files = glob.glob(folder + os.sep + "*" + Setting['ImgExt'])
        for i in files:
            self.__processOneImg(i)
    def SaveResult(self):
        """Bucket the results by black-pixel count into Setting['Step']-wide
        bins (up to 1024), recording the min/max centroid per bin, then
        persist both the raw list and the per-bin ranges."""
        data = self.__result
        step = Setting['Step']
        index = 1
        ret = {}
        while (index - 1) * step < 1024:
            minx = 32
            miny = 32
            maxx = 0
            maxy = 0
            start = (index - 1) * step
            end = index * step
            for item in data:
                if(item[0] > start and item[0] <= end):
                    if minx > item[1]:
                        minx = item[1]
                    if miny > item[2]:
                        miny = item[2]
                    if maxx < item[1]:
                        maxx = item[1]
                    if maxy < item[2]:
                        maxy = item[2]
            ret[str(end)] = ((minx,miny),(maxx,maxy))
            index += 1
        ValueStore.Save(Setting["Img"],{Setting["ImgKey"] : self.__result,Setting["ImgKey_Max_Min"] : ret})
    def RestoreResult(self):
        """Reload the persisted statistics dictionary."""
        return ValueStore.Read(Setting["Img"])
class BaseProcess:
    """Computes the ink bounding box of every base image in
    Setting['BaseFolder'] and persists the results keyed by image name."""
    def __init__(self):
        # name -> (minx, miny, maxx, maxy)
        self.__result = {}
    def Process(self):
        """Scan every Setting['ImgExt'] file in the base folder."""
        import glob,os
        folder = Setting["BaseFolder"]
        files = glob.glob(folder + os.sep + "*" + Setting['ImgExt'])
        for i in files:
            self.__processOneImg(i)
    def __processOneImg(self,file):
        # Bounding box of pixels darker than 150.
        minx = 32
        maxx = 0
        miny = 32
        maxy = 0
        img = Image.open(file)
        x,y = img.size
        for i in xrange(0,x):
            for j in xrange(0,y):
                pixel = img.getpixel((i,j))
                if(isinstance(pixel,tuple)):
                    # For tuple pixels use the inverted last channel.
                    pixel = 255 - pixel[len(pixel) - 1]
                if pixel < 150:
                    if(minx > i):
                        minx = i
                    if(maxx < i):
                        maxx = i
                    if(miny > j):
                        miny = j
                    if(maxy < j):
                        maxy = j
        # NOTE(review): assumes Windows-style "\\" path separators.
        name = file[file.rindex("\\")+1:file.rindex(".")]
        self.__result[name] = (minx,miny,maxx,maxy)
    def SaveResult(self):
        """Persist the bounding boxes under Setting['BaseKey']."""
        ValueStore.Save(Setting["BaseKey"], self.__result)
    def RestoreResult(self):
        """Reload the persisted bounding boxes."""
        return ValueStore.Read(Setting["BaseKey"])
'''
Created on 2009-6-12
@author: roamer
'''
import os
import shelve
from Setting import *
from ValueStore import *
class Params:
    """Loads base-image transform parameters from a whitespace-separated
    text file and persists them via ValueStore.

    File format (one entry per line): key, a comma-separated 4-tuple of
    floats (as a string), an order int and a total int.
    """
    def __init__(self):
        # key -> list of (param_string, order, total) tuples
        self.__fileData = {}
    def ReadFile(self):
        """Parse Setting['ParamFile'] into the internal dict, then dedupe."""
        file = open(Setting['ParamFile'])
        for line in file.readlines():
            cols = [i for i in line.strip().split(' ') if i != '']
            key = cols[0]
            val = (cols[1],int(cols[2]),int(cols[3]))
            # "in" instead of dict.has_key(): identical on Python 2, and
            # has_key() no longer exists on Python 3.
            if key in self.__fileData:
                self.__fileData[key].append(val)
            else:
                self.__fileData[key] = [val]
        file.close()
        self.__removeDumplicate()
    def SaveResultToFile(self):
        """Persist the parsed parameters under Setting['ParamKey']."""
        ValueStore.Save(Setting['ParamKey'], self.__fileData)
    def RestoreResultFromFile(self):
        """Reload previously saved parameters and return the dict."""
        self.__fileData = ValueStore.Read(Setting['ParamKey'])
        return self.__fileData
    def __removeDumplicate(self):
        # Sort each key's entries, then drop adjacent duplicates in place
        # (scanning backwards so deletion does not disturb the indices).
        for i in self.__fileData.keys():
            self.__fileData[i] = sorted(self.__fileData[i])
            for j in range(len(self.__fileData[i]) - 2,-1,-1):
                if(self.__fileData[i][j] == self.__fileData[i][j+1]):
                    del self.__fileData[i][j]
| Python |
'''
Created on 2009-6-12
@author: roamer
'''
# Public API of the analysis package.
__all__ = ['Statistics','Params','Setting','ValueStore']
'''
Created on 2009-6-12
@author: roamer
'''
import os
# Central configuration for the analysis/mainprocess packages.  All paths are
# relative to the working directory and built with os.sep for portability.
Setting = {
    # Shelve database file that ValueStore reads and writes.
    "DbFile" : os.sep.join(["..","data","result","mydb.db"]),
    # Raw transform-parameter text file consumed by Params.ReadFile().
    "ParamFile" : os.sep.join(['..','data','rawdata','param.txt']),
    # Folder of word images processed by Statistics.
    "ImgFolder" : os.sep.join(["..","data","images"]),
    # Shelve keys for the statistics payload.
    "ImgKey" : "ImgKey",
    "ImgKey_Max_Min" : "ImgKey_Max_Min",
    # Bucket width for the black-pixel-count bins in Statistics.SaveResult().
    "Step" : 20,
    "Img" : "Img",
    # Image file extension used throughout the pipeline.
    "ImgExt" : ".bmp",
    # Folder of base (component) images used by BaseProcess/MainProcess.
    "BaseFolder" : os.sep.join(["..","data","base"]),
    "BaseKey" : "BaseKey",
    "ParamKey" : "ParamKey"
}
'''
Created on 2009-6-12
@author: roamer
'''
import shelve
from Setting import *
class ValueStore:
    """Minimal key/value persistence on top of a shelve database file
    (Setting['DbFile'])."""
    @staticmethod
    def Read(key):
        """Return the value stored under key.

        Raises if the database or the key is missing.  The shelf is now
        closed in a finally block — previously a missing key leaked the open
        file handle.
        """
        vals = shelve.open(Setting['DbFile'],"r")
        try:
            return vals[key]
        finally:
            vals.close()
    @staticmethod
    def Save(key,val):
        """Store val under key, creating the database if necessary."""
        vals = shelve.open(Setting['DbFile'],"c")
        try:
            vals[key] = val
        finally:
            vals.close()
| Python |
'''
Created on 2009-6-12
@author: roamer
'''
import re
# Relative sub-zones (x_start, x_end, y_start, y_end), as fractions of the
# parent zone, for every structure-code letter.  The tuple length is the
# number of children that letter consumes.
_Dic_StructParams = {
    'H':((0.0,0.8,0.0,1.0),(0.2,1.0,0.0,1.0)),
    'J':((0.0,1.0,0.0,0.8),(0.0,1.0,0.2,1.0)),
    'L':((0.0,1.0,0.0,1.0),(0.1,0.9,0.1,0.9)),
    'G':((0.0,1.0,0.0,1.0),),
    'K':((0.0,1.0,0.0,0.6),(0.0,1.0,0.2,0.8),(0.0,1.0,0.4,1.0)),
    'U':((0.0,1.0,0.0,0.4),(0.0,1.0,0.2,0.6),(0.0,1.0,0.4,0.8),(0.0,1.0,0.6,1.0)),
    'V':((0.0,1.0,0.0,0.5),(0.0,1.0,0.2,0.6),(0.0,1.0,0.4,0.8),(0.0,1.0,0.5,0.9),(0.0,1.0,0.6,1.0)),
    'I':((0.0,0.6,0.0,1.0),(0.2,0.8,0.0,1.0),(0.4,1.0,0.0,1.0)),
    'X':((0.0,0.4,0.0,1.0),(0.2,0.6,0.0,1.0),(0.4,0.8,0.0,1.0),(0.6,1.0,0.0,1.0)),
    'M':((0.0,1.0,0.0,1.0),(0.1,0.9,0.1,1.0)),
    'O':((0.0,1.0,0.0,1.0),(0.1,1.0,0.1,1.0)),
    'P':((0.0,1.0,0.0,1.0),(0.1,1.0,0.0,0.9)),
    'Q':((0.0,1.0,0.0,1.0),(0.1,1.0,0.1,0.9)),
    'R':((0.0,1.0,0.0,1.0),(0.0,0.9,0.1,1.0)),
    'N':((0.0,1.0,0.0,1.0),(0.1,0.9,0.0,0.9)),
    'S':((0.0,1.0,0.0,0.6),(0.0,0.6,0.4,1.0),(0.4,1.0,0.4,1.0)),
    'T':((0.0,0.6,0.0,0.6),(0.4,1.0,0.0,0.6),(0.0,0.6,0.4,1.0),(0.4,1.0,0.4,1.0)),
    'W':((0.0,0.6,0.0,0.6),(0.4,1.0,0.0,0.6),(0.0,0.6,0.4,1.0),(0.4,1.0,0.4,1.0))
}


class ParseCode:
    """Parses a comma-separated structure code such as "G,fh0" into a list
    of (unit_code, (x, y, width, height)) placements on a 32x32 canvas.

    Structure codes are single upper-case letters (keys of
    _Dic_StructParams); unit codes are exactly three chars of [a-z0-9].

    Updated for Python 2/3 compatibility: len(filter(...)) replaced by a
    list comprehension (filter() is a lazy iterator on Python 3) and
    xrange() replaced by range(); behaviour on Python 2 is unchanged.
    """
    def __init__(self,code):
        self.__code = code
    def UnitCodeCount(self):
        """Number of three-character unit codes in the code string."""
        codes = self.__code.split(',')
        return len([c for c in codes if len(c) == 3])
    def CodeValid(self):
        """True when the code is well formed and parses completely."""
        if not self.__check():
            return False
        ret,codes = self.__parse()
        # A complete parse consumes every token except the root.
        return len(codes) == 1
    def __parse(self):
        codes = self.__code.split(',')
        ret = []
        ret,codes = self.__doParse(0, (0.0,1.0,0.0,1.0), ret, codes)
        return ret,codes
    def __check(self):
        # Validate token shapes only; structural arity is checked by parsing.
        codes = self.__code.split(',')
        for i in codes:
            if len(i) == 1:
                if not i.isupper():
                    return False
            elif len(i) == 3:
                if re.match('[a-z0-9]{3}',i) is None:
                    return False
            else:
                return False
        return True
    def Parse(self):
        """Return the placement list, or False if the code is invalid."""
        if not self.__check():
            return False
        ret,codes = self.__parse()
        if(len(codes) == 1):
            return ret
        else:
            return False
    def __doParse(self,index,zone,ret,codes):
        """Recursively place the children of codes[index] inside zone
        (x_start, x_end, y_start, y_end in canvas fractions), appending
        resolved unit-code placements to ret and consuming the used tokens
        from codes."""
        param = _Dic_StructParams[codes[index]]
        for i in range(0,len(param)):
            x = index + i + 1
            if(x >= len(codes)):
                break
            zone_i = param[i]
            # Map the child's relative zone into the parent's absolute zone.
            x_start = zone[0] + zone_i[0] * (zone[1] - zone[0])
            x_end = zone[0] + zone_i[1] * (zone[1] - zone[0])
            y_start = zone[2] + zone_i[2] * (zone[3] - zone[2])
            y_end = zone[2] + zone_i[3] * (zone[3] - zone[2])
            newZone = (x_start,x_end,y_start,y_end)
            if(len(codes[x]) == 1):
                # Nested structure code: recurse into the sub-zone.
                ret,codes = self.__doParse(x, newZone, ret, codes)
            else:
                # Leaf unit code: convert the zone to 32x32 pixel coordinates.
                x1 = newZone[0] * 32.0
                y1 = newZone[2] * 32.0
                width1 = 32.0 * (newZone[1] - newZone[0])
                height1 = 32.0 * (newZone[3] - newZone[2])
                ret.append((codes[x],(x1,y1,width1,height1)))
        # Remove the consumed child tokens (children collapse leftwards).
        for i in range(0,len(param)):
            del codes[index + 1]
        return ret,codes
| Python |
'''
Created on 2009-6-12
@author: roamer
'''
# Public API of the codeprocess package.
__all__ = ["ParseCode"]
# Interactive console budget planner (Python 3): asks for the monthly salary,
# then collects labelled monthly expenses until the user stops.
income = 0.00
alg1 = 0.00   # NOTE(review): unused placeholder
alg2 = 0.00   # NOTE(review): unused placeholder
ausgaben = dict()  # expense label -> monthly amount

print('### FINANZPLANER ###\n')
income = float(input('Monatsgehalt: '))
# DEBUG
#print(type(income))
# DEBUG END
print("Dein Monatgehalt beträgt " + str(income) + ' €')
print("Nachfolgend werden deine monatlichen Ausgaben erfragt\n")

weiter = 1
while(weiter == 1):
    weiter = 0
    ausgabentext = input("Bezeichnung der Ausgabe: ")
    ausgabenbetrag = float(input("monatlicher Betrag: "))
    ausgaben[ausgabentext] = ausgabenbetrag
    print(ausgaben)
    abfrage = input("weitere Ausgaben erfassen? [j/n]: ")
    if(abfrage == "j"):
        weiter = 1
    elif(abfrage == "n"):
        weiter = 0
    else:
        # NOTE(review): any input other than "j" also ends the loop — the
        # message suggests a re-prompt may have been intended; confirm.
        print("Keine gültige Eingabe")
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# find-delete
# Copyright (C) 2010 Elton Pereira <eltonplima@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
import os
import commands
import sys
import datetime
import re
from optparse import OptionParser
import ConfigParser
class FindDelete():
    """Command-line tool: find files matching the configured extensions under
    the configured search paths (via the Unix `find` command) and move or
    copy them into a timestamped backup directory.

    Behaviour is driven by find-delete.conf plus the optparse callbacks
    below.  Python 2 code (commands / ConfigParser modules).
    """
    def __init__(self):
        VERSION = "0.1"
        usage = "usage: %prog [options] arg"
        self.CONF_FILE = "find-delete.conf"
        self.search_paths = None  # Store the search paths in configuration file.
        self.ignore_paths = None  # Store the ignore paths in configuration file.
        self.delete_extensions = None  # Store the extensions in configuration file.
        self.backup_path = None
        self.datetime_backup_path = None  # Backup path with date and time.
        self.has_files = False
        self.searched_files = False  # If false no search was also performed.
        self.current_backup_path = None
        self.backup_list = None  # List of backups stored.
        self.optParser = OptionParser(usage, version="%prog " + VERSION)
        self.config = ConfigParser.RawConfigParser()
        self.output_find = None  # Store the output of the find command.

    def _preserve_path(self, path):
        """Recreate the directory part of `path` under the current backup
        directory; return that relative directory (with trailing slash)."""
        split_path = re.split('/', path)
        split_path.pop(len(split_path)-1)  # remove the last element(file name).
        new_path = ''
        for i in split_path:
            new_path = new_path + i + '/'
        complete_path = self.current_backup_path + new_path
        if not os.path.exists(complete_path):
            try:
                os.makedirs(complete_path)
            except OSError as os_exception:
                # Best effort: the directory may have appeared meanwhile.
                pass
        return new_path

    def _move_file(self, source, destination):
        """Shell out to mv; return the command's exit status."""
        move_command = 'mv \"' + source + '\" \"' + destination + '\"'
        return commands.getstatusoutput(move_command)[0]

    def _copy_file(self, source, destination):
        """Shell out to cp; return the command's exit status."""
        copy_command = 'cp \"' + source + '\" \"' + destination + '\"'
        return commands.getstatusoutput(copy_command)[0]

    def _find_file(self):
        """ Search files based on patterns in configuration file(extensions)
        """
        paths = ' '.join(i for i in self.search_paths)
        i = 0
        pattern_delete = ' '
        # Build "-iname *.ext1 -o -iname *.ext2 ..." (no -o after the last).
        # NOTE(review): the *.ext globs are unquoted, so the shell may expand
        # them against the current directory before find sees them.
        for delete_extension in self.delete_extensions:
            pattern_delete += ' -iname *.' + delete_extension
            i += 1
            if i < len(self.delete_extensions):
                pattern_delete += ' -o'
        find_command = None
        if self.ignore_paths:
            ignore = ''
            for ignore_path in self.ignore_paths:
                ignore += ' -path ' + ignore_path + ' -prune -o'
            find_command = 'find ' + paths + ignore + ' -type f \(' +\
                pattern_delete + ' \)'
        else:
            # BUG FIX: this branch assigned self.find_command, leaving the
            # local find_command as None and breaking the search whenever no
            # ignore_paths were configured.
            find_command = 'find ' + paths + ' -type f \(' +\
                pattern_delete + ' \)'
        self.output_find = commands.getoutput(find_command)
        if self.output_find:
            self.has_files = True
        self.searched_files = True

    def _list_backups(self):
        """ Create a list of backups stored.
        """
        list_backup_command = 'ls ' + self.backup_path
        self.backup_list = commands.getoutput(list_backup_command)

    def print_list_backups(self, option, opt, value, parser):
        """optparse callback: print the stored backups."""
        self._list_backups()
        print(self.backup_list)

    def _premanipulation_files(self):
        """ Create the basic backup directories structure.
        """
        if not self.searched_files:
            self._find_file()
        if not self.backup_path:
            print('Please inform a backup directory in configuration file.')
            sys.exit(1)
        # No create backup if files not found.
        if self.output_find:
            # Create a backup dir.
            if not os.path.exists(self.backup_path):
                os.makedirs(self.backup_path)
            self.datetime_backup_path = self.backup_path + '/' +\
                datetime.datetime.now().strftime('%d%m%Y%H%M')
            if not os.path.exists(self.datetime_backup_path):
                os.makedirs(self.datetime_backup_path)
            self.current_backup_path = self.datetime_backup_path
        else:
            print("No files to manipulate.")
            sys.exit(0)

    def _config_option_parser(self):
        """ Configure the OptionParser.
        """
        self.optParser.add_option(
            "-f",
            "--find",
            action="callback",
            callback=self.print_find_file,
            help="Only find and display files."
        )
        self.optParser.add_option(
            "-m",
            "--move",
            action="callback",
            callback=self.move_files,
            help="Moves the files insteat of deleting them."
        )
        self.optParser.add_option(
            "-c",
            "--copy",
            action="callback",
            callback=self.copy_files,
            help="Copy the files insteat of deleting them."
        )
        self.optParser.add_option(
            "-l",
            "--list",
            action="callback",
            callback=self.print_list_backups,
            help="List backups stored."
        )
        self.optParser.add_option(
            "-p",
            "--preserve",
            action="store_true",
            dest="preserve_path",
            default=True,
            help="Preserves the original path in backup dir(default option)."
        )
        self.optParser.add_option(
            "-P",
            "--dontpreserve",
            action="store_false",
            dest="preserve_path",
            help="Don't preserves the original path in backup dir."
        )
        (options, args) = self.optParser.parse_args()
        if not sys.argv[1:]:
            # No arguments at all: show usage and quit.
            self.optParser.print_help()
            sys.exit(1)

    def _config_config(self):
        """ Configure the ConfigParser.
        """
        path_config_file = os.getcwd() + '/' + self.CONF_FILE
        if os.path.exists(path_config_file):
            self.config.read(path_config_file)
            try:
                self.search_paths = self.config.get\
                    ('search', 'paths').split(',')
                self.ignore_paths = self.config.get\
                    ('search', 'ignore_paths').split(',')
                self.delete_extensions = self.config.get\
                    ('delete', 'extensions').split(',')
                if len(self.config.get('delete', 'backup_path').split(',')) > 1:
                    sys.stderr.write('Error in configuration file.' +
                        ' The backup_path contain more one directory.\n')
                    sys.exit(1)
                else:
                    self.backup_path = self.config.get('delete', 'backup_path')
            except ConfigParser.NoSectionError as exception:
                # BUG FIX: NoSectionError has no "line" attribute, so the
                # handler itself raised AttributeError; report the exception
                # message instead.
                sys.stderr.write('%s\n' % exception)
                sys.exit(1)
        else:
            sys.stderr.write('Configuration file not found in: %s\n' %\
                path_config_file)
            sys.exit(1)

    def print_find_file(self, option, opt, value, parser):
        """ Print the list of files found.
        """
        if not self.searched_files:
            self._find_file()
        print(self.output_find)

    def move_files(self, option, opt, value, parser):
        """optparse callback: move every found file into the backup dir,
        optionally preserving the original directory structure."""
        self._premanipulation_files()
        output_find_list = self.output_find.split('\n')
        for file_found in output_find_list:
            if parser.values.preserve_path:
                preserve_path = self._preserve_path(file_found)
                move_to = self.datetime_backup_path + preserve_path
                if not os.path.isdir(file_found):
                    self._move_file(file_found, move_to)
            else:
                if not os.path.isdir(file_found):
                    self._move_file(file_found, self.datetime_backup_path)

    def copy_files(self, option, opt, value, parser):
        """optparse callback: copy every found file into the backup dir,
        optionally preserving the original directory structure."""
        self._premanipulation_files()
        output_find_list = self.output_find.split('\n')
        for file_found in output_find_list:
            if parser.values.preserve_path:
                preserve_path = self._preserve_path(file_found)
                copy_to = self.datetime_backup_path + preserve_path
                if not os.path.isdir(file_found):
                    self._copy_file(file_found, copy_to)
            else:
                if not os.path.isdir(file_found):
                    self._copy_file(file_found, self.datetime_backup_path)

    def configure(self):
        """Load configuration, then parse the command line (which triggers
        the action callbacks above)."""
        self._config_config()
        self._config_option_parser()
if __name__ == "__main__":
    # Build the tool, load configuration, then let the optparse callbacks
    # perform the requested action.
    f = FindDelete()
    f.configure()
| Python |
"""Author: Sean McKiernan (Mekire)
Purpose: Exploring A* pathfinding.
License: Free for everyone and anything (no warranty expressed or implied)
Module: main.py
Overview: Primary driver for entire program.
Classes:
Control(object):
Methods:
__init__(self)
event_loop(self)
game_loop(self)
Functions:
main()"""
import sys,os
import pygame as pg
import interface
class Control(object):
    """Driver class for the whole program."""
    def __init__(self):
        # The display surface; pg.display.set_mode() must already have been
        # called (done in main()).
        self.Screen = pg.display.get_surface()
        self.done = False
        self.Clock = pg.time.Clock()
        self.fps = 50  # frame-rate cap for tick_busy_loop
        self.State = interface.Interface()
    def event_loop(self):
        """Check event queue and pass events to states as necessary."""
        for event in pg.event.get():
            self.keys = pg.key.get_pressed()
            # Quit on window close or Escape.
            if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:
                self.done = True
            # While the solver is running, the interface ignores input.
            if self.State.mode != "RUN":
                self.State.get_event(event)
    def game_loop(self):
        """Main game loop of entire program."""
        while not self.done:
            self.event_loop()
            self.State.update(self.Screen)
            self.Clock.tick_busy_loop(self.fps)
            pg.display.flip()
###
def main():
    """Initialize the display and create an instance of Control."""
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pg.init()
    pg.display.set_caption("A* Demonstration")
    pg.display.set_mode((440,280))
    app = Control()
    app.game_loop()
    pg.quit()
    sys.exit()
####
# Start the demo when run as a script.
if __name__ == "__main__":
    main()
| Python |
"""Module: solver.py
Overview:
Contains the astar algorithm itself which can be used completely
independent of the rest of the program.
Global Constants:
ADJACENTS
HEURISTICS
Functions:
rook(x,y)
queen(x,y)
knight(x,y)
Classes:
Star(object):
Methods:
__init__(self)
setup(self)
get_neighbors(self)
follow_current_path(self)
get_path(self,cell)
evaluate(self)"""
import pygame as pg
# Legal single-move offsets (dx, dy) for each supported piece type.
ADJACENTS = {"rook" : [(1,0),(-1,0),(0,1),(0,-1)],
             "queen" : [(1,0),(-1,0),(0,1),(0,-1),(1,1),(1,-1),(-1,1),(-1,-1)],
             "knight" : [(1,-2),(1,2),(-1,-2),(-1,2),(2,1),(2,-1),(-2,1),(-2,-1)]}
def rook(x,y):
    """Optimum rook distance heuristic: horizontal plus vertical distance."""
    return y + x
def queen(x,y):
    """Optimum queen distance heuristic: the larger of the two distances."""
    return x if x > y else y
def knight(x,y):
    """Knight distance heuristic (an estimate, not an exact knight metric)."""
    half_x = -(-x // 2)  # ceil(x / 2)
    half_y = -(-y // 2)  # ceil(y / 2)
    return max(half_x, half_y)
# Heuristic function to use for each piece type (keys match ADJACENTS).
HEURISTICS = {"rook" : rook,
              "queen" : queen,
              "knight" : knight}
class Star(object):
    """This class is the astar algorithm itself. The goal is to make it
    flexible enough that it can be used in isolation."""
    def __init__(self,start,end,move_type,barriers):
        """Arguments start and end are coordinates to start solving from and to.
        move_type is a string corresponding to the keys of the ADJACENTS and
        HEURISTICS constant dictionaries. barriers is a set of cells which the
        agent is not allowed to occupy."""
        self.start,self.end = start,end
        self.moves = ADJACENTS[move_type]      #Offsets the agent may step by.
        self.heuristic = HEURISTICS[move_type] #Distance estimator h(x,y).
        self.barriers = barriers
        self.setup()
    def setup(self):
        """Initialize sets, dicts and bookkeeping for a fresh solve."""
        self.closed_set = set((self.start,)) #Set of cells already evaluated
        self.open_set = set() #Set of cells to be evaluated.
        self.came_from = {} #Used to reconstruct path once solved.
        self.gx = {self.start:0} #Cost from start to current position.
        self.hx = {} #Optimal estimate to goal based on heuristic.
        self.fx = {} #Distance-plus-cost heuristic function.
        self.current = self.start
        #Expand the start cell immediately so the open set is seeded.
        self.current = self.follow_current_path()
        self.solution = []
        self.solved = False
    def get_neighbors(self):
        """Find adjacent neighbors with respect to how our agent moves."""
        neighbors = set()
        for (i,j) in self.moves:
            check = (self.current[0]+i,self.current[1]+j)
            #Walls and already-evaluated cells are never neighbors.
            if check not in (self.barriers|self.closed_set):
                neighbors.add(check)
        return neighbors
    def follow_current_path(self):
        """In the very common case of multiple points having the same heuristic
        value, this function makes sure that points on the current path take
        precedence. This is most obvious when trying to use astar on an
        obstacle free grid."""
        next_cell = None
        for cell in self.get_neighbors():
            tentative_gx = self.gx[self.current]+1 #Every step costs exactly 1.
            if cell not in self.open_set:
                self.open_set.add(cell)
                tentative_best = True
            elif cell in self.gx and tentative_gx < self.gx[cell]:
                tentative_best = True #Found a cheaper route to a known cell.
            else:
                tentative_best = False
            if tentative_best:
                x,y = abs(self.end[0]-cell[0]),abs(self.end[1]-cell[1])
                self.came_from[cell] = self.current
                self.gx[cell] = tentative_gx
                self.hx[cell] = self.heuristic(x,y)
                self.fx[cell] = self.gx[cell]+self.hx[cell]
                #Track the lowest f-score neighbor as the preferred next cell.
                if not next_cell or self.fx[cell]<self.fx[next_cell]:
                    next_cell = cell
        return next_cell
    def get_path(self,cell):
        """Recursively reconstruct the path. No real need to do it recursively."""
        if cell in self.came_from:
            self.solution.append(cell)
            self.get_path(self.came_from[cell])
    def evaluate(self):
        """Core logic for executing the astar algorithm. Call once per step
        (animated mode) or repeatedly until self.solution is set."""
        if self.open_set and not self.solved:
            #Pick the open cell with the lowest f-score.
            #NOTE(review): set iteration order is arbitrary, so ties are broken
            #nondeterministically; follow_current_path() biases the choice.
            for cell in self.open_set:
                if (self.current not in self.open_set) or (self.fx[cell]<self.fx[self.current]):
                    self.current = cell
            if self.current == self.end:
                self.get_path(self.current)
                self.solved = True
            self.open_set.discard(self.current)
            self.closed_set.add(self.current)
            self.current = self.follow_current_path()
        elif not self.solution:
            self.solution = "NO SOLUTION" #Open set exhausted: goal unreachable.
"""Module: interface.py
Overview: The core of the GUI for the A* demo.
Classes:
Interface(object):
Methods:
__init__(self)
make_background(self)
reset(self,full=True)
setup_barriers(self)
render_text(self,specific=None)
get_target(self)
get_event(self,event)
left_button_clicked(self)
right_button_clicked(self)
hotkeys(self,event)
toggle_animate(self)
toggle_piece(self,ind=None)
add_barriers(self)
update(self,Surf)
found_solution(self)
fill_cell(self,cell,color,Surf)
center_number(self,cent,string,color,Surf)
draw(self,Surf)
draw_solve(self,Surf)
draw_start_end_walls(self,Surf)
draw_messages(self,Surf)"""
import pygame as pg
import solver
class Interface(object):
    """The GUI for the A* demo: owns the grid image, all mouse/keyboard
    interaction, the solver instance, and every draw pass.

    Modes flow START -> GOAL -> BARRIER -> RUN -> SOLVED/FAILED."""
    def __init__(self):
        self.animate = False
        self.options = ("rook","queen","knight")
        self.piece_type = "rook"
        self.cell_size = (20,20)
        self.image = self.make_background()
        self.reset()
        self.font = pg.font.SysFont("arial",13)
        self.rendered = {}
        self.render_text()
    def make_background(self):
        """Create grid image. Currently screen and cell size are hardcoded."""
        image = pg.Surface((440,280)).convert()
        image.set_colorkey((255,0,255))
        image.fill((255,0,255),(20,20,400,240))
        for i in range(21):
            image.fill((255,0,0),(20+20*i,20,2,242))
        for i in range(13):
            image.fill((255,0,0),(20,20+20*i,400,2))
        return image
    def reset(self,full=True):
        """Allows both completely resetting the grid or resetting to an
        unsolved state."""
        if full:
            self.mode = "START"
            self.start_cell = None
            self.goal_cell = None
            self.barriers = self.setup_barriers()
            self.Solver = None
            self.time_end = self.time_start = 0.0
            self.solution = []
        else:
            self.Solver = None
            self.mode = "BARRIER"
    def setup_barriers(self):
        """Initialize the boundary borders. Borders must be two cells thick to
        prevent knight pieces from leaving the grid."""
        self.add_barrier = False
        self.del_barrier = False
        barriers = set()
        for i in range(-1,23):
            for j in (-1,0,13,14):
                barriers.add((i,j))
        for j in range(-1,15):
            for i in (-1,0,21,22):
                barriers.add((i,j))
        return barriers
    def render_text(self,specific=None):
        """Prerender text messages. By default all are rendered. Single messages
        can be rerendered by passing a key corresponding to the below dictionary."""
        def render_each(specific,text_dict):
            msg,loc = text_dict[specific]
            rend = self.font.render(msg,1,(255,255,255))
            rect = pg.Rect(rend.get_rect(topleft=loc))
            self.rendered[specific] = [rend,rect]
        text = {"START" : ["Place your start point:",(10,1)],
                "GOAL" : ["Place your goal:",(10,1)],
                "BARRIER" : ["Draw your walls or press spacebar to solve:",(10,1)],
                "ENTER" : ["Press 'Enter' to restart.",(10,1)],
                "RESET" : ["Press 'i' to reset.",(150,1)],
                "ANIM" : ["Animation: {}".format(["Off","On"][self.animate]),(340,1)],
                "MOVE" : ["Move type: {}".format(self.piece_type.capitalize()),(320,263)],
                "TIME" : ["Time (ms): {}".format(self.time_end-self.time_start),(100,263)],
                "FAILED" : ["No solution.",(20,263)],
                "SOLVED" : ["Steps: {}".format(len(self.solution)),(20,263)]}
        if specific:
            render_each(specific,text)
        else:
            for specific in text:
                render_each(specific,text)
    def get_target(self):
        """Find both the exact mouse position and its position in graph cells."""
        self.mouse = pg.mouse.get_pos()
        self.target = (self.mouse[0]//self.cell_size[0],self.mouse[1]//self.cell_size[1])
    def get_event(self,event):
        """Receives events from the control class and passes them along as appropriate."""
        self.get_target()
        if event.type == pg.MOUSEBUTTONDOWN:
            hit = pg.mouse.get_pressed()
            if hit[0]:
                self.left_button_clicked()
            elif hit[2]:
                self.right_button_clicked()
        elif event.type == pg.MOUSEBUTTONUP:
            self.add_barrier = False
            self.del_barrier = False
        elif event.type == pg.KEYDOWN:
            self.hotkeys(event)
    def left_button_clicked(self):
        """Left mouse button functionality for get_event method."""
        if pg.Rect(20,20,400,240).collidepoint(self.mouse):
            if self.mode == "START":
                if self.target != self.goal_cell and self.target not in self.barriers:
                    self.start_cell = self.target
                    self.mode = ("BARRIER" if self.goal_cell else "GOAL")
            elif self.mode == "GOAL":
                if self.target != self.start_cell and self.target not in self.barriers:
                    self.goal_cell = self.target
                    self.mode = "BARRIER"
            elif self.mode == "BARRIER":
                self.add_barrier = True
        elif self.rendered["MOVE"][1].collidepoint(self.mouse):
            self.toggle_piece()
        elif self.rendered["ANIM"][1].collidepoint(self.mouse):
            self.toggle_animate()
        elif self.mode == "BARRIER" and self.rendered["BARRIER"][1].collidepoint(self.mouse):
            self.mode = "RUN"
        elif self.mode in ("SOLVED","FAILED"):
            if self.rendered["ENTER"][1].collidepoint(self.mouse):
                self.reset()
            elif self.rendered["RESET"][1].collidepoint(self.mouse):
                self.reset(False)
    def right_button_clicked(self):
        """Right mouse button functionality for get_event method."""
        if self.mode != "RUN":
            if self.target == self.start_cell:
                self.start_cell = None
                self.mode = "START"
            elif self.target == self.goal_cell:
                self.goal_cell = None
                self.mode = ("GOAL" if self.start_cell else "START")
            elif self.mode == "BARRIER":
                self.del_barrier = True
    def hotkeys(self,event):
        """Keyboard functionality for get_event method."""
        if event.key in (pg.K_1,pg.K_2,pg.K_3):
            self.toggle_piece(int(event.unicode)-1)
        elif event.key == pg.K_d:
            self.toggle_animate()
        elif self.mode == "BARRIER" and event.key == pg.K_SPACE:
            self.mode = "RUN"
        elif self.mode in ("SOLVED","FAILED"):
            if event.key == pg.K_RETURN:
                self.reset()
            elif event.key == pg.K_i:
                self.reset(False)
    def toggle_animate(self):
        """Turns animation mode on and off."""
        if self.mode != "RUN":
            self.animate = not self.animate
            self.render_text("ANIM")
    def toggle_piece(self,ind=None):
        """Change to next piece or to a specific piece if ind is supplied."""
        if self.mode != "RUN":
            # BUG FIX: was 'if not ind:', which treated a requested index of 0
            # (the '1' hotkey selecting "rook") as "no index" and cycled to the
            # next piece instead of selecting the rook.
            if ind is None:
                ind = (self.options.index(self.piece_type)+1)%len(self.options)
            self.piece_type = self.options[ind]
            self.render_text("MOVE")
    def add_barriers(self):
        """Controls both adding and deleting barrier cells with the mouse."""
        if self.mode == "BARRIER":
            self.get_target()
            if pg.Rect(20,20,400,240).collidepoint(self.mouse):
                if self.target not in (self.start_cell,self.goal_cell):
                    if self.add_barrier:
                        self.barriers.add(self.target)
                    elif self.del_barrier:
                        self.barriers.discard(self.target)
    def update(self,Surf):
        """Primary update logic control flow for the GUI."""
        self.add_barriers()
        if self.mode == "RUN":
            if not self.Solver:
                self.time_start = pg.time.get_ticks()
                self.Solver = solver.Star(self.start_cell,self.goal_cell,self.piece_type,self.barriers)
            if self.animate:
                self.Solver.evaluate()
            else:
                # Non-animated mode: run the solver to completion in one frame.
                while not self.Solver.solution:
                    self.Solver.evaluate()
            if self.Solver.solution:
                self.found_solution()
        if self.mode != "RUN" or self.animate:
            self.draw(Surf)
    def found_solution(self):
        """Sets appropriate mode when solution is found (or failed)."""
        self.time_end = pg.time.get_ticks()
        if self.Solver.solution == "NO SOLUTION":
            self.mode = "FAILED"
        else:
            self.solution = self.Solver.solution
            self.mode = "SOLVED"
        self.render_text("SOLVED")
        self.render_text("TIME")
    def fill_cell(self,cell,color,Surf):
        """Fills a single cell given coordinates, color, and a target Surface."""
        loc = cell[0]*self.cell_size[0],cell[1]*self.cell_size[1]
        Surf.fill(color,(loc,self.cell_size))
        return pg.Rect(loc,self.cell_size)
    def center_number(self,cent,string,color,Surf):
        """Used for centering numbers on cells."""
        rend = self.font.render(string,1,color)
        rect = pg.Rect(rend.get_rect(center=cent))
        rect.move_ip(1,1)
        Surf.blit(rend,rect)
    def draw(self,Surf):
        """Calls draw functions in the appropriate order."""
        Surf.fill(0)
        self.draw_solve(Surf)
        self.draw_start_end_walls(Surf)
        Surf.blit(self.image,(0,0))
        self.draw_messages(Surf)
    def draw_solve(self,Surf):
        """Draws while solving (if animate is on) and once solved."""
        if self.mode in ("RUN","SOLVED","FAILED"):
            for cell in self.Solver.closed_set:
                self.fill_cell(cell,(255,0,255),Surf)
            if self.mode == "SOLVED":
                for i,cell in enumerate(self.solution):
                    cent = self.fill_cell(cell,(0,255,0),Surf).center
                    self.center_number(cent,str(len(self.solution)-i),(0,0,0),Surf)
    def draw_start_end_walls(self,Surf):
        """Draw endpoints and barriers."""
        if self.start_cell:
            self.fill_cell(self.start_cell,(255,255,0),Surf)
        if self.goal_cell:
            cent = self.fill_cell(self.goal_cell,(0,0,255),Surf).center
            if self.mode == "SOLVED":
                self.center_number(cent,str(len(self.solution)),(255,255,255),Surf)
        for cell in self.barriers:
            self.fill_cell(cell,(255,255,255),Surf)
    def draw_messages(self,Surf):
        """Draws the text (not including cell numbers)."""
        for key in [self.mode,"MOVE","ANIM"]:
            try:
                Surf.blit(*self.rendered[key])
            except KeyError:
                pass
        if self.mode in ("SOLVED","FAILED"):
            for rend in ("TIME","RESET","ENTER"):
                Surf.blit(*self.rendered[rend])
'''
This file handles all the request related to fabric operation
'''
import os
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext.db import djangoforms
import django
from django import http
from util import respond
from form import FabricTypeForm
from dao import FabricType
def index(request):
    """Render the list of all FabricType entities, newest first."""
    user = users.GetCurrentUser()
    fabrics = db.GqlQuery('SELECT * FROM FabricType ORDER BY addDate DESC')
    return respond(request, user, 'fabric/list', {'fabrics': fabrics})
def edit(request, fabric_id):
user = users.GetCurrentUser()
if user is None:
return http.HttpResponseForbidden('You must be signed in to add or edit a user')
fabric = None
if fabric_id:
fabric = FabricType.get(db.Key.from_path(FabricType.kind(), int(fabric_id)))
if fabric is None:
return http.HttpResponseNotFound('No user exists with that key (%r)' %fabric_id)
form = FabricTypeForm(data=request.POST or None, instance=fabric)
if not request.POST:
return respond(request, user, 'fabric/modify', {'form': form, 'fabric': fabric})
errors = form.errors
if not errors:
try:
fabric = form.save(commit=False)
except ValueError, err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, user, 'fabricmodify', {'form': form, 'fabric': fabric})
fabric.put()
return http.HttpResponseRedirect('/fabric')
def new(request):
    """Create a new FabricType: delegates to edit() with no entity id."""
    return edit(request, None)
def delete(request, fabric_id):
    """Delete the FabricType named by fabric_id, then return to the list.

    A falsy fabric_id silently falls through to the redirect (no-op),
    matching the sibling controllers.
    """
    if fabric_id:
        fabric = FabricType.get(db.Key.from_path(FabricType.kind(), int(fabric_id)))
        if fabric is None:
            # BUG FIX: message said 'No user exists...' (copy-paste from
            # usercontroller); this view deals in fabrics.
            return http.HttpResponseNotFound('No fabric exists with that key (%r)' %fabric_id)
        else:
            fabric.delete()
    return http.HttpResponseRedirect('/fabric')
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for this project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
# NOTE(review): all DATABASE_* values are blank — presumably the App Engine
# datastore is used instead of Django's ORM; confirm before enabling the ORM.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles' # i.e., Mountain View
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Whether to append trailing slashes to URLs.
APPEND_SLASH = False
# Make this unique, and don't share it with anybody.
# NOTE(review): 'hubba-hubba' is a placeholder committed to source control —
# it should be replaced with a real per-deployment secret.
SECRET_KEY = 'hubba-hubba'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
)
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.contenttypes',
)
| Python |
from google.appengine.ext.db import djangoforms
#local import
from dao import *
class UserForm(djangoforms.ModelForm):
    """Form for LocalUsers; joinDate is auto_now_add, so it is excluded."""
    class Meta:
        model = LocalUsers
        exclude = ['joinDate']
class FabricTypeForm(djangoforms.ModelForm):
    """Form for FabricType; addDate is auto_now_add, so it is excluded."""
    class Meta:
        model = FabricType
        exclude = ['addDate']
class StatisticForm(djangoforms.ModelForm):
    """Form for Statistic entities.

    addDate is excluded for consistency with UserForm/FabricTypeForm: it is
    auto_now_add on the model and must not be user-editable.
    """
    class Meta:
        model = Statistic
        exclude = ['addDate']
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns(
    '',
    #index page
    (r'^$', 'indexcontroller.index'),
    #nav
    (r'^nav$', 'indexcontroller.nav'),
    #user controller
    (r'^user$', 'usercontroller.index'),
    (r'^user/new$', 'usercontroller.new'),
    (r'^user/edit/(\d+)$', 'usercontroller.edit'),
    # BUG FIX: was r'^fabric/delete/(\d+)$' mapped to usercontroller.delete,
    # which duplicated the fabric delete pattern below (shadowing
    # fabriccontroller.delete) and left user deletion unreachable.
    (r'^user/delete/(\d+)$', 'usercontroller.delete'),
    #fabric controller
    (r'^fabric$', 'fabriccontroller.index'),
    (r'^fabric/new$', 'fabriccontroller.new'),
    (r'^fabric/edit/(\d+)$', 'fabriccontroller.edit'),
    (r'^fabric/delete/(\d+)$', 'fabriccontroller.delete'),
    #statistic controller
    (r'^statistic$', 'statisticcontroller.index'),
    (r'^statistic/new$', 'statisticcontroller.new'),
    (r'^statistic/edit/(\d+)$', 'statisticcontroller.edit'),
    (r'^statistic/delete/(\d+)$', 'statisticcontroller.delete'),
)
| Python |
'''
This file handles all the request from index page
'''
import os
from google.appengine.api import users
from util import *
def index(request):
    """Render the landing page for the current user (user may be None)."""
    user = users.GetCurrentUser()
    return respond(request, user, 'index', None)
def nav(request):
    """Render the navigation fragment."""
    user = users.GetCurrentUser()
    return respond(request, user, 'navigation', None)
| Python |
'''
This file handles all the request related to in and out operation
'''
import os

from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext.db import djangoforms

import django
from django import http

#local import
from form import StatisticForm
from util import respond
from dao import LocalUsers, Statistic
def index(request):
    """Render the list of all Statistic entities, newest first."""
    user = users.GetCurrentUser()
    statistics = db.GqlQuery('SELECT * FROM Statistic ORDER BY addDate DESC')
    return respond(request, user, 'statistic/list', {'statistics': statistics})
def edit(request, statistic_id):
user = users.GetCurrentUser()
if user is None:
return http.HttpResponseForbidden('You must be signed in to add or edit a statistic')
statistic = None
if statistic_id:
statistic = Statistic.get(db.Key.from_path(Statistic.kind(), int(statistic_id)))
if statistic is None:
return http.HttpResponseNotFound('No statistic exists with that key (%r)' %statistic_id)
form = StatisticForm(data=request.POST or None, instance=statistic)
if not request.POST:
return respond(request, user, 'statistic/modify', {'form': form, 'statistic': statistic})
errors = form.errors
if not errors:
try:
statistic = form.save(commit=False)
except ValueError, err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, user, 'statistic/modify', {'form': form, 'statistic': statistic})
localuser.put()
return http.HttpResponseRedirect('/statistic')
def new(request):
    """Create a new Statistic: delegates to edit() with no entity id."""
    return edit(request, None)
def delete(request, statistic_id):
    """Delete the Statistic named by statistic_id, then return to the list.

    A falsy statistic_id silently falls through to the redirect (no-op).
    Requires Statistic to be imported from dao.
    """
    if statistic_id:
        statistic = Statistic.get(db.Key.from_path(Statistic.kind(), int(statistic_id)))
        if statistic is None:
            # BUG FIX: message said 'No user exists...' for a statistic lookup.
            return http.HttpResponseNotFound('No statistic exists with that key (%r)' %statistic_id)
        else:
            statistic.delete()
    return http.HttpResponseRedirect('/statistic')
| Python |
'''
This file handles all the request related to user operation
'''
import os
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext.db import djangoforms
import django
from django import http
#local import
from form import UserForm
from util import respond
from dao import LocalUsers
def index(request):
    """Render the list of all LocalUsers entities, newest first."""
    user = users.GetCurrentUser()
    localusers = db.GqlQuery('SELECT * FROM LocalUsers ORDER BY joinDate DESC')
    return respond(request, user, 'user/list', {'localusers': localusers})
def edit(request, user_id):
user = users.GetCurrentUser()
if user is None:
return http.HttpResponseForbidden('You must be signed in to add or edit a user')
localuser = None
if user_id:
localuser = LocalUsers.get(db.Key.from_path(LocalUsers.kind(), int(user_id)))
if localuser is None:
return http.HttpResponseNotFound('No user exists with that key (%r)' %user_id)
form = UserForm(data=request.POST or None, instance=localuser)
if not request.POST:
return respond(request, user, 'user/modify', {'form': form, 'localuser': localuser})
errors = form.errors
if not errors:
try:
localuser = form.save(commit=False)
except ValueError, err:
errors['__all__'] = unicode(err)
if errors:
return respond(request, user, 'user/modify', {'form': form, 'localuser': localuser})
localuser.put()
return http.HttpResponseRedirect('/user')
def new(request):
    """Create a new LocalUsers entity: delegates to edit() with no id."""
    return edit(request, None)
def delete(request, user_id):
    """Delete the LocalUsers entity named by user_id, then return to /user.

    A falsy user_id silently falls through to the redirect (no-op).
    """
    if user_id:
        key = db.Key.from_path(LocalUsers.kind(), int(user_id))
        target = LocalUsers.get(key)
        if target is None:
            return http.HttpResponseNotFound('No user exists with that key (%r)' %user_id)
        target.delete()
    return http.HttpResponseRedirect('/user')
| Python |
import os
import django
from django import shortcuts
from google.appengine.api import users
def respond(request, user, template, params=None):
    """Helper to render a response, passing standard stuff to the response.
    Args:
    request: The request object.
    user: The User object representing the current user; or None if nobody
    is logged in.
    template: The template name; '.html' is appended automatically.
    params: A dict giving the template parameters; modified in-place.
    Returns:
    Whatever render_to_response(template, params) returns.
    Raises:
    Whatever render_to_response(template, params) raises.
    """
    if params is None:
        params = {}
    if user:
        params['user'] = user
        params['sign_out'] = users.CreateLogoutURL('/')
        # BUG FIX: os.getenv('SERVER_SOFTWARE') returns None when the variable
        # is unset (e.g. outside the App Engine runtime), making the 'in' test
        # raise TypeError; default to '' so the check degrades gracefully.
        params['is_admin'] = (users.IsCurrentUserAdmin() and
                              'Dev' in os.getenv('SERVER_SOFTWARE', ''))
    else:
        params['sign_in'] = users.CreateLoginURL(request.path)
    if not template.endswith('.html'):
        template += '.html'
    return shortcuts.render_to_response(template, params)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstrap for running a Django app under Google App Engine.
The site-specific code is all in other files: settings.py, urls.py,
models.py, views.py. And in fact, only 'settings' is referenced here
directly -- everything else is controlled from there.
"""
# Standard Python imports.
import os
import sys
import logging
import __builtin__
# Google App Hosting imports.
from google.appengine.ext.webapp import util
from django.conf import settings
'''This is important to set setting's target to None'''
# NOTE(review): django.conf.settings is imported here, before
# DJANGO_SETTINGS_MODULE is set below; resetting _target forces settings to
# be re-resolved lazily — confirm this ordering is still required.
settings._target = None
import pickle
# Alias cPickle to the pure-Python pickle module for the sandboxed runtime.
sys.modules['cPickle'] = pickle
# Enable info logging by the app (this is separate from appserver's
# logging).
logging.getLogger().setLevel(logging.INFO)
# Force sys.path to have our own directory first, so we can import from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Must set this env var *before* importing any part of Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Make sure we can import Django. We may end up needing to do this
# little dance, courtesy of Google third-party versioning hacks. Note
# that this patches up sys.modules, so all other code can just use
# "from django import forms" etc.
try:
    from django import v0_96 as django
except ImportError:
    pass
# Import the part of Django that we use here.
import django.core.handlers.wsgi
def main():
    """Build the Django WSGI application and hand it to App Engine's runner."""
    wsgi_app = django.core.handlers.wsgi.WSGIHandler()
    util.run_wsgi_app(wsgi_app)
# CGI entry point.
if __name__ == '__main__':
    main()
| Python |
from google.appengine.ext import db
class LocalUsers(db.Model):
    """An application-local user record linked to a Google account."""
    displayName = db.StringProperty(required = True)  # name shown in the UI
    user = db.UserProperty()                          # the linked Google account
    isAuthorized = db.BooleanProperty()               # access flag -- TODO confirm semantics
    joinDate = db.DateProperty(auto_now_add = True)   # set automatically on create
class FabricType(db.Model):
    """A kind of fabric with its default buy/sell prices."""
    localUser = db.ReferenceProperty(LocalUsers)      # owner/creator -- TODO confirm
    displayName = db.StringProperty(required = True)
    defaultSalePrice = db.FloatProperty()
    defaultBuyPrice = db.FloatProperty()
    addDate = db.DateTimeProperty(auto_now_add = True)  # set automatically on create
class Statistic(db.Model):
    """A single in/out movement of fabric, with quantity and price."""
    localUser = db.ReferenceProperty(LocalUsers)      # who recorded it -- TODO confirm
    fabricType = db.ReferenceProperty(FabricType)
    meter = db.FloatProperty()                        # quantity in meters
    price = db.FloatProperty()
    type = db.StringProperty(choices=('in', 'out'))   # stock direction
    addDate = db.DateTimeProperty(auto_now_add = True)  # set automatically on create
| Python |
# Per-deployment credentials. Blank values disable the integration.
# fill this with data from os.urandom(64)
COOKIE_KEY = ''
FACEBOOK_KEY = ''
FACEBOOK_SECRET = ''
# to enable, set to analytics id: 'UA-12345678-9'
GOOGLE_ANALYTICS = ''
# NOTE(review): live Twitter consumer credentials are committed to source
# control here — they should be rotated and loaded from deployment config.
TWITTER_KEY = 'pCUVbTMmjUgFdUte2OpKw'
TWITTER_SECRET = '2ZkmX9uZXViXRTvHulBMTBo7XFpSygyyxAWRKUz0'
DROPBOX_KEY = ''
DROPBOX_SECRET = ''
GOOGLE_SITE_VERIFICATION = ''
GOOGLE_KEY = ''
GOOGLE_SECRET = ''
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import with_statement
import datetime
import logging
import re
from google.appengine.api import images
from google.appengine.ext import blobstore
from google.appengine.ext import db
import cache
import hashlib
import urllib
import utils
import webapp2
class DerefModel(db.Model):
    """db.Model mixin-style base exposing a property's raw stored value."""
    def get_key(self, prop_name):
        """Return the raw datastore value (the Key, for reference properties)
        stored under *prop_name*, without dereferencing the target entity."""
        prop = getattr(self.__class__, prop_name)
        return prop.get_value_for_datastore(self)
class DerefExpando(db.Expando):
    """db.Expando counterpart of DerefModel."""
    def get_key(self, prop_name):
        """Return the raw datastore value (the Key, for reference properties)
        stored under *prop_name*, without dereferencing the target entity."""
        prop = getattr(self.__class__, prop_name)
        return prop.get_value_for_datastore(self)
# Identity / integration network identifiers.
USER_SOURCE_FACEBOOK = 'facebook'
USER_SOURCE_GOOGLE = 'google'
USER_SOURCE_TWITTER = 'twitter'
# Providers an account can be created from (twitter intentionally absent --
# TODO confirm against signup flow).
USER_SOURCE_CHOICES = [
    USER_SOURCE_FACEBOOK,
    USER_SOURCE_GOOGLE,
]
# Networks with social (sharing) integration -- presumably cross-posting.
USER_SOCIAL_NETWORKS = [
    USER_SOURCE_FACEBOOK,
    USER_SOURCE_TWITTER,
]
# Backup destination identifiers.
USER_BACKUP_DROPBOX = 'dropbox'
USER_BACKUP_GOOGLE_DOCS = 'google_docs'
USER_BACKUP_NETWORKS = [
    USER_BACKUP_DROPBOX,
    USER_BACKUP_GOOGLE_DOCS,
]
class User(db.Model):
    """A journal-site account: cached writing statistics, upload quota, and
    per-network social/backup credentials."""
    name = db.StringProperty(required=True, indexed=False)  # display name
    lname = db.StringProperty(indexed=True)  # presumably lowercased name for lookups -- TODO confirm
    email = db.EmailProperty()
    register_date = db.DateTimeProperty(auto_now_add=True)
    last_active = db.DateTimeProperty(auto_now=True)
    token = db.StringProperty(required=True, indexed=False)
    # Aggregate writing statistics, recomputed by count()/set_dates().
    chars = db.IntegerProperty(required=True, default=0)
    words = db.IntegerProperty(required=True, default=0)
    sentences = db.IntegerProperty(required=True, default=0)
    first_entry = db.DateTimeProperty()
    last_entry = db.DateTimeProperty()
    entry_days = db.IntegerProperty(required=True, default=0)
    # all frequencies are per week
    freq_entries = db.FloatProperty(required=True, default=0.)
    freq_chars = db.FloatProperty(required=True, default=0.)
    freq_words = db.FloatProperty(required=True, default=0.)
    freq_sentences = db.FloatProperty(required=True, default=0.)
    # these two properties will be deleted
    source = db.StringProperty(choices=USER_SOURCE_CHOICES)
    uid = db.StringProperty()
    google_id = db.StringProperty()
    # Upload quota accounting, in bytes.
    allowed_data = db.IntegerProperty(required=True, default=50 * 2 ** 20) # 50 MB default
    used_data = db.IntegerProperty(required=True, default=0)
    journal_count = db.IntegerProperty(required=True, default=0)
    entry_count = db.IntegerProperty(required=True, default=0)
    # Per-network ids and OAuth credentials; *_enable toggles the integration.
    facebook_id = db.StringProperty()
    facebook_enable = db.BooleanProperty(indexed=False)
    facebook_token = db.StringProperty(indexed=False)
    twitter_id = db.StringProperty()
    twitter_enable = db.BooleanProperty(indexed=False)
    twitter_key = db.StringProperty(indexed=False)
    twitter_secret = db.StringProperty(indexed=False)
    dropbox_id = db.StringProperty(indexed=False)
    dropbox_enable = db.BooleanProperty(indexed=False)
    dropbox_token = db.StringProperty(indexed=False)
    google_docs_enable = db.BooleanProperty(indexed=False)
    google_docs_token = db.StringProperty(indexed=False)
    def count(self):
        """Recompute entry_days and the per-week freq_* statistics from the
        running totals; zero everything when there are no entries yet."""
        if self.entry_count and self.last_entry and self.first_entry:
            self.entry_days = (self.last_entry - self.first_entry).days + 1
            weeks = self.entry_days / 7.  # float division; always >= 1/7
            self.freq_entries = self.entry_count / weeks
            self.freq_chars = self.chars / weeks
            self.freq_words = self.words / weeks
            self.freq_sentences = self.sentences / weeks
        else:
            self.entry_days = 0
            self.freq_entries = 0.
            self.freq_chars = 0.
            self.freq_words = 0.
            self.freq_sentences = 0.
    def set_dates(self):
        """Stamp last_entry with now; initialize first_entry on first use."""
        self.last_entry = datetime.datetime.now()
        if not self.first_entry:
            self.first_entry = self.last_entry
    def __str__(self):
        return str(self.name)
    def gravatar(self, size=''):
        """Return the gravatar URL ('mystery man' fallback); size is an
        optional pixel size appended as the s= parameter."""
        if size:
            size = '&s=%s' %size
        if not self.email:
            email = ''
        else:
            email = self.email.lower()
        return 'http://www.gravatar.com/avatar/' + hashlib.md5(email).hexdigest() + '?d=mm%s' %size
    def can_upload(self):
        """True while the user still has upload quota remaining."""
        return self.bytes_remaining > 0
    @property
    def bytes_remaining(self):
        # Remaining upload quota in bytes.
        return self.allowed_data - self.used_data
    @property
    def sources(self):
        # Names of identity providers this account has an id stored for.
        return [i for i in USER_SOURCE_CHOICES if getattr(self, '%s_id' %i)]
class UserFollowersIndex(db.Model):
    """Relation index of follower names -- presumably stored as a child
    entity of the followed User; confirm against the query code."""
    users = db.StringListProperty()
class UserFollowingIndex(db.Model):
    """Relation index of followed-user names -- presumably stored as a child
    entity of the following User; confirm against the query code."""
    users = db.StringListProperty()
class Journal(db.Model):
    """A named collection of entries; datastore child of a user.

    The aggregate counters (entry_count, chars, words, sentences and the
    first/last entry dates) are maintained by the application; count()
    derives the per-week frequencies from them.
    """
    ENTRIES_PER_PAGE = 5
    MAX_JOURNALS = 10  # per-user limit on journals

    name = db.StringProperty(required=True)
    created_date = db.DateTimeProperty(auto_now_add=True)
    last_entry = db.DateTimeProperty()
    first_entry = db.DateTimeProperty()
    last_modified = db.DateTimeProperty(auto_now=True)
    entry_count = db.IntegerProperty(required=True, default=0)
    entry_days = db.IntegerProperty(required=True, default=0)
    chars = db.IntegerProperty(required=True, default=0)
    words = db.IntegerProperty(required=True, default=0)
    sentences = db.IntegerProperty(required=True, default=0)
    # all frequencies are per week
    freq_entries = db.FloatProperty(required=True, default=0.)
    freq_chars = db.FloatProperty(required=True, default=0.)
    freq_words = db.FloatProperty(required=True, default=0.)
    freq_sentences = db.FloatProperty(required=True, default=0.)

    def count(self):
        """Recompute entry_days and the freq_* fields from the totals.

        Also guards against missing first/last entry dates (matching the
        user model's count()) so a nonzero entry_count with unset dates
        cannot raise a TypeError on the date subtraction.
        """
        if self.entry_count and self.last_entry and self.first_entry:
            self.entry_days = (self.last_entry - self.first_entry).days + 1
            weeks = self.entry_days / 7.
            self.freq_entries = self.entry_count / weeks
            self.freq_chars = self.chars / weeks
            self.freq_words = self.words / weeks
            self.freq_sentences = self.sentences / weeks
        else:
            self.entry_days = 0
            self.freq_entries = 0.
            self.freq_chars = 0.
            self.freq_words = 0.
            self.freq_sentences = 0.

    def __unicode__(self):
        return unicode(self.name)

    @property
    def pages(self):
        """Number of listing pages (ceiling division, minimum 1)."""
        if self.entry_count == 0:
            return 1
        # Explicit floor division keeps the ceiling arithmetic correct
        # under true division as well.
        return (self.entry_count + self.ENTRIES_PER_PAGE - 1) // self.ENTRIES_PER_PAGE

    def url(self, page=1):
        """Canonical URL for this journal; page 1 omits the page part."""
        if page > 1:
            return webapp2.uri_for('view-journal', username=self.key().parent().name(), journal_name=self.name, page=page)
        else:
            return webapp2.uri_for('view-journal', username=self.key().parent().name(), journal_name=self.name)
# Markup formats understood by the markup dispatcher. Raw HTML is
# deliberately excluded from the journal-entry choices below (see
# RENDER_TYPE_CHOICES further down, which includes it for blog entries).
RENDER_TYPE_HTML = 'HTML'
RENDER_TYPE_MARKDOWN = 'markdown'
RENDER_TYPE_RST = 'reStructured Text'
RENDER_TYPE_TEXT = 'plain text'
RENDER_TYPE_TEXTILE = 'textile'
# Formats selectable for journal entry content.
CONTENT_TYPE_CHOICES = [
    RENDER_TYPE_MARKDOWN,
    RENDER_TYPE_RST,
    RENDER_TYPE_TEXT,
    RENDER_TYPE_TEXTILE,
]
class EntryContent(db.Model):
    # The text payload of an Entry, kept in a separate child entity so
    # entry list queries do not deserialize the (potentially large) body.
    subject = db.StringProperty()
    tags = db.StringListProperty()
    text = db.TextProperty()  # raw source text as the user typed it
    rendered = db.TextProperty(default='')  # cached HTML rendering of text
    markup = db.StringProperty(required=True, indexed=False, choices=CONTENT_TYPE_CHOICES, default=RENDER_TYPE_TEXT)
class Entry(db.Model):
    # One journal entry; datastore child of a Journal. The heavy text
    # lives in a separate EntryContent child (see content_key).
    date = db.DateTimeProperty(auto_now_add=True)  # user-visible entry date
    created = db.DateTimeProperty(required=True, auto_now_add=True)
    last_edited = db.DateTimeProperty(required=True, auto_now=True)
    content = db.IntegerProperty(required=True) # key id of EntryContent
    blobs = db.StringListProperty()  # key ids of attached Blob children
    chars = db.IntegerProperty(required=True, default=0)
    words = db.IntegerProperty(required=True, default=0)
    sentences = db.IntegerProperty(required=True, default=0)
    dropbox_rev = db.StringProperty(indexed=False)  # last synced Dropbox revision
    google_docs_id = db.StringProperty(indexed=False)  # last synced Google Docs resource id
    # Tokenizers presumably used when computing the words/sentences
    # counters above -- the counting code is not in this file.
    WORD_RE = re.compile("[A-Za-z0-9']+")
    SENTENCE_RE = re.compile("[.!?]+")

    @property
    def time(self):
        # An entry dated exactly at midnight is treated as date-only
        # (no time component was recorded), rendered as empty string.
        if not self.date.hour and not self.date.minute and not self.date.second:
            return ''
        else:
            return self.date.strftime('%I:%M %p')

    @property
    def content_key(self):
        # Rebuild the EntryContent child key from the stored numeric id.
        return db.Key.from_path('EntryContent', long(self.content), parent=self.key())

    @property
    def blob_keys(self):
        # Rebuild the Blob child keys from the stored numeric ids.
        return [db.Key.from_path('Blob', long(i), parent=self.key()) for i in self.blobs]
# Activity feed event types and their display strings.
ACTIVITY_NEW_JOURNAL = 1
ACTIVITY_NEW_ENTRY = 2
ACTIVITY_FOLLOWING = 3
ACTIVITY_SAVE_ENTRY = 4
# Valid values for Activity.action.
ACTIVITY_CHOICES = [
    ACTIVITY_NEW_JOURNAL,
    ACTIVITY_NEW_ENTRY,
    ACTIVITY_FOLLOWING,
    ACTIVITY_SAVE_ENTRY,
]
# Human-readable verb phrase for each event type (see Activity.get_action).
ACTIVITY_ACTION = {
    ACTIVITY_NEW_JOURNAL: 'created a new journal',
    ACTIVITY_NEW_ENTRY: 'started a new journal entry',
    ACTIVITY_FOLLOWING: 'started following',
    ACTIVITY_SAVE_ENTRY: 'updated a journal entry',
}
class Activity(DerefModel):
    # One feed event ("X created a journal", ...). Written once, then
    # fanned out to followers through a child ActivityIndex entity.
    RESULTS = 25  # feed page size

    user = db.StringProperty(required=True)  # username of the actor
    img = db.StringProperty(indexed=False)  # actor's avatar URL at event time
    date = db.DateTimeProperty(auto_now_add=True)
    action = db.IntegerProperty(required=True, choices=ACTIVITY_CHOICES)
    object = db.ReferenceProperty()  # target entity; NOTE: name shadows the builtin

    def get_action(self):
        # Human-readable description; FOLLOWING events append a link to
        # the followed user.
        r = ACTIVITY_ACTION[self.action]
        if self.action == ACTIVITY_FOLLOWING:
            # get_key() comes from DerefModel -- presumably reads the
            # reference's key without fetching the entity; confirm.
            name = self.get_key('object').name()
            r += ' <a href="%s">%s</a>' %(webapp2.uri_for('user', username=name), name)
        return r

    @staticmethod
    def create(user, action, object):
        # Store the activity, then index it for the actor and all of the
        # actor's followers so feed queries are a single equality filter
        # on ActivityIndex.receivers.
        a = Activity(user=user.name, img=user.gravatar('30'), action=action, object=object)
        ar = db.put_async(a)
        receivers = cache.get_followers(user.name)
        receivers.append(user.name)
        # The index is a child of the activity, so the parent put must
        # complete before constructing it.
        ar.get_result()
        ai = ActivityIndex(parent=a, receivers=receivers)
        ai.put()
class ActivityIndex(db.Model):
    # Fan-out child of an Activity: usernames whose feeds include it.
    receivers = db.StringListProperty()
    date = db.DateTimeProperty(auto_now_add=True)
# Attachment types supported by Blob.
BLOB_TYPE_IMAGE = 1
BLOB_TYPE_PDF = 2
# Valid values for Blob.type.
BLOB_TYPE_CHOICES = [
    BLOB_TYPE_IMAGE,
    BLOB_TYPE_PDF,
]
class Blob(DerefExpando):
    """An uploaded attachment (image or PDF) stored in the blobstore."""
    MAXSIZE = 4 * 2 ** 20 # 4MB upload cap

    blob = blobstore.BlobReferenceProperty(required=True)
    type = db.IntegerProperty(required=True, choices=BLOB_TYPE_CHOICES)
    name = db.StringProperty(indexed=False)  # original filename
    size = db.IntegerProperty()  # size in bytes
    url = db.StringProperty(indexed=False)  # cached image serving URL

    def get_url(self, size=None, name=None):
        """Return a serving/download URL for this blob.

        Images use the image-serving URL, memoized on the entity the
        first time (the caller must put() to persist the cache), with an
        optional pixel ``size`` suffix. Other types route through the
        app's 'blob' handler; pass name=True to attach the stored
        filename to the download.
        """
        if self.type == BLOB_TYPE_IMAGE:
            if not self.url:
                self.url = images.get_serving_url(self.blob)
            url = self.url
            if size:
                # %s-format so both string and integer sizes are
                # accepted ('=s' + size raised TypeError for ints).
                url += '=s%s' % size
            return url
        else:
            kwargs = {'key': self.get_key('blob')}
            if name is True:
                name = self.name
            if name:
                kwargs['name'] = name
            return webapp2.uri_for('blob', **kwargs)
# Full renderer list -- including raw HTML -- offered for blog entries.
RENDER_TYPE_CHOICES = [
    RENDER_TYPE_HTML,
    RENDER_TYPE_MARKDOWN,
    RENDER_TYPE_RST,
    RENDER_TYPE_TEXT,
    RENDER_TYPE_TEXTILE,
]
class BlogEntry(db.Model):
    # A site blog post (distinct from user journal entries).
    ENTRIES_PER_PAGE = 10

    date = db.DateTimeProperty(required=True, auto_now_add=True)
    draft = db.BooleanProperty(required=True, default=True)  # hidden until published
    markup = db.StringProperty(required=True, indexed=False, choices=RENDER_TYPE_CHOICES, default=RENDER_TYPE_MARKDOWN)
    title = db.StringProperty(required=True, indexed=False, default='Title')
    text = db.TextProperty(default='')  # raw source
    rendered = db.TextProperty(default='')  # cached HTML rendering
    user = db.StringProperty(required=True)  # author username
    avatar = db.StringProperty()  # author's avatar URL at posting time
    slug = db.StringProperty(indexed=False)  # URL slug; falls back to key id

    @property
    def url(self):
        # Lazily default the slug to the numeric key id. The assignment
        # is in-memory only unless the caller put()s the entity.
        if not self.slug:
            self.slug = str(self.key().id())
        return webapp2.uri_for('blog-entry', entry=self.slug)
class Config(db.Expando):
    # Free-form entity for site-wide configuration values; properties
    # are added dynamically (Expando), so no schema is declared here.
    pass
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import StringIO
import logging
import os
import os.path
import re
import unicodedata
from django.utils import html
#suwonchon: it's not support GAE module no longer.
#from google.appengine.api import conversion
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext.webapp import template
import jinja2
import webapp2
import cache
import facebook
import filters
import models
import settings
# Fix sys.path
import fix_path
fix_path.fix_sys_path()
from docutils.core import publish_parts
import dropbox
#suwonchon: add conversion module.
import conversion
import gdata.data
import gdata.docs.client
import gdata.docs.data
import gdata.docs.service
import gdata.gauth
import markdown
import rst_directive
import textile
# Module-level Jinja2 environment: loads templates from ./templates and
# registers the project's custom filters for all renders.
env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
env.filters.update(filters.filters)
def prefetch_refprops(entities, *props):
    """Batch-dereference the given ReferenceProperty fields of *entities*.

    Replaces one datastore fetch per reference with a single db.get()
    over the distinct referenced keys, then patches each entity's
    property cache in place. Returns the same entity list for chaining.
    """
    pairs = [(entity, prop) for entity in entities for prop in props]
    keys = [prop.get_value_for_datastore(entity) for entity, prop in pairs]
    present_keys = set(key for key in keys if key)
    fetched = dict((ent.key(), ent) for ent in db.get(present_keys))
    for (entity, prop), key in zip(pairs, keys):
        if key is not None:
            prop.__set__(entity, fetched[key])
    return entities
def render(_template, context):
    """Render the named template with *context* via the shared Jinja2 env."""
    template_obj = env.get_template(_template)
    return template_obj.render(**context)
NUM_PAGE_DISP = 5  # number of page links shown in pagination widgets

def page_list(page, pages):
    """Return the list of page numbers to display around *page*.

    Shows every page when there are at most NUM_PAGE_DISP of them;
    otherwise a NUM_PAGE_DISP-wide window centred on *page*, clamped to
    the valid range [1, pages]. Always returns a list of ints.
    """
    if pages <= NUM_PAGE_DISP:
        return list(range(1, pages + 1))
    # this page logic could be better
    # Floor division keeps the arithmetic correct under Python 3's true
    # division as well ('/' produced a float there, breaking range()).
    half = NUM_PAGE_DISP // 2
    if page < 1 + half:
        page = half + 1
    elif page > pages - half:
        # have to handle even and odd NUM_PAGE_DISP differently
        page = pages - half + abs(NUM_PAGE_DISP % 2 - 1)
    page -= half
    return list(range(page, page + NUM_PAGE_DISP))
def render_options(options, default=None):
    """Build <option> markup for *options*, marking *default* selected."""
    parts = []
    for option in options:
        selected = ' selected' if option == default else ''
        parts.append('<option%s>%s</option>' % (selected, option))
    return ''.join(parts)
def markup(text, format):
    """Render *text* to HTML according to *format* (a models.RENDER_TYPE_* value).

    Raises ValueError for unknown formats. HTML input is passed through
    untouched -- callers are responsible for only allowing it from
    trusted authors.
    """
    if format == models.RENDER_TYPE_HTML:
        return text
    elif format == models.RENDER_TYPE_TEXT:
        # Escape, then convert newlines to <p>/<br> blocks.
        return html.linebreaks(html.escape(text))
    elif format == models.RENDER_TYPE_MARKDOWN:
        return markdown.Markdown().convert(text)
    elif format == models.RENDER_TYPE_TEXTILE:
        return textile.textile(text)
    elif format == models.RENDER_TYPE_RST:
        # Capture docutils warnings instead of letting them hit stderr;
        # report_level 2 filters out info-level messages.
        warning_stream = cStringIO.StringIO()
        parts = publish_parts(text, writer_name='html4css1',
            settings_overrides={
                '_disable_config': True,
                'embed_stylesheet': False,
                'warning_stream': warning_stream,
                'report_level': 2,
            })
        rst_warnings = warning_stream.getvalue()
        if rst_warnings:
            logging.warn(rst_warnings)
        return parts['html_body']
    else:
        raise ValueError('invalid markup')
def deunicode(s):
    """ASCII-fold *s*: NFKD-decompose, then drop any non-ASCII characters."""
    decomposed = unicodedata.normalize('NFKD', s)
    return decomposed.encode('ascii', 'ignore')
def slugify(s):
    """Turn *s* into a URL-safe slug.

    ASCII-folds the input, collapses every run of characters outside
    [a-zA-Z0-9-] into a single hyphen, and strips leading/trailing
    hyphens.
    """
    folded = deunicode(s)
    collapsed = re.sub('[^a-zA-Z0-9-]+', '-', folded)
    return collapsed.strip('-')
def convert_html(f, title, entries, output_type='application/pdf'):
    """Render *entries* with the pdf.html template and convert the result.

    Writes the converted asset data (PDF by default) to the file-like
    object *f*. Returns None on success, or an error string on failure;
    conversion exceptions are logged and reported, never raised.
    """
    try:
        # NOTE: local 'html' shadows the django.utils.html import within
        # this function.
        html = render('pdf.html', {'title': title, 'entries': entries})
        asset = conversion.Asset('text/html', deunicode(html))
        conversion_request = conversion.Conversion(asset, output_type)
        result = conversion.convert(conversion_request)
        if result and result.assets:
            for i in result.assets:
                f.write(i.data)
            return None
        else:
            logging.error('Conversion error: %s', result.error_text)
            return result.error_text
    except Exception, e:
        logging.error('Conversion exception: %s', e)
        return str(e)
def absolute_uri(*args, **kwargs):
    """Absolute http:// URL for a named route (args go to webapp2.uri_for)."""
    host = os.environ['HTTP_HOST']
    return 'http://' + host + webapp2.uri_for(*args, **kwargs)
def dropbox_session():
    # App-folder scoped Dropbox session using the site's app credentials.
    return dropbox.session.DropboxSession(settings.DROPBOX_KEY, settings.DROPBOX_SECRET, 'app_folder')
def dropbox_url():
    # Begin the OAuth 1 flow: returns (request_token, authorize_url).
    # The caller must keep the request token to exchange it for an
    # access token in dropbox_token() after the user authorizes.
    sess = dropbox_session()
    request_token = sess.obtain_request_token()
    url = sess.build_authorize_url(request_token, oauth_callback=absolute_uri('dropbox'))
    return request_token, url
def dropbox_token(request_token):
    # Exchange an authorized request token for a long-lived access token.
    sess = dropbox_session()
    return sess.obtain_access_token(request_token)
def dropbox_put(access_token, path, content, rev=None):
    # Upload *content* to *path* in the user's app folder.
    # access_token is stored urlencoded ('oauth_token=..&oauth_token_secret=..');
    # unpack it back into its parts before building the session.
    tokens = dict([i.split('=', 1) for i in access_token.split('&')])
    sess = dropbox_session()
    sess.set_token(tokens['oauth_token'], tokens['oauth_token_secret'])
    client = dropbox.client.DropboxClient(sess)
    # parent_rev makes this an update of that revision rather than a
    # conflict-generating add.
    return client.put_file(path, content, parent_rev=rev)
# OAuth scope granting access to the (legacy) Google Documents List API.
GOOGLE_DATA_SCOPES = ['https://docs.google.com/feeds/']

def google_url():
    # AuthSub entry point: URL the user visits to grant a session token;
    # Google redirects back to our 'google' route with a single-use token.
    next = absolute_uri('google')
    return gdata.gauth.generate_auth_sub_url(next, GOOGLE_DATA_SCOPES, session=True)
def google_session_token(token):
    # Upgrade the single-use AuthSub token from the callback into a
    # reusable session token.
    # NOTE(review): gdata.auth is never imported explicitly in this file;
    # this presumably works because importing gdata.docs.service pulls it
    # in as a side effect -- confirm.
    single_use_token = gdata.auth.AuthSubToken()
    single_use_token.set_token_string(token)
    docs_service = gdata.docs.service.DocsService()
    return docs_service.upgrade_to_session_token(single_use_token)
def google_revoke(token):
    # Invalidate a previously granted AuthSub session token.
    docs_service = gdata.docs.service.DocsService()
    docs_service.SetAuthSubToken(token, GOOGLE_DATA_SCOPES)
    docs_service.RevokeAuthSubToken()
def google_folder(service, name, subfolder=None):
    # Find a Google Docs folder by exact title, creating it (optionally
    # inside *subfolder*) when it does not exist yet.
    folder_name_query = gdata.docs.service.DocumentQuery(categories=['folder'], params={'showfolders': 'true'})
    folder_name_query['title-exact'] = 'true'
    folder_name_query['title'] = name
    folder_feed = service.Query(folder_name_query.ToUri())
    if folder_feed.entry:
        # First match wins; titles are assumed unique -- confirm.
        return folder_feed.entry[0]
    return service.CreateFolder(name, subfolder)
def google_upload(token, path, content, entryid=None):
docs_service = gdata.docs.service.DocsService()
docs_service.SetAuthSubToken(token, GOOGLE_DATA_SCOPES)
file_dir, file_name = path.rsplit('/', 1)
f = StringIO.StringIO(content)
ms = gdata.data.MediaSource(file_handle=f, content_type='text/html', content_length=len(content), file_name=file_name)
if not entryid:
j_folder = google_folder(docs_service, 'journalr')
dest_folder = google_folder(docs_service, file_dir, j_folder)
entry = docs_service.Upload(ms, file_name, folder_or_uri=dest_folder)
return entry.resourceId.text
else:
client = gdata.docs.client.DocsClient()
authsub = gdata.gauth.AuthSubToken(token)
client.auth_token = authsub
entry = client.GetDoc(entryid)
entry = client.Update(entry, media_source=ms)
| Python |
"""Tests for query.py."""
import datetime
import os
import unittest
from .google_imports import datastore_errors
from .google_imports import users
from .google_test_imports import datastore_stub_util
from . import model
from . import query
from . import tasklets
from . import test_utils
class QueryTests(test_utils.NDBTest):
def setUp(self):
    super(QueryTests, self).setUp()
    # Create class inside tests because kinds are cleared every test.
    # 'global' re-publishes Foo so the module-level test helpers and
    # other methods can reference it.
    global Foo

    class Foo(model.Model):
        name = model.StringProperty()
        rate = model.IntegerProperty()
        tags = model.StringProperty(repeated=True)
    self.create_entities()

# Module under test -- presumably consumed by the shared NDBTest
# machinery in test_utils; confirm.
the_module = query
def create_entities(self):
    # Three fixture entities shared by most tests: joe/moe share rate=1,
    # joe/jill share the 'jill' tag, moe has no tags.
    self.joe = Foo(name='joe', tags=['joe', 'jill', 'hello'], rate=1)
    self.joe.put()
    self.jill = Foo(name='jill', tags=['jack', 'jill'], rate=2)
    self.jill.put()
    self.moe = Foo(name='moe', rate=1)
    self.moe.put()
def testBasicQuery(self):
    # Range filter on name; the trailing no-argument filter() must be a
    # no-op. 'jill' < 'joe' lexicographically, so only joe and moe match.
    q = query.Query(kind='Foo')
    q = q.filter(Foo.name >= 'joe').filter(Foo.name <= 'moe').filter()
    res = list(q)
    self.assertEqual(res, [self.joe, self.moe])
def testOrderedQuery(self):
    # Sort by rate ascending then name descending; the empty order()
    # call in the chain must be a no-op.
    q = query.Query(kind='Foo')
    q = q.order(Foo.rate).order().order(-Foo.name)
    res = list(q)
    self.assertEqual(res, [self.moe, self.joe, self.jill])
def testQueryAttributes(self):
    # Fresh queries expose kind and None for the optional attributes.
    q = query.Query(kind='Foo')
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, None)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    # ancestor is carried through...
    key = model.Key('Barba', 'papa')
    q = query.Query(kind='Foo', ancestor=key)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    # ...and filter()/order() derive new queries preserving the rest.
    q = q.filter(Foo.rate == 1)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    self.assertEqual(q.orders, None)
    q = q.order(-Foo.name)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    expected_order = [('name', query._DESC)]
    self.assertEqual(query._orders_to_orderings(q.orders), expected_order)
def testQueryRepr(self):
    # Exact repr is pinned only for the simple cases.
    q = Foo.query()
    self.assertEqual(repr(q), "Query(kind='Foo')")
    q = Foo.query(ancestor=model.Key('Bar', 1))
    self.assertEqual(repr(q), "Query(kind='Foo', ancestor=Key('Bar', 1))")
    # Let's not specify what it should show for filters and orders,
    # just test that it doesn't blow up.
    q1 = q.filter(Foo.rate == 1, Foo.name == 'x')
    repr(q1)
    q2 = q1.order(-Foo.rate)
    repr(q2)
    # App and namespace.
    q3 = Foo.query(app='a', namespace='ns')
    self.assertEqual(repr(q3), "Query(kind='Foo', app='a', namespace='ns')")
def testRunToQueue(self):
qry = Foo.query()
queue = tasklets.MultiFuture()
qry.run_to_queue(queue, self.conn).check_success()
results = queue.get_result()
self.assertEqual(len(results), 3)
self.assertEqual(results[0][2], self.joe)
self.assertEqual(results[1][2], self.jill)
self.assertEqual(results[2][2], self.moe)
def testRunToQueueError(self):
self.ExpectWarnings()
qry = Foo.query(Foo.name > '', Foo.rate > 0)
queue = tasklets.MultiFuture()
fut = qry.run_to_queue(queue, self.conn)
self.assertRaises(datastore_errors.BadRequestError, fut.check_success)
self.assertRaises(datastore_errors.BadRequestError, queue.check_success)
def testModernQuerySyntax(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
@classmethod
def seniors(cls, min_age, min_rank):
q = cls.query().filter(cls.age >= min_age, cls.rank <= min_rank)
q = q.order(cls.name, -cls.age)
return q
q = Employee.seniors(42, 5)
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
self.assertEqual(query._orders_to_orderings(q.orders),
[('name', query._ASC), ('Age', query._DESC)])
def testAndQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.AND(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.AND(Employee.age >= 42, Employee.rank <= 5))
self.assertEqual(q.filters,
query.ConjunctionNode(
query.FilterNode('Age', '>=', 42),
query.FilterNode('rank', '<=', 5)))
def testOrQuery(self):
class Employee(model.Model):
name = model.StringProperty()
age = model.IntegerProperty('Age')
rank = model.IntegerProperty()
q = Employee.query().filter(query.OR(Employee.age >= 42))
self.assertEqual(q.filters, query.FilterNode('Age', '>=', 42))
q = Employee.query(query.OR(Employee.age < 42, Employee.rank > 5))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('Age', '<', 42),
query.FilterNode('rank', '>', 5)))
def testEmptyInFilter(self):
self.ExpectWarnings()
class Employee(model.Model):
name = model.StringProperty()
for arg in [], (), set(), frozenset():
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(q.filters, query.FalseNode())
self.assertNotEqual(q.filters, 42)
f = iter(q).has_next_async()
self.assertRaises(datastore_errors.BadQueryError, f.check_success)
def testSingletonInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['xyzzy']))
self.assertEqual(q.filters, query.FilterNode('name', '=', 'xyzzy'))
self.assertNotEqual(q.filters, 42)
e = Employee(name='xyzzy')
e.put()
self.assertEqual(q.get(), e)
def testInFilter(self):
class Employee(model.Model):
name = model.StringProperty()
q = Employee.query(Employee.name.IN(['a', 'b']))
self.assertEqual(q.filters,
query.DisjunctionNode(
query.FilterNode('name', '=', 'a'),
query.FilterNode('name', '=', 'b')))
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
self.assertEqual(list(q), [a, b])
def testInFilterArgTypes(self):
class Employee(model.Model):
name = model.StringProperty()
a = Employee(name='a')
a.put()
b = Employee(name='b')
b.put()
for arg in ('a', 'b'), set(['a', 'b']), frozenset(['a', 'b']):
q = Employee.query(Employee.name.IN(arg))
self.assertEqual(list(q), [a, b])
def testInFilterWithNone(self):
class Employee(model.Model):
# Try a few different property types, to get a good mix of what
# used to fail.
name = model.StringProperty()
boss = model.KeyProperty()
age = model.IntegerProperty()
date = model.DateProperty()
a = Employee(name='a', age=42L)
a.put()
bosskey = model.Key(Employee, 'x')
b = Employee(boss=bosskey, date=datetime.date(1996, 1, 31))
b.put()
keys = set([a.key, b.key])
q1 = Employee.query(Employee.name.IN(['a', None]))
self.assertEqual(set(e.key for e in q1), keys)
q2 = Employee.query(Employee.boss.IN([bosskey, None]))
self.assertEqual(set(e.key for e in q2), keys)
q3 = Employee.query(Employee.age.IN([42, None]))
self.assertEqual(set(e.key for e in q3), keys)
q4 = Employee.query(Employee.date.IN([datetime.date(1996, 1, 31), None]))
self.assertEqual(set(e.key for e in q4), keys)
def testQueryExceptions(self):
self.ExpectWarnings()
q = Foo.query(Foo.name > '', Foo.rate > 0)
f = q.fetch_async()
self.assertRaises(datastore_errors.BadRequestError, f.check_success)
def testQueryUnindexedFails(self):
# Shouldn't be able to query for unindexed properties
class SubModel(model.Model):
booh = model.IntegerProperty(indexed=False)
class Emp(model.Model):
name = model.StringProperty()
text = model.TextProperty()
blob = model.BlobProperty()
sub = model.StructuredProperty(SubModel)
struct = model.StructuredProperty(Foo, indexed=False)
local = model.LocalStructuredProperty(Foo)
Emp.query(Emp.name == 'a').fetch() # Should pass
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.text.IN(['a', 'b']))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.blob == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub == SubModel(booh=42))
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.sub.booh == 42)
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.struct == Foo(name='a'))
# TODO: Make this fail? See issue 89. http://goo.gl/K4gbY
# Currently StructuredProperty(..., indexed=False) has no effect.
## self.assertRaises(datastore_errors.BadFilterError,
## lambda: Emp.struct.name == 'a')
self.assertRaises(datastore_errors.BadFilterError,
lambda: Emp.local == Foo(name='a'))
def testFilterRepr(self):
class Employee(model.Model):
name = model.StringProperty()
f = (Employee.name == 'xyzzy')
self.assertEqual(repr(f), "FilterNode('name', '=', 'xyzzy')")
def testNodeComparisons(self):
a = query.FilterNode('foo', '=', 1)
b = query.FilterNode('foo', '=', 1)
c = query.FilterNode('foo', '=', 2)
d = query.FilterNode('foo', '<', 1)
# Don't use assertEqual/assertNotEqual; we want to be sure that
# __eq__ or __ne__ is really called here!
self.assertTrue(a == b)
self.assertTrue(a != c)
self.assertTrue(b != d)
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: a <= b)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: a >= b)
x = query.AND(a, b, c)
y = query.AND(a, b, c)
z = query.AND(a, d)
self.assertTrue(x == y)
self.assertTrue(x != z)
def testQueryForStructuredProperty(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
b1 = Bar(name='b1', foo=Foo(name='nest', rate=1, tags=['tag1', 'tag2']))
b1.put()
b2 = Bar(name='b2', foo=Foo(name='best', rate=2, tags=['tag2', 'tag3']))
b2.put()
b3 = Bar(name='b3', foo=Foo(name='rest', rate=2, tags=['tag2']))
b3.put()
q1 = Bar.query().order(Bar.name)
self.assertEqual(q1.fetch(10), [b1, b2, b3])
q2 = Bar.query().filter(Bar.foo.rate >= 2)
self.assertEqual(q2.fetch(10), [b2, b3])
q3 = q2.order(Bar.foo.rate, -Bar.foo.name, +Bar.foo.rate)
self.assertEqual(q3.fetch(10), [b3, b2])
def testQueryForStructuredPropertyErrors(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
# Can't use inequalities.
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo < Foo())
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo != Foo())
# Can't use an empty value.
self.assertRaises(datastore_errors.BadFilterError,
lambda: Bar.foo == Foo())
def testQueryForStructuredPropertyIn(self):
self.ExpectWarnings()
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
a = Bar(name='a', foo=Foo(name='a'))
a.put()
b = Bar(name='b', foo=Foo(name='b'))
b.put()
self.assertEqual(
Bar.query(Bar.foo.IN((Foo(name='a'), Foo(name='b')))).fetch(),
[a, b])
self.assertEqual(Bar.query(Bar.foo.IN([Foo(name='a')])).fetch(), [a])
# An IN query with empty argument can be constructed but not executed.
q = Bar.query(Bar.foo.IN(set()))
self.assertRaises(datastore_errors.BadQueryError, q.fetch)
# Passing a non-sequence argument should fail.
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, 42)
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, None)
self.assertRaises(datastore_errors.BadArgumentError,
Bar.foo.IN, 'not a sequence')
def testQueryForNestedStructuredProperty(self):
class Bar(model.Model):
name = model.StringProperty()
foo = model.StructuredProperty(Foo)
class Bak(model.Model):
bar = model.StructuredProperty(Bar)
class Baz(model.Model):
bar = model.StructuredProperty(Bar)
bak = model.StructuredProperty(Bak)
rank = model.IntegerProperty()
b1 = Baz(bar=Bar(foo=Foo(name='a')))
b1.put()
b2 = Baz(bar=Bar(foo=Foo(name='b')), bak=Bak(bar=Bar(foo=Foo(name='c'))))
b2.put()
q1 = Baz.query().filter(Baz.bar.foo.name >= 'a')
self.assertEqual(q1.fetch(10), [b1, b2])
q2 = Baz.query().filter(Baz.bak.bar.foo.name >= 'a')
self.assertEqual(q2.fetch(10), [b2])
def testQueryForWholeStructure(self):
class Employee(model.Model):
name = model.StringProperty()
rank = model.IntegerProperty()
class Manager(Employee):
report = model.StructuredProperty(Employee, repeated=True)
reports_a = []
for i in range(3):
e = Employee(name=str(i), rank=i)
e.put()
e.key = None
reports_a.append(e)
reports_b = []
for i in range(3, 6):
e = Employee(name=str(i), rank=0)
e.put()
e.key = None
reports_b.append(e)
mgr_a = Manager(name='a', report=reports_a)
mgr_a.put()
mgr_b = Manager(name='b', report=reports_b)
mgr_b.put()
mgr_c = Manager(name='c', report=reports_a + reports_b)
mgr_c.put()
res = list(Manager.query(Manager.report == Employee(name='1', rank=1)))
self.assertEqual(res, [mgr_a, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0)))
self.assertEqual(res, [mgr_a, mgr_b, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0, name='3')))
self.assertEqual(res, [mgr_b, mgr_c])
res = list(Manager.query(Manager.report == Employee(rank=0, name='1')))
self.assertEqual(res, [])
res = list(Manager.query(Manager.report == Employee(rank=0, name='0'),
Manager.report == Employee(rank=1, name='1')))
self.assertEqual(res, [mgr_a, mgr_c])
q = Manager.query(Manager.report == Employee(rank=2, name='2'))
res = list(q)
self.assertEqual(res, [mgr_a, mgr_c])
res = list(q.iter(offset=1))
self.assertEqual(res, [mgr_c])
res = list(q.iter(limit=1))
self.assertEqual(res, [mgr_a])
def testQueryForWholeStructureCallsDatastoreType(self):
# See issue 87. http://goo.gl/Tl5Ed
class Event(model.Model):
what = model.StringProperty()
when = model.DateProperty() # Has non-trivial _datastore_type().
class Outer(model.Model):
who = model.StringProperty()
events = model.StructuredProperty(Event, repeated=True)
q = Outer.query(Outer.events == Event(what='stuff',
when=datetime.date.today()))
q.fetch() # Failed before the fix.
def testQueryForWholeNestedStructure(self):
class A(model.Model):
a1 = model.StringProperty()
a2 = model.StringProperty()
class B(model.Model):
b1 = model.StructuredProperty(A)
b2 = model.StructuredProperty(A)
class C(model.Model):
c = model.StructuredProperty(B)
x = C(c=B(b1=A(a1='a1', a2='a2'), b2=A(a1='a3', a2='a4')))
x.put()
q = C.query(C.c == x.c)
self.assertEqual(q.get(), x)
def testQueryForWholeStructureNone(self):
class X(model.Model):
name = model.StringProperty()
class Y(model.Model):
x = model.StructuredProperty(X)
y = Y(x=None)
y.put()
q = Y.query(Y.x == None)
self.assertEqual(q.fetch(), [y])
def testQueryAncestorConsistentWithAppId(self):
class Employee(model.Model):
pass
a = model.Key(Employee, 1)
self.assertEqual(a.app(), self.APP_ID) # Just checkin'.
Employee.query(ancestor=a, app=a.app()).fetch() # Shouldn't fail.
self.assertRaises(Exception, Employee.query, ancestor=a, app='notthisapp')
def testQueryAncestorConsistentWithNamespace(self):
class Employee(model.Model):
pass
a = model.Key(Employee, 1, namespace='ns')
self.assertEqual(a.namespace(), 'ns') # Just checkin'.
Employee.query(ancestor=a, namespace='ns').fetch()
Employee.query(ancestor=a, namespace=None).fetch()
self.assertRaises(Exception,
Employee.query, ancestor=a, namespace='another')
self.assertRaises(Exception,
Employee.query, ancestor=a, namespace='')
# And again with the default namespace.
b = model.Key(Employee, 1)
self.assertEqual(b.namespace(), '') # Just checkin'.
Employee.query(ancestor=b, namespace='')
Employee.query(ancestor=b, namespace=None)
self.assertRaises(Exception,
Employee.query, ancestor=b, namespace='ns')
# Finally some queries with a namespace but no ancestor.
Employee.query(namespace='').fetch()
Employee.query(namespace='ns').fetch()
def testQueryWithNamespace(self):
class Employee(model.Model):
pass
k = model.Key(Employee, None, namespace='ns')
e = Employee(key=k)
e.put()
self.assertEqual(Employee.query().fetch(), [])
self.assertEqual(Employee.query(namespace='ns').fetch(), [e])
def testQueryFilterAndOrderPreserveNamespace(self):
class Employee(model.Model):
name = model.StringProperty()
q1 = Employee.query(namespace='ns')
q2 = q1.filter(Employee.name == 'Joe')
self.assertEqual(q2.namespace, 'ns')
# Ditto for order()
q3 = q2.order(Employee.name)
self.assertEqual(q3.namespace, 'ns')
def testMultiQuery(self):
    # _MultiQuery merges multiple sorted sub-queries into one sorted
    # result stream -- the mechanism behind IN/OR queries.
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query._MultiQuery([q1, q2])
    res = list(qq)
    self.assertEqual(res, [self.jill, self.joe])
def testIterAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
@tasklets.synctasklet
def foo():
it = iter(q)
res = []
while (yield it.has_next_async()):
val = it.next()
res.append(val)
self.assertEqual(res, [self.jill, self.joe])
foo()
def testMap(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
callback = lambda e: e.name
@tasklets.tasklet
def callback_async(e):
yield tasklets.sleep(0.01)
raise tasklets.Return(e.name)
self.assertEqual(q.map(callback), ['jill', 'joe'])
self.assertEqual(q.map(callback_async), ['jill', 'joe'])
# TODO: Test map() with esoteric argument combinations
# e.g. keys_only, produce_cursors, and merge_future.
def testMapAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
callback = lambda e: e.name
@tasklets.tasklet
def callback_async(e):
yield tasklets.sleep(0.01)
raise tasklets.Return(e.name)
@tasklets.synctasklet
def foo():
fut = q.map_async(callback)
res = yield fut
self.assertEqual(res, ['jill', 'joe'])
fut = q.map_async(callback_async)
res = yield fut
self.assertEqual(res, ['jill', 'joe'])
foo()
def testFetch(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.fetch(10), [self.jill, self.joe])
self.assertEqual(q.fetch(2), [self.jill, self.joe])
self.assertEqual(q.fetch(1), [self.jill])
def testFetchAsync(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
@tasklets.synctasklet
def foo():
res = yield q.fetch_async(10)
self.assertEqual(res, [self.jill, self.joe])
res = yield q.fetch_async(2)
self.assertEqual(res, [self.jill, self.joe])
res = yield q.fetch_async(1)
self.assertEqual(res, [self.jill])
foo()
def testFetchEmpty(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
self.assertEqual(q.fetch(1), [])
def testFetchKeysOnly(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.fetch(10, keys_only=True),
[self.jill.key, self.joe.key])
def testGet(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.get(), self.jill)
def testGetEmpty(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
self.assertEqual(q.get(), None)
def testGetKeysOnly(self):
q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
self.assertEqual(q.get(keys_only=True), self.jill.key)
def testCursors(self):
  # With produce_cursors=True the iterator exposes cursor_before()/
  # cursor_after() at every step; calling either before the first
  # result raises BadArgumentError.
  q = query.Query(kind='Foo')
  it = q.iter(produce_cursors=True)
  expected = [self.joe, self.jill, self.moe]
  self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
  self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
  before = []
  after = []
  for i, ent in enumerate(it):
    self.assertEqual(ent, expected[i])
    before.append(it.cursor_before())
    after.append(it.cursor_after())
  before.append(it.cursor_before())
  after.append(it.cursor_after())
  # cursor_before() of step N+1 equals cursor_after() of step N.
  self.assertEqual(before[1], after[0])
  self.assertEqual(before[2], after[1])
  self.assertEqual(before[3], after[2])
  self.assertEqual(before[3], after[3])  # !!!

def testCursorsKeysOnly(self):
  # Same cursor contract as testCursors, but for a keys-only iterator.
  q = query.Query(kind='Foo')
  it = q.iter(produce_cursors=True, keys_only=True)
  expected = [self.joe.key, self.jill.key, self.moe.key]
  self.assertRaises(datastore_errors.BadArgumentError, it.cursor_before)
  self.assertRaises(datastore_errors.BadArgumentError, it.cursor_after)
  before = []
  after = []
  for i, ent in enumerate(it):
    self.assertEqual(ent, expected[i])
    before.append(it.cursor_before())
    after.append(it.cursor_after())
  before.append(it.cursor_before())
  after.append(it.cursor_after())
  self.assertEqual(before[1], after[0])
  self.assertEqual(before[2], after[1])
  self.assertEqual(before[3], after[2])
  self.assertEqual(before[3], after[3])  # !!!

def testCursorsEfficientPaging(self):
  # We want to read a 'page' of data, get the cursor just past the
  # page, and know whether there is another page, all with a single
  # RPC.  To do this, set limit=pagesize+1, batch_size=pagesize.
  q = query.Query(kind='Foo')
  cursors = {}
  mores = {}
  for pagesize in [1, 2, 3, 4]:
    it = q.iter(produce_cursors=True, limit=pagesize + 1, batch_size=pagesize)
    todo = pagesize
    for _ in it:
      todo -= 1
      if todo <= 0:
        break
    cursors[pagesize] = it.cursor_after()
    mores[pagesize] = it.probably_has_next()
  # Only three Foo entities exist, so pages of size 3 and 4 are final.
  self.assertEqual(mores, {1: True, 2: True, 3: False, 4: False})
  self.assertEqual(cursors[3], cursors[4])
  # TODO: Assert that only one RPC call was made.
def create_index(self):
  """Install a composite index (Foo: name DESC, tags ASC) into the stub.

  The index is registered in WRITE_ONLY state via the datastore_v3
  stub; the index_list() tests rely on its presence.
  """
  ci = datastore_stub_util.datastore_pb.CompositeIndex()
  ci.set_app_id(os.environ['APPLICATION_ID'])
  ci.set_id(0)
  ci.set_state(ci.WRITE_ONLY)
  index = ci.mutable_definition()
  index.set_ancestor(0)
  index.set_entity_type('Foo')
  # Renamed local from `property` to `prop` to avoid shadowing the builtin.
  prop = index.add_property()
  prop.set_name('name')
  prop.set_direction(prop.DESCENDING)
  prop = index.add_property()
  prop.set_name('tags')
  prop.set_direction(prop.ASCENDING)
  stub = self.testbed.get_stub('datastore_v3')
  stub.CreateIndex(ci)
def testIndexListPremature(self):
  # Before calling next() we don't have the information.
  self.create_index()
  q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
  qi = q.iter()
  self.assertEqual(qi.index_list(), None)

def testIndexListEmpty(self):
  # A simple query requires no composite indexes.
  q = Foo.query(Foo.name == 'joe', Foo.tags == 'joe')
  qi = q.iter()
  qi.next()
  self.assertEqual(qi.index_list(), [])

def testIndexListNontrivial(self):
  # Test a non-trivial query.
  q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
  qi = q.iter()
  qi.next()
  properties = [model.IndexProperty(name='tags', direction='asc'),
                model.IndexProperty(name='name', direction='asc')]
  self.assertEqual(qi.index_list(),
                   [model.IndexState(
                       definition=model.Index(kind='Foo',
                                              properties=properties,
                                              ancestor=False),
                       state='serving',
                       id=0)])

def testIndexListExhausted(self):
  # Test that the information is preserved after the iterator is
  # exhausted.
  q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
  qi = q.iter()
  list(qi)
  properties = [model.IndexProperty(name='tags', direction='asc'),
                model.IndexProperty(name='name', direction='asc')]
  self.assertEqual(qi.index_list(),
                   [model.IndexState(
                       definition=model.Index(kind='Foo',
                                              properties=properties,
                                              ancestor=False),
                       state='serving',
                       id=0)])

def testIndexListWithIndexAndOrder(self):
  # Test a non-trivial query with sort order and an actual composite
  # index present.
  self.create_index()
  q = Foo.query(Foo.name >= 'joe', Foo.tags == 'joe')
  q = q.order(-Foo.name, Foo.tags)
  qi = q.iter()
  qi.next()
  # TODO: This is a little odd, because that's not exactly the index
  # we created...?
  properties = [model.IndexProperty(name='tags', direction='asc'),
                model.IndexProperty(name='name', direction='desc')]
  self.assertEqual(qi.index_list(),
                   [model.IndexState(
                       definition=model.Index(kind='Foo',
                                              properties=properties,
                                              ancestor=False),
                       state='serving',
                       id=0)])

def testIndexListMultiQuery(self):
  # Disjunctions (OR) expand to multiple queries, so no single index
  # list is available.
  self.create_index()
  q = Foo.query(query.OR(Foo.name == 'joe', Foo.name == 'jill'))
  qi = q.iter()
  qi.next()
  self.assertEqual(qi.index_list(), None)
def testCount(self):
  # count(limit) caps the count at `limit`.
  q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
  self.assertEqual(q.count(10), 2)
  self.assertEqual(q.count(1), 1)

def testCountAsync(self):
  # count_async() matches count() when yielded inside a tasklet.
  q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
  @tasklets.synctasklet
  def foo():
    res = yield q.count_async(10)
    self.assertEqual(res, 2)
    res = yield q.count_async(1)
    self.assertEqual(res, 1)
  foo()

def testCountEmpty(self):
  # count() of a query matching nothing is 0.
  q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
  self.assertEqual(q.count(1), 0)

def testCountPostFilter(self):
  # Structured-property equality requires in-memory post-filtering;
  # count() must still honor its limit.
  class Froo(model.Model):
    name = model.StringProperty()
    rate = model.IntegerProperty()
    age = model.IntegerProperty()
  class Bar(model.Model):
    name = model.StringProperty()
    froo = model.StructuredProperty(Froo, repeated=True)
  b1 = Bar(name='b1', froo=[Froo(name='a', rate=1)])
  b1.put()
  b2 = Bar(name='b2', froo=[Froo(name='a', rate=1)])
  b2.put()
  q = Bar.query(Bar.froo == Froo(name='a', rate=1))
  self.assertEqual(q.count(3), 2)
  self.assertEqual(q.count(2), 2)
  self.assertEqual(q.count(1), 1)

def testCountDisjunction(self):
  # count() works across the multiple sub-queries generated by IN().
  q = Foo.query(Foo.name.IN(['joe', 'jill']))
  self.assertEqual(q.count(3), 2)
  self.assertEqual(q.count(2), 2)
  self.assertEqual(q.count(1), 1)
def testFetchPage(self):
  # This test implicitly also tests fetch_page_async().
  # For each page size, walk through all pages and verify results,
  # the `more` flag, and that the final page is empty.
  q = query.Query(kind='Foo')

  page_size = 1
  res, curs, more = q.fetch_page(page_size)
  self.assertEqual(res, [self.joe])
  self.assertTrue(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [self.jill])
  self.assertTrue(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [self.moe])
  self.assertFalse(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [])
  self.assertFalse(more)

  page_size = 2
  res, curs, more = q.fetch_page(page_size)
  self.assertEqual(res, [self.joe, self.jill])
  self.assertTrue(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [self.moe])
  self.assertFalse(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [])
  self.assertFalse(more)

  page_size = 3
  res, curs, more = q.fetch_page(page_size)
  self.assertEqual(res, [self.joe, self.jill, self.moe])
  self.assertFalse(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [])
  self.assertFalse(more)

  page_size = 4
  res, curs, more = q.fetch_page(page_size)
  self.assertEqual(res, [self.joe, self.jill, self.moe])
  self.assertFalse(more)
  res, curs, more = q.fetch_page(page_size, start_cursor=curs)
  self.assertEqual(res, [])
  self.assertFalse(more)
def testMultiQueryIterator(self):
  # IN() expands to a multi-query; ordered iteration merges results.
  q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
  q = q.order(Foo.name)
  @tasklets.synctasklet
  def foo():
    it = iter(q)
    res = []
    while (yield it.has_next_async()):
      val = it.next()
      res.append(val)
    self.assertEqual(res, [self.jill, self.joe])
  foo()

def testMultiQueryIteratorUnordered(self):
  # Without an explicit order the merged result order is unspecified,
  # so only compare the key sets.
  q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
  @tasklets.synctasklet
  def foo():
    it = iter(q)
    res = []
    while (yield it.has_next_async()):
      val = it.next()
      res.append(val)
    self.assertEqual(set(r._key for r in res),
                     set([self.jill._key, self.joe._key]))
  foo()

def testMultiQueryFetch(self):
  # fetch() on an ordered multi-query honors limit/offset/keys_only.
  q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(-Foo.name)
  expected = [self.joe, self.jill]
  self.assertEqual(q.fetch(10), expected)
  self.assertEqual(q.fetch(None), expected)
  self.assertEqual(q.fetch(), expected)
  self.assertEqual(q.fetch(2), expected)
  self.assertEqual(q.fetch(1), expected[:1])
  self.assertEqual(q.fetch(10, offset=1), expected[1:])
  self.assertEqual(q.fetch(1, offset=1), expected[1:])
  self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])

def testMultiQueryFetchUnordered(self):
  # Same as above without an explicit order.
  q = Foo.query(Foo.tags.IN(['joe', 'jill']))
  expected = [self.joe, self.jill]
  self.assertEqual(q.fetch(10), expected)
  self.assertEqual(q.fetch(None), expected)
  self.assertEqual(q.fetch(), expected)
  self.assertEqual(q.fetch(2), expected)
  self.assertEqual(q.fetch(1), expected[:1])
  self.assertEqual(q.fetch(10, offset=1), expected[1:])
  self.assertEqual(q.fetch(1, offset=1), expected[1:])
  self.assertEqual(q.fetch(10, keys_only=True), [e._key for e in expected])

def testMultiQueryCount(self):
  # count() on an ordered multi-query.
  q = Foo.query(Foo.tags.IN(['joe', 'jill'])).order(Foo.name)
  self.assertEqual(q.count(10), 2)
  self.assertEqual(q.count(None), 2)
  self.assertEqual(q.count(), 2)
  self.assertEqual(q.count(2), 2)
  self.assertEqual(q.count(1), 1)
  self.assertEqual(q.count(10, keys_only=True), 2)
  self.assertEqual(q.count(keys_only=True), 2)

def testMultiQueryCountUnordered(self):
  # count() on an unordered multi-query.
  q = Foo.query(Foo.tags.IN(['joe', 'jill']))
  self.assertEqual(q.count(10), 2)
  self.assertEqual(q.count(None), 2)
  self.assertEqual(q.count(), 2)
  self.assertEqual(q.count(10, keys_only=True), 2)
  self.assertEqual(q.count(keys_only=True), 2)
def testMultiQueryCursors(self):
  # Cursors on an IN (multi-)query require the query to be ordered by
  # key; otherwise fetch_page() raises BadArgumentError.
  self.ExpectWarnings()
  q = Foo.query(Foo.tags.IN(['joe', 'jill']))
  self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
  q = q.order(Foo.tags)
  self.assertRaises(datastore_errors.BadArgumentError, q.fetch_page, 1)
  q = q.order(Foo.key)
  expected = q.fetch()
  self.assertEqual(len(expected), 2)

  res, curs, more = q.fetch_page(1, keys_only=True)
  self.assertEqual(res, [expected[0].key])
  self.assertTrue(curs is not None)
  self.assertTrue(more)

  res, curs, more = q.fetch_page(1, keys_only=False, start_cursor=curs)
  self.assertEqual(res, [expected[1]])
  self.assertTrue(curs is not None)
  self.assertFalse(more)

  res, curs, more = q.fetch_page(1, start_cursor=curs)
  self.assertEqual(res, [])
  self.assertTrue(curs is None)
  self.assertFalse(more)

def testMultiQueryWithAndWithoutAncestor(self):
  # A _MultiQuery may combine an ancestor query with a non-ancestor one.
  class Benjamin(model.Model):
    name = model.StringProperty()
  ben = Benjamin(name='ben', parent=self.moe.key)
  ben.put()
  benji = Benjamin(name='benji')
  benji.put()
  bq = Benjamin.query()
  baq = Benjamin.query(ancestor=self.moe.key)
  mq = query._MultiQuery([bq, baq])
  res = list(mq)
  self.assertEqual(res, [benji, ben])

def testNotEqualOperator(self):
  # != expands to a disjunction of < and >.
  q = query.Query(kind='Foo').filter(Foo.rate != 2)
  res = list(q)
  self.assertEqual(res, [self.joe, self.moe])

def testInOperator(self):
  # IN() accepts any iterable of candidate values.
  q = query.Query(kind='Foo').filter(Foo.tags.IN(('jill', 'hello')))
  res = list(q)
  self.assertEqual(res, [self.joe, self.jill])

def testFullDistributiveLaw(self):
  # Two IN() filters multiply out into a full OR-of-ANDs.
  q = query.Query(kind='Foo').filter(Foo.tags.IN(['jill', 'hello']))
  q = q.filter(Foo.rate.IN([1, 2]))
  DisjunctionNode = query.DisjunctionNode
  ConjunctionNode = query.ConjunctionNode
  FilterNode = query.FilterNode
  expected = DisjunctionNode(
      ConjunctionNode(FilterNode('tags', '=', 'jill'),
                      FilterNode('rate', '=', 1)),
      ConjunctionNode(FilterNode('tags', '=', 'jill'),
                      FilterNode('rate', '=', 2)),
      ConjunctionNode(FilterNode('tags', '=', 'hello'),
                      FilterNode('rate', '=', 1)),
      ConjunctionNode(FilterNode('tags', '=', 'hello'),
                      FilterNode('rate', '=', 2)))
  self.assertEqual(q.filters, expected)

def testHalfDistributiveLaw(self):
  # An IN() conjoined with plain filters distributes over them.
  DisjunctionNode = query.DisjunctionNode
  ConjunctionNode = query.ConjunctionNode
  FilterNode = query.FilterNode
  filters = ConjunctionNode(
      FilterNode('tags', 'in', ['jill', 'hello']),
      ConjunctionNode(FilterNode('rate', '=', 1),
                      FilterNode('name', '=', 'moe')))
  expected = DisjunctionNode(
      ConjunctionNode(FilterNode('tags', '=', 'jill'),
                      FilterNode('rate', '=', 1),
                      FilterNode('name', '=', 'moe')),
      ConjunctionNode(FilterNode('tags', '=', 'hello'),
                      FilterNode('rate', '=', 1),
                      FilterNode('name', '=', 'moe')))
  self.assertEqual(filters, expected)
def testKeyFilter(self):
  # Filtering on the special key property supports ==, > and <.
  class MyModel(model.Model):
    number = model.IntegerProperty()

  k1 = model.Key('MyModel', 'foo-1')
  m1 = MyModel(key=k1)
  m1.put()

  k2 = model.Key('MyModel', 'foo-2')
  m2 = MyModel(key=k2)
  m2.put()

  q = MyModel.query(MyModel.key == k1)
  res = q.get()
  self.assertEqual(res, m1)

  q = MyModel.query(MyModel.key > k1)
  res = q.get()
  self.assertEqual(res, m2)

  q = MyModel.query(MyModel.key < k2)
  res = q.get()
  self.assertEqual(res, m1)

def testUnicode(self):
  # Non-ASCII kind and property names round-trip through put/get/query.
  class MyModel(model.Model):
    n = model.IntegerProperty(u'\u4321')
    @classmethod
    def _get_kind(cls):
      return u'\u1234'.encode('utf-8')
  a = MyModel(n=42)
  k = a.put()
  b = k.get()
  self.assertEqual(a, b)
  self.assertFalse(a is b)
  # So far so good, now try queries
  res = MyModel.query(MyModel.n == 42).fetch()
  self.assertEqual(res, [a])

def testBlobQuery(self):
  # Indexed BlobProperty values (including non-UTF-8 bytes) are queryable.
  class MyModel(model.Model):
    b = model.BlobProperty(indexed=True)
  a = MyModel(b='\xff\x00')
  a.put()
  q = MyModel.query(MyModel.b == '\xff\x00')
  it = iter(q)
  b = it.next()
  self.assertEqual(a, b)

def testKindlessQuery(self):
  # An ancestor query without a kind matches entities of every kind
  # in the ancestor's group, including the ancestor itself.
  class ParentModel(model.Model):
    a = model.StringProperty()
  class ChildModel(model.Model):
    b = model.StringProperty()
  p = ParentModel(a="Test1")
  p.put()
  c = ChildModel(parent=p.key, b="Test2")
  c.put()
  q = query.Query(ancestor=p.key)
  self.assertEqual(q.count(), 2)
  l = q.fetch()
  self.assertTrue(c in l)
  self.assertTrue(p in l)
def testExpandoQueries(self):
  # Dynamic Expando attributes of each supported value type can be
  # queried back via a FilterNode built from the attribute name.
  class Foo(model.Expando):
    pass
  testdata = {'int': 42,
              'float': 3.14,
              'string': 'hello',
              'bool': True,
              # Don't call this 'key'; it interferes with the built-in
              # key attribute (the entity's key).
              'akey': model.Key('Foo', 1),
              'point': model.GeoPt(52.35, 4.9166667),
              'user': users.User('test@example.com', 'example.com', '123'),
              'blobkey': model.BlobKey('blah'),
              'none': None,
              }
  for name, value in testdata.iteritems():
    foo = Foo()
    setattr(foo, name, value)
    foo.put()
    qry = Foo.query(query.FilterNode(name, '=', value))
    res = qry.get()
    self.assertTrue(res is not None, name)
    self.assertEqual(getattr(res, name), value)
    res.key.delete()

def testQueryCacheInteraction(self):
  # Query results go through the context cache: a cached entity object
  # is returned as-is (even if locally modified) until its key changes.
  class Bar(model.Model):
    name = model.StringProperty()
  ctx = tasklets.get_context()
  ctx.set_cache_policy(True)
  a = Bar(name='a')
  a.put()
  b = a.key.get()
  self.assertTrue(b is a)  # Just verifying that the cache is on.
  b = Bar.query().get()
  self.assertTrue(b is a)
  a.name = 'x'  # Modify, but don't write
  b = Bar.query().get()
  self.assertTrue(b is a)
  self.assertEqual(a.name, 'x')
  a.key = None  # Invalidate cache by resetting key.
  b = Bar.query().get()
  self.assertFalse(b is a)
  self.assertEqual(a.name, 'x')
  self.assertEqual(b.name, 'a')
def testGqlMinimal(self):
  # A bare SELECT parses into a Query with only a kind.
  qry = query.gql('SELECT * FROM Foo')
  self.assertEqual(qry.kind, 'Foo')
  self.assertEqual(qry.ancestor, None)
  self.assertEqual(qry.filters, None)
  self.assertEqual(qry.orders, None)

def testGqlAncestor(self):
  # ANCESTOR IS KEY('...') parses into the ancestor attribute.
  key = model.Key('Foo', 42)
  qry = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS KEY('%s')" %
                  key.urlsafe())
  self.assertEqual(qry.kind, 'Foo')
  self.assertEqual(qry.ancestor, key)
  self.assertEqual(qry.filters, None)
  self.assertEqual(qry.orders, None)

def testGqlAncestorWithParameter(self):
  # An unbound ancestor is represented by a Parameter placeholder.
  qry = query.gql('SELECT * FROM Foo WHERE ANCESTOR IS :1')
  self.assertEqual(qry.kind, 'Foo')
  self.assertEqual(qry.ancestor, query.Parameter(1))
  self.assertEqual(qry.filters, None)
  self.assertEqual(qry.orders, None)

def testGqlFilter(self):
  # AND-ed equality filters parse into a ConjunctionNode.
  qry = query.gql("SELECT * FROM Foo WHERE name = 'joe' AND rate = 1")
  self.assertEqual(qry.kind, 'Foo')
  self.assertEqual(qry.ancestor, None)
  self.assertEqual(qry.filters,
                   query.ConjunctionNode(
                       query.FilterNode('name', '=', 'joe'),
                       query.FilterNode('rate', '=', 1)))
  self.assertEqual(qry.orders, None)

def testGqlOrder(self):
  # ORDER BY parses into the orders attribute.
  qry = query.gql('SELECT * FROM Foo ORDER BY name')
  self.assertEqual(query._orders_to_orderings(qry.orders),
                   [('name', query._ASC)])

def testGqlOffset(self):
  # OFFSET becomes a default query option.
  qry = query.gql('SELECT * FROM Foo OFFSET 2')
  self.assertEqual(qry.default_options.offset, 2)

def testGqlLimit(self):
  # LIMIT becomes a default query option.
  qry = query.gql('SELECT * FROM Foo LIMIT 2')
  self.assertEqual(qry.default_options.limit, 2)

def testGqlParameters(self):
  # Positional (:1) and named (:foo) parameters parse into
  # ParameterNode placeholders bound to the model properties.
  qry = query.gql('SELECT * FROM Foo WHERE name = :1 AND rate = :foo')
  self.assertEqual(qry.kind, 'Foo')
  self.assertEqual(qry.ancestor, None)
  self.assertEqual(qry.filters,
                   query.ConjunctionNode(
                       query.ParameterNode(Foo.name, '=',
                                           query.Parameter(1)),
                       query.ParameterNode(Foo.rate, '=',
                                           query.Parameter('foo'))))
  self.assertEqual(qry.orders, None)

def testGqlBindParameters(self):
  # bind() produces a new, executable query for each set of values.
  pqry = query.gql('SELECT * FROM Foo WHERE name = :1')
  qry = pqry.bind('joe')
  self.assertEqual(list(qry), [self.joe])
  qry = pqry.bind('jill')
  self.assertEqual(list(qry), [self.jill])

def testGqlUnresolvedParameters(self):
  # Executing a query with unbound parameters raises BadArgumentError.
  self.ExpectErrors()
  qry = query.gql(
      'SELECT * FROM Foo WHERE name = :1')
  self.assertRaises(datastore_errors.BadArgumentError, qry.fetch)
  self.assertRaises(datastore_errors.BadArgumentError, qry.count)
  self.assertRaises(datastore_errors.BadArgumentError, list, qry)
  self.assertRaises(datastore_errors.BadArgumentError, qry.iter)
def checkGql(self, expected, gql, args=(), kwds=None,
             fetch=lambda q: list(q)):
  """Helper: parse `gql`, bind args/kwds, run `fetch`, compare to `expected`.

  `kwds` defaults to None instead of a mutable {} (avoids the shared
  mutable default argument pitfall); behavior is unchanged for callers.
  """
  actual = fetch(query.gql(gql).bind(*args, **(kwds or {})))
  self.assertEqual(expected, actual)
def testGqlBasicQueries(self):
  # SELECT * returns all entities in key order.
  self.checkGql([self.joe, self.jill, self.moe], "SELECT * FROM Foo")

def testGqlKeyQueries(self):
  # SELECT __key__ returns keys instead of entities.
  self.checkGql([self.joe.key, self.jill.key, self.moe.key],
                "SELECT __key__ FROM Foo")

def testGqlOperatorQueries(self):
  # Each comparison operator filters as expected.
  self.checkGql([self.joe], "SELECT * FROM Foo WHERE name = 'joe'")
  self.checkGql([self.moe], "SELECT * FROM Foo WHERE name > 'joe'")
  self.checkGql([self.jill], "SELECT * FROM Foo WHERE name < 'joe'")
  self.checkGql([self.joe, self.moe],
                "SELECT * FROM Foo WHERE name >= 'joe'")
  self.checkGql([self.jill, self.joe],
                "SELECT * FROM Foo WHERE name <= 'joe'")
  self.checkGql([self.jill, self.moe],
                "SELECT * FROM Foo WHERE name != 'joe'")
  # NOTE: The ordering on these is questionable:
  self.checkGql([self.joe, self.jill],
                "SELECT * FROM Foo WHERE name IN ('joe', 'jill')")
  self.checkGql([self.jill, self.joe],
                "SELECT * FROM Foo WHERE name IN ('jill', 'joe')")

def testGqlOrderQueries(self):
  # ORDER BY supports properties, __key__, directions and multiple keys.
  self.checkGql([self.jill, self.joe, self.moe],
                "SELECT * FROM Foo ORDER BY name")
  self.checkGql([self.moe, self.joe, self.jill],
                "SELECT * FROM Foo ORDER BY name DESC")
  self.checkGql([self.joe, self.jill, self.moe],
                "SELECT * FROM Foo ORDER BY __key__ ASC")
  self.checkGql([self.moe, self.jill, self.joe],
                "SELECT * FROM Foo ORDER BY __key__ DESC")
  self.checkGql([self.jill, self.joe, self.moe],
                "SELECT * FROM Foo ORDER BY rate DESC, name")

def testGqlOffsetQuery(self):
  self.checkGql([self.jill, self.moe], "SELECT * FROM Foo OFFSET 1")

def testGqlLimitQuery(self):
  self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2")

def testGqlLimitOffsetQuery(self):
  self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1")

def testGqlLimitOffsetQueryUsingFetch(self):
  self.checkGql([self.jill], "SELECT * FROM Foo LIMIT 1 OFFSET 1",
                fetch=lambda q: q.fetch())

# XXX TODO: Make this work:
## def testGqlLimitQueryUsingFetch(self):
##   self.checkGql([self.joe, self.jill], "SELECT * FROM Foo LIMIT 2",
##                 fetch=lambda q: q.fetch(3))

def testGqlOffsetQueryUsingFetchPage(self):
  # NOTE(review): this method's name and the one below look swapped
  # relative to the GQL they exercise (this one uses LIMIT) -- confirm.
  q = query.gql("SELECT * FROM Foo LIMIT 2")
  res1, cur1, more1 = q.fetch_page(1)
  self.assertEqual([self.joe], res1)
  self.assertEqual(True, more1)
  res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1)
  self.assertEqual([self.jill], res2)
  # XXX TODO: Gotta make this work:
  ## self.assertEqual(False, more2)
  ## res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2)
  ## self.assertEqual([], res3)
  ## self.assertEqual(False, more3)
  ## self.assertEqual(None, cur3)

def testGqlLimitQueryUsingFetchPage(self):
  q = query.gql("SELECT * FROM Foo OFFSET 1")
  res1, cur1, more1 = q.fetch_page(1)
  self.assertEqual([self.jill], res1)
  self.assertEqual(True, more1)
  # NOTE: Without offset=0, the following break.
  res2, cur2, more2 = q.fetch_page(1, start_cursor=cur1, offset=0)
  self.assertEqual([self.moe], res2)
  self.assertEqual(False, more2)
  res3, cur3, more3 = q.fetch_page(1, start_cursor=cur2, offset=0)
  self.assertEqual([], res3)
  self.assertEqual(False, more3)
  self.assertEqual(None, cur3)
def testGqlParameterizedAncestor(self):
  # ANCESTOR IS :1 binds to a key value.
  q = query.gql("SELECT * FROM Foo WHERE ANCESTOR IS :1")
  self.assertEqual([self.moe], q.bind(self.moe.key).fetch())

def testGqlParameterizedInClause(self):
  # NOTE: The ordering on these is questionable:
  q = query.gql("SELECT * FROM Foo WHERE name IN :1")
  self.assertEqual([self.jill, self.joe], q.bind(('jill', 'joe')).fetch())
  # Exercise the LIST function.
  q = query.gql("SELECT * FROM Foo WHERE name IN (:a, :b)")
  self.assertEqual([self.jill, self.joe], q.bind(a='jill', b='joe').fetch())
  # Generate OR/AND nodes containing parameter nodes.
  q = query.gql("SELECT * FROM Foo WHERE name = :1 AND rate in (1, 2)")
  self.assertEqual([self.jill], q.bind('jill').fetch())

def testGqlKeyFunction(self):
  # KEY(...) in GQL accepts a urlsafe string, a (kind, id) pair, and
  # bound parameters; NULL/None match entities without the property.
  class Bar(model.Model):
    ref = model.KeyProperty(kind=Foo)
  noref = Bar()
  noref.put()
  joeref = Bar(ref=self.joe.key)
  joeref.put()
  moeref = Bar(ref=self.moe.key)
  moeref.put()
  self.assertEqual(
      [noref],
      Bar.gql("WHERE ref = NULL").fetch())
  self.assertEqual(
      [noref],
      Bar.gql("WHERE ref = :1").bind(None).fetch())
  self.assertEqual(
      [joeref],
      Bar.gql("WHERE ref = :1").bind(self.joe.key).fetch())
  self.assertEqual(
      [joeref],
      Bar.gql("WHERE ref = KEY('%s')" % self.joe.key.urlsafe()).fetch())
  self.assertEqual(
      [joeref],
      Bar.gql("WHERE ref = KEY('Foo', %s)" % self.joe.key.id()).fetch())
  self.assertEqual(
      [joeref],
      Bar.gql("WHERE ref = KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
  self.assertEqual(
      [joeref],
      Bar.gql("WHERE ref = KEY('Foo', :1)").bind(self.joe.key.id()).fetch())

def testGqlKeyFunctionAncestor(self):
  # The same KEY(...) forms work in ANCESTOR IS clauses.
  class Bar(model.Model):
    pass
  nobar = Bar()
  nobar.put()
  joebar = Bar(parent=self.joe.key)
  joebar.put()
  moebar = Bar(parent=self.moe.key)
  moebar.put()
  self.assertEqual(
      [joebar],
      Bar.gql("WHERE ANCESTOR IS KEY('%s')" % self.joe.key.urlsafe()).fetch())
  self.assertEqual(
      [joebar],
      Bar.gql("WHERE ANCESTOR IS :1").bind(self.joe.key).fetch())
  self.assertEqual(
      [joebar],
      Bar.gql("WHERE ANCESTOR IS KEY(:1)").bind(self.joe.key.urlsafe()).fetch())
  self.assertEqual(
      [joebar],
      Bar.gql("WHERE ANCESTOR IS KEY('Foo', :1)")
      .bind(self.joe.key.id()).fetch())

def testGqlAncestorFunctionError(self):
  # Only KEY() is allowed after ANCESTOR IS.
  self.assertRaises(TypeError,
                    query.gql, 'SELECT * FROM Foo WHERE ANCESTOR IS USER(:1)')

def testGqlOtherFunctions(self):
  # USER(), GEOPT(), DATETIME(), DATE() and TIME() literals/parameters.
  class Bar(model.Model):
    auser = model.UserProperty()
    apoint = model.GeoPtProperty()
    adatetime = model.DateTimeProperty()
    adate = model.DateProperty()
    atime = model.TimeProperty()
  abar = Bar(
      auser=users.User('test@example.com'),
      apoint=model.GeoPt(52.35, 4.9166667),
      adatetime=datetime.datetime(2012, 2, 1, 14, 54, 0),
      adate=datetime.date(2012, 2, 2),
      atime=datetime.time(14, 54, 0),
      )
  abar.put()
  bbar = Bar()
  bbar.put()
  self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE auser=USER(:1)")
      .bind('test@example.com').fetch())
  self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE apoint=GEOPT(:1, :2)")
      .bind(52.35, 4.9166667).fetch())
  self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE adatetime=DATETIME(:1)")
      .bind('2012-02-01 14:54:00').fetch())
  self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE adate=DATE(:1, :2, :2)")
      .bind(2012, 2).fetch())
  self.assertEqual(
      [abar.key],
      query.gql("SELECT __key__ FROM Bar WHERE atime=TIME(:hour, :min, :sec)")
      .bind(hour=14, min=54, sec=0).fetch())
def testGqlStructuredPropertyQuery(self):
  # Structured sub-properties are addressable as quoted "foo.name",
  # and whole-structure equality/NULL comparisons work.
  class Bar(model.Model):
    foo = model.StructuredProperty(Foo)
  barf = Bar(foo=Foo(name='one', rate=3, tags=['a', 'b']))
  barf.put()
  barg = Bar(foo=Foo(name='two', rate=4, tags=['b', 'c']))
  barg.put()
  barh = Bar()
  barh.put()
  # TODO: Once SDK 1.6.3 is released, drop quotes around foo.name.
  q = Bar.gql("WHERE \"foo.name\" = 'one'")
  self.assertEqual([barf], q.fetch())
  q = Bar.gql("WHERE foo = :1").bind(Foo(name='two', rate=4))
  self.assertEqual([barg], q.fetch())
  q = Bar.gql("WHERE foo = NULL")
  self.assertEqual([barh], q.fetch())
  q = Bar.gql("WHERE foo = :1")
  self.assertEqual([barh], q.bind(None).fetch())

def testGqlExpandoProperty(self):
  # GQL can filter on dynamic Expando attributes.
  class Bar(model.Expando):
    pass
  babar = Bar(name='Babar')
  babar.put()
  bare = Bar(nude=42)
  bare.put()
  q = Bar.gql("WHERE name = 'Babar'")
  self.assertEqual([babar], q.fetch())
  q = Bar.gql("WHERE nude = :1")
  self.assertEqual([bare], q.bind(42).fetch())

def testGqlExpandoInStructure(self):
  # Dynamic attributes inside a structured property are addressable too.
  class Bar(model.Expando):
    pass
  class Baz(model.Model):
    bar = model.StructuredProperty(Bar)
  bazar = Baz(bar=Bar(bow=1, wow=2))
  bazar.put()
  bazone = Baz()
  bazone.put()
  q = Baz.gql("WHERE \"bar.bow\" = 1")
  self.assertEqual([bazar], q.fetch())

def testGqlKindlessQuery(self):
  # SELECT * without FROM queries every kind.
  results = query.gql('SELECT *').fetch()
  self.assertEqual([self.joe, self.jill, self.moe], results)

def testGqlSubclass(self):
  # You can pass gql() a subclass of Query and it'll use that.
  class MyQuery(query.Query):
    pass
  q = query._gql("SELECT * FROM Foo WHERE name = :1", query_class=MyQuery)
  self.assertTrue(isinstance(q, MyQuery))
  # And bind() preserves the class.
  qb = q.bind('joe')
  self.assertTrue(isinstance(qb, MyQuery))
  # .filter() also preserves the class, as well as default_options.
  qf = q.filter(Foo.rate == 1)
  self.assertTrue(isinstance(qf, MyQuery))
  self.assertEqual(qf.default_options, q.default_options)
  # Same for .options().
  qo = q.order(-Foo.name)
  self.assertTrue(isinstance(qo, MyQuery))
  self.assertEqual(qo.default_options, q.default_options)

def testGqlUnusedBindings(self):
  # Only unused positional bindings raise an error.
  q = Foo.gql("WHERE ANCESTOR IS :1 AND rate >= :2")
  qb = q.bind(self.joe.key, 2, foo=42)  # Must not fail
  self.assertRaises(datastore_errors.BadArgumentError, q.bind)
  self.assertRaises(datastore_errors.BadArgumentError, q.bind, self.joe.key)
  self.assertRaises(datastore_errors.BadArgumentError, q.bind,
                    self.joe.key, 2, 42)

def testGqlWithBind(self):
  # Positional bind values may be passed directly to gql().
  q = Foo.gql("WHERE name = :1", 'joe')
  self.assertEqual([self.joe], q.fetch())

def testGqlAnalyze(self):
  # analyze() lists unbound parameter names: ints first, then strings.
  q = Foo.gql("WHERE name = 'joe'")
  self.assertEqual([], q.analyze())
  q = Foo.gql("WHERE name = :1 AND rate = :2")
  self.assertEqual([1, 2], q.analyze())
  q = Foo.gql("WHERE name = :foo AND rate = :bar")
  self.assertEqual(['bar', 'foo'], q.analyze())
  q = Foo.gql("WHERE tags = :1 AND name = :foo AND rate = :bar")
  self.assertEqual([1, 'bar', 'foo'], q.analyze())
def main():
  # Entry point: discover and run all tests in this module.
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""An event loop.
This event loop should handle both asynchronous App Engine RPC objects
(specifically urlfetch, memcache and datastore RPC objects) and arbitrary
callback functions with an optional time delay.
Normally, event loops are singleton objects, though there is no
enforcement of this requirement.
The API here is inspired by Monocle.
"""
import collections
import logging
import os
import time
from .google_imports import apiproxy_rpc
from .google_imports import datastore_rpc
from . import utils
# Public API of this module.
__all__ = ['EventLoop',
           'add_idle', 'queue_call', 'queue_rpc',
           'get_event_loop',
           'run', 'run0', 'run1',
           ]

# Shorthand for the conditional debug logger in utils.
_logging_debug = utils.logging_debug

# Aliases for the RPC state constants used below.
_IDLE = apiproxy_rpc.RPC.IDLE
_RUNNING = apiproxy_rpc.RPC.RUNNING
_FINISHING = apiproxy_rpc.RPC.FINISHING
class EventLoop(object):
  """An event loop.

  Pending work lives in four containers, all drained by run():
    current -- callbacks ready to run immediately (FIFO);
    idlers  -- callbacks polled round-robin when nothing else is ready;
    queue   -- delayed callbacks, kept sorted by absolute due time;
    rpcs    -- in-flight RPCs mapped to optional completion callbacks.
  """

  def __init__(self):
    """Constructor."""
    self.current = collections.deque()  # FIFO list of (callback, args, kwds)
    self.idlers = collections.deque()  # Cyclic list of (callback, args, kwds)
    self.inactive = 0  # How many idlers in a row were no-ops
    self.queue = []  # Sorted list of (time, callback, args, kwds)
    self.rpcs = {}  # Map of rpc -> (callback, args, kwds)

  def clear(self):
    """Remove all pending events without running any."""
    while self.current or self.idlers or self.queue or self.rpcs:
      # Keep references to the old containers, reset the attributes to
      # fresh ones via __init__(), then empty the old containers -- so
      # anything still holding one of them sees it emptied too.
      current = self.current
      idlers = self.idlers
      queue = self.queue
      rpcs = self.rpcs
      _logging_debug('Clearing stale EventLoop instance...')
      if current:
        _logging_debug(' current = %s', current)
      if idlers:
        _logging_debug(' idlers = %s', idlers)
      if queue:
        _logging_debug(' queue = %s', queue)
      if rpcs:
        _logging_debug(' rpcs = %s', rpcs)
      self.__init__()
      current.clear()
      idlers.clear()
      queue[:] = []
      rpcs.clear()
      _logging_debug('Cleared')

  def insort_event_right(self, event, lo=0, hi=None):
    """Insert event in queue, and keep it sorted assuming queue is sorted.

    If event is already in queue, insert it to the right of the rightmost
    event (to keep FIFO order).

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    Args:
      event: a (time, callback, args, kwds) tuple; compared on time only.
    """
    if lo < 0:
      raise ValueError('lo must be non-negative')
    if hi is None:
      hi = len(self.queue)
    # Hand-rolled bisect-right keyed on event[0] (the due time).
    while lo < hi:
      mid = (lo + hi) // 2
      if event[0] < self.queue[mid][0]: hi = mid
      else: lo = mid + 1
    self.queue.insert(lo, event)

  def queue_call(self, delay, callback, *args, **kwds):
    """Schedule a function call at a specific time in the future.

    A delay of None means "run on the next loop iteration" (no clock
    involved); small delays are relative to now, huge ones absolute.
    """
    if delay is None:
      self.current.append((callback, args, kwds))
      return
    if delay < 1e9:
      when = delay + time.time()
    else:
      # Times over a billion seconds are assumed to be absolute.
      when = delay
    self.insort_event_right((when, callback, args, kwds))

  def queue_rpc(self, rpc, callback=None, *args, **kwds):
    """Schedule an RPC with an optional callback.

    The caller must have previously sent the call to the service.
    The optional callback is called with the remaining arguments.

    NOTE: If the rpc is a MultiRpc, the callback will be called once
    for each sub-RPC.  TODO: Is this a good idea?
    """
    if rpc is None:
      return
    if rpc.state not in (_RUNNING, _FINISHING):
      raise RuntimeError('rpc must be sent to service before queueing')
    if isinstance(rpc, datastore_rpc.MultiRpc):
      rpcs = rpc.rpcs
      if len(rpcs) > 1:
        # Don't call the callback until all sub-rpcs have completed.
        # (Inside this class body, rpc.__done is name-mangled to
        # rpc._EventLoop__done, so it cannot clash with other code.)
        rpc.__done = False
        def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds):
          if r.state == _FINISHING and not r.__done:
            r.__done = True
            c(*a, **k)
            # TODO: And again, what about exceptions?
        callback = help_multi_rpc_along
        args = ()
        kwds = {}
    else:
      rpcs = [rpc]
    # Each (sub-)RPC is tracked individually; wait_any in run0() picks
    # whichever finishes first.
    for rpc in rpcs:
      self.rpcs[rpc] = (callback, args, kwds)

  def add_idle(self, callback, *args, **kwds):
    """Add an idle callback.

    An idle callback can return True, False or None.  These mean:

    - None: remove the callback (don't reschedule)
    - False: the callback did no work; reschedule later
    - True: the callback did some work; reschedule soon

    If the callback raises an exception, the traceback is logged and
    the callback is removed.
    """
    self.idlers.append((callback, args, kwds))

  def run_idle(self):
    """Run one of the idle callbacks.

    Returns:
      True if one was called, False if no idle callback was called.
    """
    # If every idler in the cycle reported no work, give up until
    # something else resets self.inactive.
    if not self.idlers or self.inactive >= len(self.idlers):
      return False
    idler = self.idlers.popleft()
    callback, args, kwds = idler
    _logging_debug('idler: %s', callback.__name__)
    res = callback(*args, **kwds)
    # See add_idle() for the meaning of the callback return value.
    if res is not None:
      if res:
        self.inactive = 0
      else:
        self.inactive += 1
      self.idlers.append(idler)
    else:
      _logging_debug('idler %s removed', callback.__name__)
    return True

  def run0(self):
    """Run one item (a callback or an RPC wait_any).

    Returns:
      A time to sleep if something happened (may be 0);
      None if all queues are empty.
    """
    # Priority order: immediate callbacks, idlers, due timed events,
    # then blocking on outstanding RPCs.
    if self.current:
      self.inactive = 0
      callback, args, kwds = self.current.popleft()
      _logging_debug('nowevent: %s', callback.__name__)
      callback(*args, **kwds)
      return 0
    if self.run_idle():
      return 0
    delay = None
    if self.queue:
      delay = self.queue[0][0] - time.time()
      if delay <= 0:
        self.inactive = 0
        _, callback, args, kwds = self.queue.pop(0)
        _logging_debug('event: %s', callback.__name__)
        callback(*args, **kwds)
        # TODO: What if it raises an exception?
        return 0
    if self.rpcs:
      self.inactive = 0
      rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
      if rpc is not None:
        _logging_debug('rpc: %s.%s', rpc.service, rpc.method)
        # Yes, wait_any() may return None even for a non-empty argument.
        # But no, it won't ever return an RPC not in its argument.
        if rpc not in self.rpcs:
          raise RuntimeError('rpc %r was not given to wait_any as a choice %r' %
                             (rpc, self.rpcs))
        callback, args, kwds = self.rpcs[rpc]
        del self.rpcs[rpc]
        if callback is not None:
          callback(*args, **kwds)
          # TODO: Again, what about exceptions?
      return 0
    return delay

  def run1(self):
    """Run one item (a callback or an RPC wait_any) or sleep.

    Returns:
      True if something happened; False if all queues are empty.
    """
    delay = self.run0()
    if delay is None:
      return False
    if delay > 0:
      time.sleep(delay)
    return True

  def run(self):
    """Run until there's nothing left to do."""
    # TODO: A way to stop running before the queue is empty.
    self.inactive = 0
    while True:
      if not self.run1():
        break
class _State(utils.threading_local):
  # Thread-local holder for the cached per-request EventLoop instance.
  event_loop = None
# Marker key planted in os.environ; its absence signals a new HTTP request
# (the runtime resets os.environ at the start of each request).
_EVENT_LOOP_KEY = '__EVENT_LOOP__'
_state = _State()
def get_event_loop():
  """Return an EventLoop instance for the current request and thread.
  A new instance is created for each new HTTP request: os.environ is
  reset at the start of each request, so a missing marker key means any
  cached loop belongs to a previous request and must be discarded.
  Each thread also gets its own loop (the cache is thread-local).
  """
  loop = _state.event_loop
  if loop is not None and not os.getenv(_EVENT_LOOP_KEY):
    # Stale loop from a previous request: drop it.
    loop.clear()
    _state.event_loop = None
    loop = None
  if loop is None:
    loop = EventLoop()
    _state.event_loop = loop
    os.environ[_EVENT_LOOP_KEY] = '1'
  return loop
def queue_call(*args, **kwds):
  """Module-level convenience: queue_call() on the current event loop."""
  get_event_loop().queue_call(*args, **kwds)
def queue_rpc(rpc, callback=None, *args, **kwds):
  """Module-level convenience: queue_rpc() on the current event loop."""
  get_event_loop().queue_rpc(rpc, callback, *args, **kwds)
def add_idle(callback, *args, **kwds):
  """Module-level convenience: add_idle() on the current event loop."""
  get_event_loop().add_idle(callback, *args, **kwds)
def run():
  """Module-level convenience: run() the current event loop to completion."""
  get_event_loop().run()
def run1():
  """Module-level convenience: run1() on the current event loop."""
  return get_event_loop().run1()
def run0():
  """Module-level convenience: run0() on the current event loop."""
  return get_event_loop().run0()
| Python |
"""Low-level utilities used internally by NDB.
These are not meant for use by code outside NDB.
"""
import logging
import os
import sys
import threading
__all__ = []
DEBUG = True  # Set to False for some speedups
def logging_debug(*args):
  """Emit a debug log record only when verbose debugging is on.
  NOTE: messages only appear when the root logger's level is set
  strictly below logging.DEBUG (e.g. logging.DEBUG - 1); for tests
  use -v -v -v (see tweak_logging below).
  """
  if DEBUG and logging.getLogger().level < logging.DEBUG:
    logging.debug(*args)
def wrapping(wrapped):
  """Decorator factory that marks a wrapper as standing in for *wrapped*.
  Following the lead of Twisted and Monocle, the wrapper gets the
  wrapped function's __name__, __doc__ and __dict__, and records the
  original under __wrapped__, to make heavily decorated code easier to
  debug.  TODO: Evaluate; so far it hasn't helped, and it has hurt some.
  """
  def copy_metadata(wrapper):
    # Best-effort: some callables reject attribute assignment.
    try:
      wrapper.__wrapped__ = wrapped
      wrapper.__name__ = wrapped.__name__
      wrapper.__doc__ = wrapped.__doc__
      wrapper.__dict__.update(wrapped.__dict__)
    except Exception:
      pass
    return wrapper
  return copy_metadata
# Define a base class for classes that need to be thread-local.
# This is pretty subtle; we want to use threading.local if threading
# is supported, but object if it is not.
if threading.local.__module__ == 'thread':
  # NOTE(review): presumably a 'thread' __module__ means the C
  # implementation (real thread support) is in use -- confirm.
  logging_debug('Using threading.local')
  threading_local = threading.local
else:
  # Fallback: no real thread-local support; state is effectively global.
  logging_debug('Not using threading.local')
  threading_local = object
def get_stack(limit=10):
  # Return a list of strings showing where the current frame was called.
  # Frames whose local __ndb_debug__ == 'SKIP' are omitted entirely; any
  # other non-None __ndb_debug__ value is appended to that frame's line
  # as an annotation.  Returns () when DEBUG is off.
  if not DEBUG:
    return ()
  frame = sys._getframe(1)  # Always skip get_stack() itself.
  lines = []
  while len(lines) < limit and frame is not None:
    f_locals = frame.f_locals
    ndb_debug = f_locals.get('__ndb_debug__')
    if ndb_debug != 'SKIP':
      line = frame_info(frame)
      if ndb_debug is not None:
        line += ' # ' + str(ndb_debug)
      lines.append(line)
    frame = frame.f_back
  return lines
def func_info(func, lineno=None):
  """Return a 'name(filename:lineno)' description of a function.
  Unwraps functions decorated via wrapping() first.  Returns None when
  DEBUG is off, and '' (via code_info) for callables that have no code
  object (e.g. builtins).
  """
  if not DEBUG:
    return None
  func = getattr(func, '__wrapped__', func)
  # Prefer the modern __code__ attribute; fall back to the legacy
  # Python 2 'func_code' alias so older function objects still work.
  code = getattr(func, '__code__', None)
  if code is None:
    code = getattr(func, 'func_code', None)
  return code_info(code, lineno)
def gen_info(gen):
  # Describe a generator's state and current location for debugging.
  # Returns None when DEBUG is off.
  if not DEBUG:
    return None
  frame = gen.gi_frame
  if gen.gi_running:
    prefix = 'running generator '
  elif frame:
    if frame.f_lasti < 0:
      # No bytecode executed yet: the generator was never started.
      prefix = 'initial generator '
    else:
      prefix = 'suspended generator '
  else:
    # gi_frame is None once the generator has finished or been closed.
    prefix = 'terminated generator '
  if frame:
    return prefix + frame_info(frame)
  code = getattr(gen, 'gi_code', None)
  if code:
    return prefix + code_info(code)
  # Last resort: identify the generator by its id().
  return prefix + hex(id(gen))
def frame_info(frame):
  """Describe *frame* as 'funcname(filename:lineno)' (None when DEBUG off)."""
  return code_info(frame.f_code, frame.f_lineno) if DEBUG else None
def code_info(code, lineno=None):
  """Describe a code object as 'funcname(basename:lineno)'.
  Returns '' when DEBUG is off or *code* is None/falsy.  When *lineno*
  is not given, the code object's first line number is used.
  """
  if not (DEBUG and code):
    return ''
  if lineno is None:
    lineno = code.co_firstlineno
  # TODO: Be cleverer about stripping filename,
  # e.g. strip based on sys.path.
  short_name = os.path.basename(code.co_filename)
  return '%s(%s:%s)' % (code.co_name, short_name, lineno)
def positional(max_pos_args):
  """A decorator to declare that only the first N arguments may be positional.
  Note that for methods, n includes 'self'.
  """
  # The '__ndb_debug__ = "SKIP"' locals below tell get_stack() to omit
  # these frames from debug stack traces.
  __ndb_debug__ = 'SKIP'
  def positional_decorator(wrapped):
    if not DEBUG:
      # No checking wanted: return the function unwrapped for speed.
      return wrapped
    __ndb_debug__ = 'SKIP'
    @wrapping(wrapped)
    def positional_wrapper(*args, **kwds):
      __ndb_debug__ = 'SKIP'
      if len(args) > max_pos_args:
        plural_s = ''
        if max_pos_args != 1:
          plural_s = 's'
        raise TypeError(
          '%s() takes at most %d positional argument%s (%d given)' %
          (wrapped.__name__, max_pos_args, plural_s, len(args)))
      return wrapped(*args, **kwds)
    return positional_wrapper
  return positional_decorator
def tweak_logging():
  """Adjust logging level and DEBUG from -v / -q command-line flags.
  Hack for running tests with verbose logging: two or more -v flags
  turn on INFO logging, three or more DEBUG-1.  (A single -v just tells
  unittest.main() to print each test name; we don't interfere with
  that.)  Any -q flag sets the module-level DEBUG to False, suppressing
  extra debug info even from warnings.
  """
  quiet = 0
  verbose = 0
  for flag in sys.argv[1:]:
    if flag.startswith('-v'):
      verbose += flag.count('v')
    if flag.startswith('-q'):
      quiet += flag.count('q')
  if verbose >= 2:
    chosen_level = logging.INFO
    if verbose >= 3:
      chosen_level = logging.DEBUG - 1
    logging.basicConfig(level=chosen_level)
  if quiet > 0:
    global DEBUG
    DEBUG = False
# Apply the -v/-q flag handling automatically when this module is loaded
# as part of a test binary (heuristic: 'test' appears in the script name).
if 'test' in os.path.basename(sys.argv[0]):
  tweak_logging()
| Python |
"""Like google_imports.py, but for use by tests.
This imports the testbed package and some stubs.
"""
from . import google_imports
if google_imports.normal_environment:
  # Standard App Engine SDK package layout.
  from google.appengine.api.prospective_search import prospective_search_stub
  from google.appengine.datastore import datastore_stub_util
  from google.appengine.ext import testbed
else:
  # Internal google3 package layout.
  # Prospective search is optional.
  try:
    from google3.apphosting.api.prospective_search import prospective_search_stub
  except ImportError:
    pass
  from google3.apphosting.datastore import datastore_stub_util
  from google3.apphosting.ext import testbed
| Python |
"""Tests for eventloop.py."""
import logging
import os
import time
import unittest
from .google_imports import apiproxy_stub_map
from .google_imports import datastore_rpc
from . import eventloop
from . import test_utils
class EventLoopTests(test_utils.NDBTest):
  # Tests for eventloop.py: queueing and ordering of delayed calls,
  # RPC completion handling, idlers, and per-request loop lifecycle.
  def setUp(self):
    super(EventLoopTests, self).setUp()
    # Remove the request marker so get_event_loop() builds a fresh loop.
    if eventloop._EVENT_LOOP_KEY in os.environ:
      del os.environ[eventloop._EVENT_LOOP_KEY]
    self.ev = eventloop.get_event_loop()
  the_module = eventloop
  def testQueueTasklet(self):
    # queue_call() must keep (time, callback, args, kwds) tuples sorted
    # by absolute fire time, regardless of insertion order.
    def f(unused_number, unused_string, unused_a, unused_b): return 1
    def g(unused_number, unused_string): return 2
    def h(unused_c, unused_d): return 3
    t_before = time.time()
    eventloop.queue_call(1, f, 42, 'hello', unused_a=1, unused_b=2)
    eventloop.queue_call(3, h, unused_c=3, unused_d=4)
    eventloop.queue_call(2, g, 100, 'abc')
    t_after = time.time()
    self.assertEqual(len(self.ev.queue), 3)
    [(t1, f1, a1, k1), (t2, f2, a2, k2), (t3, f3, a3, k3)] = self.ev.queue
    self.assertTrue(t1 < t2)
    self.assertTrue(t2 < t3)
    self.assertTrue(abs(t1 - (t_before + 1)) <= t_after - t_before)
    self.assertTrue(abs(t2 - (t_before + 2)) <= t_after - t_before)
    self.assertTrue(abs(t3 - (t_before + 3)) <= t_after - t_before)
    self.assertEqual(f1, f)
    self.assertEqual(f2, g)
    self.assertEqual(f3, h)
    self.assertEqual(a1, (42, 'hello'))
    self.assertEqual(a2, (100, 'abc'))
    self.assertEqual(a3, ())
    self.assertEqual(k1, {'unused_a': 1, 'unused_b': 2})
    self.assertEqual(k2, {})
    self.assertEqual(k3, {'unused_c': 3, 'unused_d': 4})
    # Delete queued events (they would fail or take a long time).
    ev = eventloop.get_event_loop()
    ev.queue = []
    ev.rpcs = {}
  def testFifoOrderForEventsWithDelayNone(self):
    # Events queued with delay=None go on the 'current' deque and must
    # run before timed events, in insertion (FIFO) order.
    order = []
    def foo(arg): order.append(arg)
    eventloop.queue_call(None, foo, 2)
    eventloop.queue_call(None, foo, 1)
    eventloop.queue_call(0, foo, 0)
    self.assertEqual(len(self.ev.current), 2)
    self.assertEqual(len(self.ev.queue), 1)
    [(_f1, a1, _k1), (_f2, a2, _k2)] = self.ev.current
    self.assertEqual(a1, (2,)) # first event should have arg = 2
    self.assertEqual(a2, (1,)) # second event should have arg = 1
    (_t, _f, a, _k) = self.ev.queue[0]
    self.assertEqual(a, (0,)) # third event should have arg = 0
    eventloop.run()
    # test that events are executed in FIFO order, not sort order
    self.assertEqual(order, [2, 1, 0])
  def testRun(self):
    # run() executes timed events in fire-time order.
    record = []
    def foo(arg):
      record.append(arg)
    eventloop.queue_call(0.2, foo, 42)
    eventloop.queue_call(0.1, foo, arg='hello')
    eventloop.run()
    self.assertEqual(record, ['hello', 42])
  def testRunWithRpcs(self):
    # An RPC queued via queue_rpc() completes (with its on_completion
    # callback) before later timed events.
    record = []
    def foo(arg):
      record.append(arg)
    eventloop.queue_call(0.1, foo, 42)
    config = datastore_rpc.Configuration(on_completion=foo)
    rpc = self.conn.async_get(config, [])
    if not isinstance(rpc, apiproxy_stub_map.UserRPC):
      # A MultiRpc wrapper: unwrap its single underlying UserRPC.
      self.assertEqual(len(rpc.rpcs), 1)
      rpc = rpc.rpcs[0]
    eventloop.queue_rpc(rpc)
    eventloop.run()
    self.assertEqual(record, [rpc, 42])
    self.assertEqual(rpc.state, 2) # TODO: Use apiproxy_rpc.RPC.FINISHING.
  def testIdle(self):
    # Idlers returning False stay registered; returning None removes them.
    counters = [0, 0, 0]
    def idler1():
      logging.info('idler1 running')
      counters[0] += 1
      return False
    def idler2(a, b=None):
      logging.info('idler2 running: a=%s, b=%s', a, b)
      counters[1] += 1
      return False
    def idler3(k=None):
      logging.info('idler3 running: k=%s', k)
      counters[2] += 1
      return None
    self.ev.add_idle(idler1)
    self.ev.add_idle(idler2, 10, 20)
    eventloop.add_idle(idler3, k=42)
    self.ev.run()
    self.assertEqual(counters, [1, 1, 1])
    self.ev.run()
    self.assertEqual(counters, [2, 2, 1])
  def testMultiRpcReadiness(self):
    # A MultiRpc whose sub-RPCs are already complete must still trigger
    # its queued callback.
    from . import key
    k1 = key.Key('Foo', 1)
    k2 = key.Key('Foo', 2)
    r1 = self.conn.async_get(None, [k1])
    r2 = self.conn.async_get(None, [k2])
    rpc = datastore_rpc.MultiRpc([r1, r2])
    r1.wait()
    r2.wait()
    calls = []
    def callback():
      calls.append(1)
    eventloop.queue_rpc(rpc, callback)
    eventloop.run()
    self.assertEqual(calls, [1])
  def testCleanUpStaleEvents(self):
    # See issue 127. http://goo.gl/2p5Pn
    from . import model
    class M(model.Model): pass
    M().put()
    M().put()
    M().put()
    # The fetch_page() call leaves an unnecessary but unavoidable RPC
    # around that is never waited for. This was causing problems when
    # it was being garbage-collected in get_event_loop(), especially
    # with Python 2.5, where GeneratorExit derived from Exception.
    M.query().fetch_page(2)
    ev = eventloop.get_event_loop()
    self.assertEqual(len(ev.rpcs), 1)
    del os.environ[eventloop._EVENT_LOOP_KEY]
    ev = eventloop.get_event_loop() # A new event loop.
    self.assertEqual(len(ev.rpcs), 0)
def main():
  # Entry point so this test module can be run directly.
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""Tests for context.py."""
import logging
import random
import socket
import threading
import time
import unittest
from .google_imports import datastore_errors
from .google_imports import memcache
from .google_imports import taskqueue
from .google_imports import datastore_rpc
from .google_imports import apiproxy_errors
from .google_test_imports import datastore_stub_util
from . import context
from . import eventloop
from . import model
from . import query
from . import tasklets
from . import test_utils
# Return values for memcache_{set,add,replace,cas}.
STORED = True
NOT_STORED = False
class MyAutoBatcher(context.AutoBatcher):
  # AutoBatcher subclass that records every (tasklet name, todo list)
  # pair in a class-level log so tests can assert on batching behavior.
  _log = []
  @classmethod
  def reset_log(cls):
    cls._log = []
  def __init__(self, todo_tasklet, limit):
    def wrap(todo, options):
      # Log the call, then delegate to the real batching tasklet.
      self.__class__._log.append((todo_tasklet.__name__, todo))
      return todo_tasklet(todo, options)
    super(MyAutoBatcher, self).__init__(wrap, limit)
class ContextTests(test_utils.NDBTest):
  def setUp(self):
    super(ContextTests, self).setUp()
    MyAutoBatcher.reset_log()
    # Fresh Context with the logging auto-batcher installed.
    self.ctx = context.Context(
      conn=model.make_connection(default_model=model.Expando),
      auto_batcher_class=MyAutoBatcher)
    tasklets.set_context(self.ctx)
  the_module = context
  def testContext_AutoBatcher_Get(self):
    # Three concurrent gets should be coalesced into single batched
    # memcache/datastore calls of three items each.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=['Foo', 1])
      key2 = model.Key(flat=['Foo', 2])
      key3 = model.Key(flat=['Foo', 3])
      fut1 = self.ctx.get(key1)
      fut2 = self.ctx.get(key2)
      fut3 = self.ctx.get(key3)
      ent1 = yield fut1
      ent2 = yield fut2
      ent3 = yield fut3
      raise tasklets.Return([ent1, ent2, ent3])
    ents = foo().get_result()
    self.assertEqual(ents, [None, None, None])
    log = MyAutoBatcher._log
    self.assertEqual(len(log), 4)
    name, todo = log[0]
    self.assertEqual(name, '_memcache_get_tasklet')
    self.assertEqual(len(todo), 3)
    name, todo = log[1]
    self.assertEqual(name, '_memcache_set_tasklet')
    self.assertEqual(len(todo), 3)
    name, todo = log[2]
    self.assertEqual(name, '_memcache_get_tasklet')
    self.assertEqual(len(todo), 3)
    name, todo = log[3]
    self.assertEqual(name, '_get_tasklet')
    self.assertEqual(len(todo), 3)
  @tasklets.tasklet
  def create_entities(self):
    # Helper: put three entities with to-be-allocated ids concurrently
    # and return their assigned keys.
    key0 = model.Key(flat=['Foo', None])
    ent1 = model.Model(key=key0)
    ent2 = model.Model(key=key0)
    ent3 = model.Model(key=key0)
    fut1 = self.ctx.put(ent1)
    fut2 = self.ctx.put(ent2)
    fut3 = self.ctx.put(ent3)
    key1 = yield fut1
    key2 = yield fut2
    key3 = yield fut3
    raise tasklets.Return([key1, key2, key3])
  def testContext_AutoBatcher_Put(self):
    # Three concurrent puts should batch into one datastore put and one
    # memcache delete of three items each.
    keys = self.create_entities().get_result()
    self.assertEqual(len(keys), 3)
    self.assertTrue(None not in keys)
    log = MyAutoBatcher._log
    self.assertEqual(len(log), 2)
    name, todo = log[0]
    self.assertEqual(name, '_put_tasklet')
    self.assertEqual(len(todo), 3)
    name, todo = log[1]
    self.assertEqual(name, '_memcache_del_tasklet')
    self.assertEqual(len(todo), 3)
  def testContext_AutoBatcher_Delete(self):
    # Three concurrent deletes should batch into one memcache set (lock)
    # and one datastore delete of three items each.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=['Foo', 1])
      key2 = model.Key(flat=['Foo', 2])
      key3 = model.Key(flat=['Foo', 3])
      fut1 = self.ctx.delete(key1)
      fut2 = self.ctx.delete(key2)
      fut3 = self.ctx.delete(key3)
      yield fut1
      yield fut2
      yield fut3
    foo().check_success()
    self.assertEqual(len(MyAutoBatcher._log), 2)
    name, todo = MyAutoBatcher._log[0]
    self.assertEqual(name, '_memcache_set_tasklet')
    self.assertEqual(len(todo), 3)
    name, todo = MyAutoBatcher._log[1]
    self.assertEqual(name, '_delete_tasklet')
    self.assertEqual(len(todo), 3)
  def testContext_AutoBatcher_Limit(self):
    # Check that the default limit is taken from the connection.
    self.assertEqual(self.ctx._get_batcher._limit,
                     datastore_rpc.Connection.MAX_GET_KEYS)
    # Create a Connection with config options that will be overridden
    # by later config options
    conn_config = context.ContextOptions(max_put_entities=3,
                                         max_memcache_items=7)
    conn = model.make_connection(config=conn_config,
                                 default_model=model.Expando)
    real_config = context.ContextOptions(max_put_entities=25,
                                         max_memcache_items=100)
    self.ctx = context.Context(
      conn=conn,
      auto_batcher_class=MyAutoBatcher,
      config=real_config)
    @tasklets.tasklet
    def foo():
      # 49 puts with limit 25 must split into batches of 24/25.
      es = [model.Model(key=model.Key('Foo', None)) for _ in range(49)]
      fs = [self.ctx.put(e) for e in es]
      self.ctx.flush()
      ks = yield fs
      self.assertEqual(len(ks), 49)
      self.assertTrue(all(isinstance(k, model.Key) for k in ks))
    foo().get_result()
    self.assertEqual(len(MyAutoBatcher._log), 4)
    for name, todo in MyAutoBatcher._log[2:]:
      self.assertEqual(name, '_memcache_del_tasklet')
      self.assertTrue(len(todo) in (24, 25))
    for name, todo in MyAutoBatcher._log[:2]:
      self.assertEqual(name, '_put_tasklet')
      self.assertTrue(len(todo) in (24, 25))
  def testContext_MultiRpc(self):
    # This test really tests the proper handling of MultiRpc by
    # queue_rpc() in eventloop.py. It's easier to test from here, and
    # gives more assurance that it works.
    config = datastore_rpc.Configuration(max_get_keys=3, max_put_entities=3)
    self.ctx._conn = model.make_connection(config, default_model=model.Expando)
    @tasklets.tasklet
    def foo():
      # 10 entities with a batch limit of 3 forces MultiRpc usage.
      ents = [model.Expando() for _ in range(10)]
      futs = [self.ctx.put(ent) for ent in ents]
      keys = yield futs
      futs = [self.ctx.get(key) for key in keys]
      ents2 = yield futs
      self.assertEqual(ents2, ents)
      raise tasklets.Return(keys)
    keys = foo().get_result()
    self.assertEqual(len(keys), 10)
  def testContext_Cache(self):
    # The in-context cache returns the identical object for repeated
    # gets and records deletions as None entries.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      key = yield self.ctx.put(ent1)
      self.assertTrue(key1 in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      b = yield self.ctx.get(key1)
      self.assertTrue(a is b)
      yield self.ctx.delete(key1)
      self.assertTrue(self.ctx._cache[key] is None) # Whitebox.
      a = yield self.ctx.get(key1)
      self.assertTrue(a is None)
      self.ctx.clear_cache()
      self.assertEqual(self.ctx._cache, {}) # Whitebox.
    foo().check_success()
  def testContext_CacheMemcache(self):
    # Test that when get() finds the value in memcache, it updates
    # _cache.
    class Foo(model.Model):
      pass
    ctx = self.ctx
    ctx.set_cache_policy(False)
    ctx.set_memcache_policy(False)
    ent = Foo()
    key = ent.put()
    mkey = ctx._memcache_prefix + key.urlsafe()
    self.assertFalse(key in ctx._cache)
    self.assertEqual(None, memcache.get(mkey))
    ctx.set_memcache_policy(True)
    key.get()
    self.assertFalse(key in ctx._cache)
    self.assertNotEqual(None, memcache.get(mkey))
    eventloop.run()
    ctx.set_cache_policy(True)
    key.get() # Satisfied from memcache
    self.assertTrue(key in ctx._cache)
  def testContext_CacheMisses(self):
    # Test that get() caches misses if use_datastore is true but not
    # if false. This involves whitebox checks using ctx._cache.
    # See issue 106. http://goo.gl/DLiij
    ctx = self.ctx
    key = model.Key('Foo', 42)
    self.assertFalse(key in ctx._cache)
    ctx.get(key, use_datastore=False).wait()
    self.assertFalse(key in ctx._cache)
    ctx.get(key, use_memcache=False).wait()
    self.assertTrue(key in ctx._cache)
    self.assertEqual(ctx._cache[key], None)
    ctx.clear_cache()
    ctx.get(key).wait()
    self.assertTrue(key in ctx._cache)
    self.assertEqual(ctx._cache[key], None)
  def testContext_CachePolicy(self):
    # With a policy that refuses caching, repeated gets return distinct
    # objects and nothing is recorded in _cache.
    def should_cache(unused_key):
      return False
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      key = yield self.ctx.put(ent1)
      self.assertTrue(key1 not in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      b = yield self.ctx.get(key1)
      self.assertTrue(a is not b)
      yield self.ctx.delete(key1)
      self.assertTrue(key not in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      self.assertTrue(a is None)
    self.ctx.set_cache_policy(should_cache)
    self.ctx.set_memcache_policy(False)
    foo().check_success()
  def testContext_CachePolicyDisabledLater(self):
    # If the cache is disabled after an entity is stored in the cache,
    # further get() attempts *must not* return the result stored in cache.
    self.ctx.set_cache_policy(lambda unused_key: True)
    key1 = model.Key(flat=('Foo', 1))
    ent1 = model.Expando(key=key1)
    self.ctx.put(ent1).get_result()
    # get() uses cache
    self.assertTrue(key1 in self.ctx._cache) # Whitebox.
    self.assertEqual(self.ctx.get(key1).get_result(), ent1)
    # get() uses cache
    self.ctx._cache[key1] = None # Whitebox.
    self.assertEqual(self.ctx.get(key1).get_result(), None)
    # get() doesn't use cache
    self.ctx.set_cache_policy(lambda unused_key: False)
    self.assertEqual(self.ctx.get(key1).get_result(), ent1)
  def testContext_NamespaceBonanza(self):
    # Test that memcache ops issued for datastore caching use the
    # correct namespace.
    def assertNone(expr):
      self.assertTrue(expr is None, repr(expr))
    def assertNotNone(expr):
      self.assertTrue(expr is not None, repr(expr))
    def assertLocked(expr):
      self.assertTrue(expr is context._LOCKED, repr(expr))
    def assertProtobuf(expr, ent):
      self.assertEqual(expr,
                       ent._to_pb(set_key=False).SerializePartialToString())
    class Foo(model.Model):
      pass
    # Two keys with identical ids but different namespaces.
    k1 = model.Key(Foo, 1, namespace='a')
    k2 = model.Key(Foo, 2, namespace='b')
    mk1 = self.ctx._memcache_prefix + k1.urlsafe()
    mk2 = self.ctx._memcache_prefix + k2.urlsafe()
    e1 = Foo(key=k1)
    e2 = Foo(key=k2)
    self.ctx.set_cache_policy(False)
    self.ctx.set_memcache_policy(True)
    self.ctx.set_datastore_policy(False) # This will vary in subtests
    # Test put with datastore policy off
    k1 = self.ctx.put(e1).get_result()
    k2 = self.ctx.put(e2).get_result()
    # Nothing should be in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Only k1 is found in namespace 'a'
    assertProtobuf(memcache.get(mk1, namespace='a'), e1)
    assertNone(memcache.get(mk2, namespace='a'))
    # Only k2 is found in namespace 'b'
    assertNone(memcache.get(mk1, namespace='b'))
    assertProtobuf(memcache.get(mk2, namespace='b'), e2)
    memcache.flush_all()
    self.ctx.set_datastore_policy(True)
    # Test put with datastore policy on
    k1_fut = self.ctx.put(e1)
    while not self.ctx._put_batcher._running:
      eventloop.run0()
    # Nothing should be in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Only k1 is found in namespace 'a', as _LOCKED
    assertLocked(memcache.get(mk1, namespace='a'))
    assertNone(memcache.get(mk2, namespace='a'))
    self.assertEqual(k1_fut.get_result(), k1)
    # Have to test one at a time, otherwise _LOCKED value may not be set
    k2_fut = self.ctx.put(e2)
    while not self.ctx._put_batcher._running:
      eventloop.run0()
    # Only k2 is found in namespace 'b', as _LOCKED
    assertNone(memcache.get(mk1, namespace='b'))
    assertLocked(memcache.get(mk2, namespace='b'))
    # Keys should be identical
    self.assertEqual(k2_fut.get_result(), k2)
    memcache.flush_all()
    # Test get with cold cache
    e1 = self.ctx.get(k1).get_result()
    e2 = self.ctx.get(k2).get_result()
    eventloop.run() # Wait for memcache RPCs to run
    # Neither is found in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Only k1 is found in namespace 'a'
    assertProtobuf(memcache.get(mk1, namespace='a'), e1)
    assertNone(memcache.get(mk2, namespace='a'))
    # Only k2 is found in namespace 'b'
    assertNone(memcache.get(mk1, namespace='b'))
    assertProtobuf(memcache.get(mk2, namespace='b'), e2)
    self.ctx.set_datastore_policy(False)
    # Test get with warm cache
    self.ctx.get(k1).get_result()
    self.ctx.get(k2).get_result()
    eventloop.run() # Wait for memcache RPCs to run
    # Neither is found in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Only k1 is found in namespace 'a'
    assertNotNone(memcache.get(mk1, namespace='a'))
    assertNone(memcache.get(mk2, namespace='a'))
    # Only k2 is found in namespace 'b'
    assertNone(memcache.get(mk1, namespace='b'))
    assertNotNone(memcache.get(mk2, namespace='b'))
    self.ctx.set_datastore_policy(True)
    # Test delete
    self.ctx.delete(k1).check_success()
    self.ctx.delete(k2).check_success()
    # Nothing should be in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Only k1 is found in namespace 'a', as _LOCKED
    assertLocked(memcache.get(mk1, namespace='a'))
    assertNone(memcache.get(mk2, namespace='a'))
    # Only k2 is found in namespace 'b', as _LOCKED
    assertNone(memcache.get(mk1, namespace='b'))
    assertLocked(memcache.get(mk2, namespace='b'))
    memcache.flush_all()
    # Test _clear_memcache (it deletes the keys)
    self.ctx._clear_memcache([k1, k2]).check_success()
    # Nothing should be in the empty namespace
    assertNone(memcache.get(mk1, namespace=''))
    assertNone(memcache.get(mk2, namespace=''))
    # Nothing should be in namespace 'a'
    assertNone(memcache.get(mk1, namespace='a'))
    assertNone(memcache.get(mk2, namespace='a'))
    # Nothing should be in namespace 'b'
    assertNone(memcache.get(mk1, namespace='b'))
    assertNone(memcache.get(mk2, namespace='b'))
  def testContext_Memcache(self):
    # Entities read with use_cache=False get written through to
    # memcache as serialized protobufs.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      key2 = model.Key(flat=('Foo', 2))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      ent2 = model.Expando(key=key2, foo=1, bar='world')
      self.ctx.set_memcache_policy(False) # Disable writing _LOCKED
      k1, k2 = yield self.ctx.put(ent1), self.ctx.put(ent2)
      self.ctx.set_memcache_policy(True)
      self.assertEqual(k1, key1)
      self.assertEqual(k2, key2)
      # Write to memcache.
      yield (self.ctx.get(k1, use_cache=False),
             self.ctx.get(k2, use_cache=False))
      eventloop.run() # Let other tasklet complete.
    keys = [k1.urlsafe(), k2.urlsafe()]
    results = memcache.get_multi(keys, key_prefix=self.ctx._memcache_prefix)
    self.assertEqual(
      results,
      {key1.urlsafe(): ent1._to_pb(set_key=False).SerializePartialToString(),
       key2.urlsafe(): ent2._to_pb(set_key=False).SerializePartialToString(),
       })
    foo().check_success()
def testContext_MemcachePolicy(self):
badkeys = []
def tracking_add_async(*args, **kwds):
try:
res = save_add_async(*args, **kwds)
if badkeys and not res:
res = badkeys
track.append((args, kwds, res, None))
return res
except Exception, err:
track.append((args, kwds, None, err))
raise
@tasklets.tasklet
def foo():
k1, k2 = yield self.ctx.put(ent1), self.ctx.put(ent2)
self.assertEqual(k1, key1)
self.assertEqual(k2, key2)
# Write to memcache.
yield (self.ctx.get(k1, use_cache=False),
self.ctx.get(k2, use_cache=False))
eventloop.run() # Let other tasklet complete.
key1 = model.Key('Foo', 1)
key2 = model.Key('Foo', 2)
ent1 = model.Expando(key=key1, foo=42, bar='hello')
ent2 = model.Expando(key=key2, foo=1, bar='world')
save_add_multi_async = self.ctx._memcache.add_multi_async
try:
self.ctx._memcache.add_multi_async = tracking_add_multi_async
yield self.ctx._memcache.flush_all_async()
track = []
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb(),
key2.urlsafe(): ent2._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 0})
yield self.ctx._memcache.flush_all_async()
track = []
self.ctx.set_memcache_policy(lambda unused_key: False)
foo().check_success()
self.assertEqual(len(track), 0)
yield self.ctx._memcache.flush_all_async()
track = []
self.ctx.set_memcache_policy(lambda key: key == key1)
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 0})
yield self.ctx._memcache.flush_all_async()
track = []
self.ctx.set_memcache_policy(lambda unused_key: True)
self.ctx.set_memcache_timeout_policy(lambda key: key.id())
foo().check_success()
self.assertEqual(len(track), 2)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 1})
self.assertEqual(track[1][0],
({key2.urlsafe(): ent2._to_pb()},))
self.assertEqual(track[1][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 2})
yield self.ctx._memcache.flush_all_async()
track = []
badkeys = [key2.urlsafe()]
self.ctx.set_memcache_timeout_policy(lambda unused_key: 0)
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][2], badkeys)
yield self.ctx._memcache.flush_all_async()
finally:
self.ctx._memcache.add_multi_async = save_add_multi_async
  def testContext_CacheQuery(self):
    # Query results must be served from (and stored in) the in-context
    # cache, so result objects are identical to the cached ones.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      key2 = model.Key(flat=('Foo', 2))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      ent2 = model.Expando(key=key2, foo=1, bar='world')
      key1a, key2a = yield self.ctx.put(ent1), self.ctx.put(ent2)
      self.assertTrue(key1 in self.ctx._cache) # Whitebox.
      self.assertTrue(key2 in self.ctx._cache) # Whitebox.
      self.assertEqual(key1, key1a)
      self.assertEqual(key2, key2a)
      @tasklets.tasklet
      def callback(ent):
        return ent
      qry = query.Query(kind='Foo')
      results = yield self.ctx.map_query(qry, callback)
      self.assertEqual(results, [ent1, ent2])
      self.assertTrue(results[0] is self.ctx._cache[ent1.key])
      self.assertTrue(results[1] is self.ctx._cache[ent2.key])
    foo().check_success()
  def testContext_AllocateIds(self):
    # allocate_ids() supports both size= and max= forms, returning an
    # inclusive (lo, hi) id range.
    @tasklets.tasklet
    def foo():
      key = model.Key(flat=('Foo', 1))
      lo_hi = yield self.ctx.allocate_ids(key, size=10)
      self.assertEqual(lo_hi, (1, 10))
      lo_hi = yield self.ctx.allocate_ids(key, max=20)
      self.assertEqual(lo_hi, (11, 20))
    foo().check_success()
  def testContext_MapQuery(self):
    # map_query() with a tasklet callback collects one result per entity.
    @tasklets.tasklet
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(set(res), set([1, 2, 3]))
  def testContext_MapQuery_NoCallback(self):
    # map_query() with callback=None yields the entities themselves.
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, None)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(len(res), 3)
    for i, ent in enumerate(res):
      self.assertTrue(isinstance(ent, model.Model))
      self.assertEqual(ent.key.flat(), ('Foo', i + 1))
  def testContext_MapQuery_NonTaskletCallback(self):
    # A plain (non-tasklet) callback works too.
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(res, [1, 2, 3])
  def testContext_MapQuery_CustomFuture(self):
    # With merge_future= the map returns None and results arrive via the
    # queue future; EOFError signals exhaustion.
    mfut = tasklets.QueueFuture()
    @tasklets.tasklet
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback, merge_future=mfut)
      self.assertEqual(res, None)
      vals = set()
      for _ in range(3):
        val = yield mfut.getq()
        vals.add(val)
      fail = mfut.getq()
      self.assertRaises(EOFError, fail.get_result)
      raise tasklets.Return(vals)
    res = foo().get_result()
    self.assertEqual(res, set([1, 2, 3]))
  def testContext_MapQuery_KeysOnly(self):
    # With keys_only options, the callback receives keys, not entities.
    qo = query.QueryOptions(keys_only=True)
    @tasklets.tasklet
    def callback(key):
      return key.pairs()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback, options=qo)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(set(res), set([('Foo', 1), ('Foo', 2), ('Foo', 3)]))
  def testContext_MapQuery_Cursors(self):
    # produce_cursors must not change what the callback receives.
    qo = query.QueryOptions(produce_cursors=True)
    @tasklets.tasklet
    def callback(ent):
      return ent.key.pairs()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback, options=qo)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(set(res), set([('Foo', 1), ('Foo', 2), ('Foo', 3)]))
  def testContext_IterQuery(self):
    # iter_query() returns a queue-style iterator; getq() raises
    # EOFError when the results are exhausted.
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      it = self.ctx.iter_query(qry)
      res = []
      while True:
        try:
          ent = yield it.getq()
        except EOFError:
          break
        res.append(ent)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(len(res), 3)
    for i, ent in enumerate(res):
      self.assertTrue(isinstance(ent, model.Model))
      self.assertEqual(ent.key.flat(), ('Foo', i + 1))
  def testContext_TransactionFailed(self):
    # A transaction gets its own context/cache; a successful commit is
    # reflected in the outer context's cache.
    @tasklets.tasklet
    def foo():
      key = model.Key(flat=('Foo', 1))
      ent = model.Expando(key=key, bar=1)
      yield self.ctx.put(ent)
      @tasklets.tasklet
      def callback():
        ctx = tasklets.get_context()
        self.assertTrue(key not in ctx._cache) # Whitebox.
        e = yield key.get_async()
        self.assertTrue(key in ctx._cache) # Whitebox.
        e.bar = 2
        yield e.put_async()
      yield self.ctx.transaction(callback)
      self.assertEqual(self.ctx._cache[key].bar, 2)
    foo().check_success()
  def testContext_TransactionException(self):
    # An exception inside the transaction callback aborts the commit.
    self.ExpectWarnings()
    key = model.Key('Foo', 1)
    @tasklets.tasklet
    def foo():
      ent = model.Expando(key=key, bar=1)
      @tasklets.tasklet
      def callback():
        yield ent.put_async()
        raise Exception('foo')
      yield self.ctx.transaction(callback)
    self.assertRaises(Exception, foo().check_success)
    self.assertEqual(key.get(), None)
  def testContext_TransactionRollback(self):
    # model.Rollback aborts the transaction without propagating an error.
    self.ExpectWarnings()
    key = model.Key('Foo', 1)
    @tasklets.tasklet
    def foo():
      ent = model.Expando(key=key, bar=1)
      @tasklets.tasklet
      def callback():
        yield ent.put_async()
        raise model.Rollback()
      yield self.ctx.transaction(callback)
    foo().check_success()
    self.assertEqual(key.get(), None)
  def testContext_TransactionAddTask(self):
    # Adding a transactional taskqueue task inside a transaction works.
    self.ExpectWarnings()
    key = model.Key('Foo', 1)
    @tasklets.tasklet
    def foo():
      ent = model.Expando(key=key, bar=1)
      @tasklets.tasklet
      def callback():
        ctx = tasklets.get_context()
        yield ctx.put(ent)
        taskqueue.add(url='/', transactional=True)
      yield self.ctx.transaction(callback)
    foo().check_success()
  def testContext_TransactionXG(self):
    self.ExpectWarnings()
    # The XG option only works on the HRD datastore
    ds_stub = self.testbed.get_stub('datastore_v3')
    hrd_policy = datastore_stub_util.BaseHighReplicationConsistencyPolicy()
    ds_stub.SetConsistencyPolicy(hrd_policy)
    key1 = model.Key('Foo', 1)
    key2 = model.Key('Foo', 2)
    @tasklets.tasklet
    def tx():
      # Touches two entity groups: requires a cross-group transaction.
      ctx = tasklets.get_context()
      ent1 = model.Expando(key=key1, foo=1)
      ent2 = model.Expando(key=key2, bar=2)
      yield ctx.put(ent1), ctx.put(ent2)
      raise tasklets.Return(42)
    self.assertRaises(datastore_errors.BadRequestError,
                      self.ctx.transaction(tx).check_success)
    res = self.ctx.transaction(tx, xg=True).get_result()
    self.assertEqual(res, 42)
  def testContext_TransactionMemcache(self):
    """Transactions lock written keys in memcache, then clear them.

    While the txn runs, the key it writes holds the _LOCKED sentinel;
    after commit both keys are absent from memcache, and subsequent
    non-transactional gets repopulate it.
    """
    class Foo(model.Model):
      name = model.StringProperty()
    foo1 = Foo(name='foo1')
    foo2 = Foo(name='foo2')
    key1 = foo1.put()
    key2 = foo2.put()
    skey1 = self.ctx._memcache_prefix + key1.urlsafe()
    skey2 = self.ctx._memcache_prefix + key2.urlsafe()
    # Be sure nothing is in memcache.
    self.assertEqual(memcache.get(skey1), None)
    self.assertEqual(memcache.get(skey2), None)
    # Be sure nothing is in the context cache.
    self.ctx.clear_cache()
    # Run some code in a transaction.
    def txn():
      ctx = tasklets.get_context()
      self.assertTrue(ctx is not self.ctx)
      f1 = key1.get()
      # NOTE(review): reads key1 again (not key2) -- presumably
      # intentional, to exercise a repeated get; confirm.
      f2 = key1.get()
      f1.name += 'a'
      f1.put()
      # Don't put f2.
      # Verify the state of memcache.
      self.assertEqual(memcache.get(skey1), context._LOCKED)
      self.assertEqual(memcache.get(skey2), None)
    self.ctx.transaction(txn).wait()
    # Verify memcache is cleared.
    self.assertEqual(memcache.get(skey1), None)
    self.assertEqual(memcache.get(skey2), None)
    # Clear the context cache.
    self.ctx.clear_cache()
    # Non-transactional get() updates memcache.
    f1 = key1.get()
    f2 = key2.get()
    eventloop.run()  # Wait for memcache.set() RPCs
    self.assertNotEqual(memcache.get(skey1), None)
    self.assertNotEqual(memcache.get(skey2), None)
  def testDefaultContextTransaction(self):
    """transaction() runs its callback in a fresh transactional context.

    Inside the callback the current context is a different object with
    a TransactionalConnection; afterwards the caller's context is
    restored and the callback's return value comes back unchanged.
    """
    @tasklets.synctasklet
    def outer():
      ctx1 = tasklets.get_context()
      @tasklets.tasklet
      def inner():
        ctx2 = tasklets.get_context()
        self.assertTrue(ctx1 is not ctx2)
        self.assertTrue(isinstance(ctx2._conn,
                                   datastore_rpc.TransactionalConnection))
        return 42
      a = yield tasklets.get_context().transaction(inner)
      ctx1a = tasklets.get_context()
      self.assertTrue(ctx1 is ctx1a)
      raise tasklets.Return(a)
    b = outer()
    self.assertEqual(b, 42)
  def testExplicitTransactionClearsDefaultContext(self):
    """An explicit ctx1.transaction() must not leak the txn context.

    The transactional context is current only inside the callback;
    before, between yields, and after, the caller sees its own
    context, and the original default context survives overall.
    """
    old_ctx = tasklets.get_context()
    @tasklets.synctasklet
    def outer():
      ctx1 = tasklets.get_context()
      @tasklets.tasklet
      def inner():
        ctx = tasklets.get_context()
        self.assertTrue(ctx is not ctx1)
        key = model.Key('Account', 1)
        ent = yield key.get_async()
        self.assertTrue(tasklets.get_context() is ctx)
        self.assertTrue(ent is None)
        raise tasklets.Return(42)
      fut = ctx1.transaction(inner)
      self.assertEqual(tasklets.get_context(), ctx1)
      val = yield fut
      self.assertEqual(tasklets.get_context(), ctx1)
      raise tasklets.Return(val)
    val = outer()
    self.assertEqual(val, 42)
    self.assertTrue(tasklets.get_context() is old_ctx)
  def testKindError(self):
    """Getting an entity whose kind has no model class raises KindError."""
    self.ExpectWarnings()
    ctx = context.Context()
    # If the cache is enabled, attempts to retrieve the object we just put will
    # be satisfied from the cache, so the adapter we're testing will never get
    # called.
    ctx.set_cache_policy(lambda unused_key: False)
    @tasklets.tasklet
    def foo():
      # Foo class is declared in query_test, so let's get an unusual class
      # name that is guaranteed to have no model class registered.
      key1 = model.Key(flat=('ThisModelClassDoesntExist', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      yield ctx.put(ent1)
      yield ctx.get(key1)
    self.assertRaises(model.KindError, foo().check_success)
def testMemcachePolicy(self):
# Bug reported by Jack Hebert.
class P(model.Model): pass
class Q(model.Model): pass
def policy(key): return key.kind() != 'P'
self.ctx.set_cache_policy(policy)
self.ctx.set_memcache_policy(policy)
k1 = model.Key(P, 1)
k2 = model.Key(Q, 1)
f1 = self.ctx.get(k1)
f2 = self.ctx.get(k2)
self.assertTrue(f1.get_result() is None)
self.assertTrue(f2.get_result() is None)
  def testMemcacheDeleteThenGet(self):
    """A put right after a delete must still land in memcache.

    Test that memcache is written synchronously when datastore policy
    is off: the preceding delete must not suppress the set.
    """
    self.ctx.set_cache_policy(False)
    self.ctx.set_datastore_policy(False)
    self.ctx.set_memcache_policy(True)
    class EmptyModel(model.Model):
      pass
    key = model.Key(EmptyModel, 1)
    # Delete the key (just to be sure).
    del_fut = self.ctx.delete(key)
    del_fut.get_result()
    # Create and store a new model instance using the key we just deleted.
    # Because datastore policy is off, this attempts to write it to memcache.
    EmptyModel(key=key).put()
    # Verify that it is now in memcache.
    get_fut = self.ctx.get(key)
    ent = get_fut.get_result()
    self.assertTrue(ent is not None,
                    'Memcache delete did block memcache set %r' % ent)
  def testMemcacheAPI(self):
    """Exercise the Context memcache wrappers end to end.

    Covers get/set/add/replace/delete/incr/decr, including the benign
    return values once keys have been deleted (incr/decr of a missing
    key yields None, replace yields NOT_STORED) and the expected
    errors when incr/decr hit non-numeric values.
    """
    self.ExpectErrors()
    @tasklets.tasklet
    def foo():
      ctx = tasklets.get_context()
      k1 = 'k1'
      k2 = 'k2'
      vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
      self.assertEqual(vv, [None, None])
      v1 = '24'
      v2 = 42
      vv = yield ctx.memcache_set(k1, v1), ctx.memcache_set(k2, v2)
      self.assertEqual(vv, [STORED, STORED])
      vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
      self.assertEqual(vv, [v1, v2])
      vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
      self.assertEqual(vv, [25, 41])
      # Note: incr on a string value turns it into a string result.
      vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
      self.assertEqual(vv, ['25', 41])
      vv = yield ctx.memcache_incr(k1, -1), ctx.memcache_decr(k2, -1)
      self.assertEqual(vv, [24, 42])
      vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
      self.assertEqual(vv, [v1, v2])
      vv = yield ctx.memcache_add(k1, 'a'), ctx.memcache_add(k2, 'b')
      self.assertEqual(vv, [NOT_STORED, NOT_STORED])
      vv = yield ctx.memcache_replace(k1, 'a'), ctx.memcache_replace(k2, 'b')
      self.assertEqual(vv, [STORED, STORED])
      vv = yield ctx.memcache_delete(k1), ctx.memcache_delete(k2)
      self.assertEqual(vv, [memcache.DELETE_SUCCESSFUL,
                            memcache.DELETE_SUCCESSFUL])
      vv = yield ctx.memcache_delete(k1), ctx.memcache_delete(k2)
      self.assertEqual(vv, [memcache.DELETE_ITEM_MISSING,
                            memcache.DELETE_ITEM_MISSING])
      vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
      self.assertEqual(vv, [None, None])
      vv = yield ctx.memcache_replace(k1, 'a'), ctx.memcache_replace(k2, 'b')
      self.assertEqual(vv, [NOT_STORED, NOT_STORED])
      vv = yield ctx.memcache_add(k1, 'a'), ctx.memcache_add(k2, 'b')
      self.assertEqual(vv, [STORED, STORED])
      logging.warn('Following two errors are expected:')
      vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
      self.assertEqual(vv, [None, None])
    foo().get_result()
  def testMemcacheCAS(self):
    """cas() succeeds only for the context that fetched with gets().

    c1 reads with gets()/get(for_cas=True), so its cas() calls win;
    c2 read with plain get(), so its cas() calls are NOT_STORED.
    """
    @tasklets.tasklet
    def foo():
      c1 = context.Context()
      c2 = context.Context()
      k1 = 'k1'
      k2 = 'k2'
      yield c1.memcache_set(k1, 'a'), c1.memcache_set(k2, 'b')
      vv = yield c2.memcache_get(k1), c2.memcache_get(k2)
      self.assertEqual(vv, ['a', 'b'])
      vv = yield c1.memcache_gets(k1), c1.memcache_get(k2, for_cas=True)
      self.assertEqual(vv, ['a', 'b'])
      ffff = [c1.memcache_cas(k1, 'x'), c1.memcache_cas(k2, 'y'),
              c2.memcache_cas(k1, 'p'), c2.memcache_cas(k2, 'q')]
      vvvv = yield ffff
      self.assertEqual(vvvv, [STORED, STORED, NOT_STORED, NOT_STORED])
    foo().get_result()
  def testMemcacheErrors(self):
    """Memcache RPC failures degrade to benign return values.

    See issue 94.  http://goo.gl/E7OBH

    With every memcache RPC forced to fail, get/incr return None,
    set returns a falsy value, and delete reports a network failure.
    """
    # Install an error handler.
    save_create_rpc = memcache.create_rpc
    def fake_check_success(*args):
      raise apiproxy_errors.Error('fake error')
    def fake_create_rpc(*args, **kwds):
      rpc = save_create_rpc(*args, **kwds)
      rpc.check_success = fake_check_success
      return rpc
    try:
      memcache.create_rpc = fake_create_rpc
      val = self.ctx.memcache_get('key2').get_result()
      self.assertEqual(val, None)
      val = self.ctx.memcache_incr('key2').get_result()
      self.assertEqual(val, None)
      ok = self.ctx.memcache_set('key2', 'value2').get_result()
      self.assertFalse(ok)
      ok = self.ctx.memcache_delete('key2').get_result()
      self.assertEqual(ok, memcache.DELETE_NETWORK_FAILURE)
    finally:
      # Always restore the real create_rpc for other tests.
      memcache.create_rpc = save_create_rpc
  def testMemcacheNamespaces(self):
    """All memcache wrappers honor the namespace= argument.

    Values written under namespace 'ns' are invisible in the default
    namespace and round-trip through get/gets/replace/cas/incr/decr/
    delete within 'ns'.
    """
    @tasklets.tasklet
    def foo():
      k1 = 'k1'
      k2 = 'k2'
      ns = 'ns'
      # Write two values in the namespace
      s1, s2 = yield (self.ctx.memcache_set(k1, 42, namespace=ns),
                      self.ctx.memcache_add(k2, 100, namespace=ns))
      self.assertEqual(s1, STORED)
      self.assertEqual(s2, STORED)
      # Check that they aren't in the default namespace
      v1n, v2n = yield (self.ctx.memcache_get(k1),
                        self.ctx.memcache_get(k2))
      self.assertEqual(v1n, None)
      self.assertEqual(v2n, None)
      # Read them back using get and gets
      v1, v2 = yield (self.ctx.memcache_get(k1, namespace=ns),
                      self.ctx.memcache_gets(k2, namespace=ns))
      self.assertEqual(v1, 42)
      self.assertEqual(v2, 100)
      # Write v1+1 back using replace, v2+1 using cas
      s1, s2 = yield (self.ctx.memcache_replace(k1, v1 + 1, namespace=ns),
                      self.ctx.memcache_cas(k2, v2 + 1, namespace=ns))
      self.assertEqual(s1, STORED)
      self.assertEqual(s2, STORED)
      # Apply incr/decr to both
      v1, v2 = yield (self.ctx.memcache_incr(k1, delta=10, namespace=ns),
                      self.ctx.memcache_decr(k2, delta=10, namespace=ns))
      self.assertEqual(v1, 53)  # 42 + 1 + 10
      self.assertEqual(v2, 91)  # 100 + 1 - 10
      # Delete both
      s1, s2 = yield (self.ctx.memcache_delete(k1, namespace=ns),
                      self.ctx.memcache_delete(k2, namespace=ns))
      self.assertEqual(s1, memcache.DELETE_SUCCESSFUL)
      self.assertEqual(s2, memcache.DELETE_SUCCESSFUL)
    foo().check_success()
  def testMemcacheLocking(self):
    """Context.put() locks the key in memcache until the put completes.

    See issue 66.  http://goo.gl/ANBns

    Whitebox: pumps the event loop step by step until the memcache
    set batcher has run, checks the _LOCKED sentinel is in memcache,
    then verifies it is removed once the put future resolves.
    """
    self.ctx.set_cache_policy(False)
    # Prepare: write some entity using Context.put().
    class EmptyModel(model.Model):
      pass
    key = model.Key(EmptyModel, 1)
    mkey = self.ctx._memcache_prefix + key.urlsafe()
    ent = EmptyModel(key=key)
    put_fut = self.ctx.put(ent)
    # Pump the event loop one step at a time until the set batcher
    # has queued and then started its work.
    eventloop.run0()
    self.assertTrue(self.ctx._memcache_set_batcher._queues)
    eventloop.run0()
    self.assertTrue(self.ctx._memcache_set_batcher._running)
    while self.ctx._memcache_set_batcher._running:
      eventloop.run0()
    # Verify that memcache now contains the special _LOCKED value.
    val = memcache.get(mkey)
    self.assertEqual(val, context._LOCKED)
    put_fut.check_success()
    # Verify that memcache _LOCKED value has been removed.
    val = memcache.get(mkey)
    self.assertEqual(val, None)
  def testMemcacheDefaultNamespaceBatching(self):
    """A datastore get and an explicit memcache_get share one batch.

    With datastore policy off, key.get_async() is served via memcache,
    so it and memcache_get() should fold into a single batcher call.
    """
    self.ctx.set_datastore_policy(False)
    key = model.Key('Foo', 1)
    keyfut = key.get_async()
    mfut = self.ctx.memcache_get('bar')
    keyfut.check_success()
    mfut.check_success()
    log = MyAutoBatcher._log
    self.assertEqual(len(log), 1, log)
  def testAsyncInTransaction(self):
    """An un-yielded put_async() inside a transaction still commits.

    See issue 81.  http://goo.gl/F097l
    """
    class Bar(model.Model):
      name = model.StringProperty()
    bar = Bar(id='bar', name='bar')
    bar.put()
    @tasklets.tasklet
    def trans():
      bar = Bar.get_by_id('bar')
      bar.name = 'updated-bar'
      bar.put_async()  # PROBLEM IS HERE, with yield it properly works
    model.transaction_async(trans).get_result()
    bar = bar.key.get()
    self.assertEqual(bar.name, 'updated-bar')
  def testMemcacheProtobufEncoding(self):
    """Implicit memcache writes store serialized protobufs, not pickles.

    Test that when memcache is used implicitly (via key.get with
    use_memcache=True), the cached value is an encoded protobuf
    string rather than a pickled object.
    """
    class Employee(model.Model):
      _use_cache = False
    e = Employee()
    k = e.put(use_memcache=False)
    k.get(use_memcache=True)
    eventloop.run()
    ks = self.ctx._memcache_prefix + k.urlsafe()
    v = memcache.get(ks)
    self.assertTrue(isinstance(v, str))
  def testCorruptMemcache(self):
    """A corrupt memcache value is silently ignored.

    After priming memcache and then injecting a non-protobuf value,
    key.get() must still return the correct entity (re-fetched from
    the datastore) rather than raising.
    """
    self.ExpectWarnings()
    self.ctx.set_cache_policy(False)
    # Create a simple entity/key
    class EmptyModel(model.Model):
      pass
    ent = EmptyModel()
    key = ent.put()
    # Prime memcache
    key.get()
    eventloop.run()
    # Sanity check that memcache is primed
    mkey = self.ctx._memcache_prefix + key.urlsafe()
    self.assertEqual(memcache.get(mkey),
                     ent._to_pb(set_key=False).SerializePartialToString())
    # Inject a corrupt memcache value
    memcache.set(mkey, 'booby trap')
    # Check that ndb ignores the corrupt memcache value
    self.assertEqual(ent, key.get())
def start_test_server(self):
host = '127.0.0.1'
lock = threading.Lock()
lock.acquire()
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
for i in range(10):
port = random.randrange(32768, 60000)
try:
s.bind((host, port))
break
except socket.error:
continue
else:
self.fail('Could not find an unused port in 10 tries')
def run():
s.listen(1)
lock.release() # Signal socket is all set up.
c, addr = s.accept()
s.close()
c.recv(1000) # Throw away request.
c.send('HTTP/1.0 200 Ok\r\n\r\n') # Emptiest response.
c.close()
t = threading.Thread(target=run)
t.setDaemon(True)
t.start()
return host, port, lock
  def testUrlFetch(self):
    """ctx.urlfetch() fetches a URL; result has status_code and content."""
    self.testbed.init_urlfetch_stub()
    host, port, lock = self.start_test_server()
    # Block until socket is set up, or 5 seconds have passed.
    for i in xrange(500):
      if lock.acquire(False):
        break
      time.sleep(0.01)
    else:
      self.fail('Socket was not ready in 5 seconds')
    fut = self.ctx.urlfetch('http://%s:%d' % (host, port))
    result = fut.get_result()
    self.assertEqual(result.status_code, 200)
    self.assertTrue(isinstance(result.content, str))
class ContextFutureCachingTests(test_utils.NDBTest):
  """Tests for caching of in-flight Futures in the Context.

  See issue 62.  http://goo.gl/5zLkK
  """

  def setUp(self):
    super(ContextFutureCachingTests, self).setUp()
    MyAutoBatcher.reset_log()
    # Limits of 1 force a separate batch per item, making batch counts
    # in the assertions below deterministic.
    config = context.ContextOptions(max_get_keys=1, max_memcache_items=1)
    self.ctx = context.Context(
        conn=model.make_connection(default_model=model.Expando),
        auto_batcher_class=MyAutoBatcher, config=config)
    self.ctx.set_cache_policy(False)
    tasklets.set_context(self.ctx)

  def testGetFutureCachingOn(self):
    """With the cache on, concurrent gets yield distinct futures but
    one shared batch and one shared entity; the future is not reused
    after it completes."""
    self.ctx.set_memcache_policy(False)
    class EmptyModel(model.Model):
      pass
    key = EmptyModel().put()
    MyAutoBatcher.reset_log()  # TODO Find out why put calls get_tasklet
    self.ctx.set_cache_policy(True)
    f1, f2 = self.ctx.get(key), self.ctx.get(key)
    self.assertFalse(f1 is f2, 'Context get futures are being cached, '
                     'instead of tasklets.')
    e1, e2 = f1.get_result(), f2.get_result()
    self.assertTrue(e1 is e2, 'Results of concurrent gets are not the same '
                    'with future caching on.')
    self.assertEqual(len(self.ctx._get_batcher._log), 1)
    self.assertFalse(f1 is self.ctx.get(key), 'Future cache persisted.')

  def testGetFutureCachingOff(self):
    """With the cache off, concurrent gets do separate batches and
    yield distinct entity objects."""
    self.ctx.set_memcache_policy(False)
    class EmptyModel(model.Model):
      pass
    key = EmptyModel().put()
    MyAutoBatcher.reset_log()  # TODO Find out why put calls get_tasklet
    f1, f2 = self.ctx.get(key), self.ctx.get(key)
    self.assertFalse(f1 is f2, 'Context get futures are being cached '
                     'with future caching off.')
    e1, e2 = f1.get_result(), f2.get_result()
    self.assertTrue(e1 is not e2, 'Results of concurrent gets are the same '
                    'with future caching off.')
    self.assertEqual(len(self.ctx._get_batcher._log), 2)

  def testMemcacheGetFutureCaching(self):
    """memcache_get futures are shared only with use_cache=True, and
    only until the shared future's result is known."""
    key = 'foo'
    f1 = self.ctx.memcache_get(key, use_cache=True)
    f2 = self.ctx.memcache_get(key, use_cache=True)
    self.assertTrue(f1 is f2,
                    'Context memcache get futures are not cached.')
    f3 = self.ctx.memcache_get(key)
    self.assertFalse(f1 is f3,
                     'Context memcache get futures are cached by default.')
    f1.check_success()
    f4 = self.ctx.memcache_get(key, use_cache=True)
    self.assertFalse(f1 is f4,
                     'Context memcache get future cached after result known.')

  def testMemcacheSetFutureCaching(self):
    """memcache_set futures follow the same caching rules as
    memcache_get futures."""
    key = 'foo'
    value = 'bar'
    f1 = self.ctx.memcache_set(key, value, use_cache=True)
    f2 = self.ctx.memcache_set(key, value, use_cache=True)
    self.assertTrue(f1 is f2,
                    'Context memcache get futures are not cached.')
    f3 = self.ctx.memcache_set(key, value)
    self.assertFalse(f1 is f3,
                     'Context memcache get futures are cached by default.')
    f1.check_success()
    f4 = self.ctx.memcache_set(key, value, use_cache=True)
    self.assertFalse(f1 is f4,
                     'Context memcache get future cached after result known.')
def main():
  """Run the full test suite via unittest's command-line entry point."""
  unittest.main()
# Allow running this test module directly as a script.
if __name__ == '__main__':
  main()
| Python |
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. For example:
@tasklet
def foo():
a = yield <some Future>
b = yield <another Future>
raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
f.done() # Returns True
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from .google_imports import apiproxy_stub_map
from .google_imports import apiproxy_rpc
from .google_imports import datastore_rpc
from . import eventloop
from . import utils
# Public names exported by this module.
__all__ = ['Return', 'tasklet', 'synctasklet', 'toplevel', 'sleep',
           'add_flow_exception', 'get_return_value',
           'get_context', 'set_context',
           'make_default_context', 'make_context',
           'Future', 'MultiFuture', 'QueueFuture', 'SerialQueueFuture',
           'ReducingFuture',
           ]

# Shorthand for the lazily-evaluated debug logging helper from utils.
_logging_debug = utils.logging_debug
def _is_generator(obj):
"""Helper to test for a generator object.
NOTE: This tests for the (iterable) object returned by calling a
generator function, not for a generator function.
"""
return isinstance(obj, types.GeneratorType)
class _State(utils.threading_local):
  """Hold thread-local state."""

  # The default Context for the current thread (managed elsewhere via
  # get_context()/set_context()).
  current_context = None

  def __init__(self):
    super(_State, self).__init__()
    # Futures created but not yet completed, kept for deadlock
    # diagnostics (dumped by Future.wait() when the loop runs dry).
    self.all_pending = set()

  def add_pending(self, fut):
    """Record fut as created-but-not-yet-done."""
    _logging_debug('all_pending: add %s', fut)
    self.all_pending.add(fut)

  def remove_pending(self, fut, status='success'):
    """Remove fut from the pending set; tolerates fut being absent."""
    if fut in self.all_pending:
      _logging_debug('all_pending: %s: remove %s', status, fut)
      self.all_pending.remove(fut)
    else:
      _logging_debug('all_pending: %s: not found %s', status, fut)

  def clear_all_pending(self):
    """Forget all pending Futures (logged at INFO if any were left)."""
    if self.all_pending:
      logging.info('all_pending: clear %s', self.all_pending)
      self.all_pending.clear()
    else:
      _logging_debug('all_pending: clear no-op')

  def dump_all_pending(self, verbose=False):
    """Return a newline-joined textual dump of all pending Futures."""
    pending = []
    for fut in self.all_pending:
      if verbose:
        line = fut.dump() + ('\n' + '-'*40)
      else:
        line = fut.dump_stack()
      pending.append(line)
    return '\n'.join(pending)
# The single module-global _State instance (one value set per thread).
_state = _State()

# Tuple of exceptions that should not be logged (except in debug mode).
_flow_exceptions = ()
def add_flow_exception(exc):
  """Add an exception that should not be logged.

  The argument must be a subclass of Exception.
  """
  global _flow_exceptions
  if not (isinstance(exc, type) and issubclass(exc, Exception)):
    raise TypeError('Expected an Exception subclass, got %r' % (exc,))
  updated = set(_flow_exceptions)
  updated.add(exc)
  _flow_exceptions = tuple(updated)
def _init_flow_exceptions():
  """Internal helper to initialize _flow_exceptions.

  This automatically adds webob.exc.HTTPException, if it can be imported.
  """
  global _flow_exceptions
  _flow_exceptions = ()
  try:
    from webob import exc
  except ImportError:
    # webob is optional; without it only explicitly registered
    # exceptions are treated as flow exceptions.
    pass
  else:
    add_flow_exception(exc.HTTPException)


# Run at import time so webob's HTTPException (commonly raised for
# redirects in webapp handlers) is suppressed from warning logs.
_init_flow_exceptions()
class Future(object):
  """A Future has 0 or more callbacks.

  The callbacks will be called when the result is ready.

  NOTE: This is somewhat inspired but not conformant to the Future interface
  defined by PEP 3148.  It is also inspired (and tries to be somewhat
  compatible with) the App Engine specific UserRPC and MultiRpc classes.
  """
  # TODO: Trim the API; there are too many ways to do the same thing.
  # TODO: Compare to Monocle's much simpler Callback class.

  # Constants for state property.
  IDLE = apiproxy_rpc.RPC.IDLE  # Not yet running (unused)
  RUNNING = apiproxy_rpc.RPC.RUNNING  # Not yet completed.
  FINISHING = apiproxy_rpc.RPC.FINISHING  # Completed.

  # XXX Add docstrings to all methods.  Separate PEP 3148 API from RPC API.

  _geninfo = None  # Extra info about suspended generator.

  def __init__(self, info=None):
    """Constructor.  info is an optional string labeling this Future."""
    # TODO: Make done a method, to match PEP 3148?
    __ndb_debug__ = 'SKIP'  # Hide this frame from self._where
    self._info = info  # Info from the caller about this Future's purpose.
    self._where = utils.get_stack()
    self._context = None
    self._reset()

  def _reset(self):
    """(Re)initialize completion state and register self as pending."""
    self._done = False
    self._result = None
    self._exception = None
    self._traceback = None
    self._callbacks = []
    self._immediate_callbacks = []
    _state.add_pending(self)
    self._next = None  # Links suspended Futures together in a stack.

  # TODO: Add a __del__ that complains if neither get_exception() nor
  # check_success() was ever called?  What if it's not even done?

  def __repr__(self):
    if self._done:
      if self._exception is not None:
        state = 'exception %s: %s' % (self._exception.__class__.__name__,
                                      self._exception)
      else:
        state = 'result %r' % (self._result,)
    else:
      state = 'pending'
    line = '?'
    # Pick the first stack line outside tasklets.py -- the user frame
    # that created this Future.
    for line in self._where:
      if 'tasklets.py' not in line:
        break
    if self._info:
      line += ' for %s' % self._info
    if self._geninfo:
      line += ' %s' % self._geninfo
    return '<%s %x created by %s; %s>' % (
        self.__class__.__name__, id(self), line, state)

  def dump(self):
    """Return a multi-line dump including the full creation stack."""
    return '%s\nCreated by %s' % (self.dump_stack(),
                                  '\n called by '.join(self._where))

  def dump_stack(self):
    """Return a one-line-per-Future dump of the waiting-for chain."""
    lines = []
    fut = self
    while fut is not None:
      lines.append(str(fut))
      fut = fut._next
    return '\n waiting for '.join(lines)

  def add_callback(self, callback, *args, **kwds):
    """Schedule callback(*args, **kwds) for when the result is ready.

    If the Future is already done the callback is queued on the event
    loop rather than called synchronously.
    """
    if self._done:
      eventloop.queue_call(None, callback, *args, **kwds)
    else:
      self._callbacks.append((callback, args, kwds))

  def add_immediate_callback(self, callback, *args, **kwds):
    """Like add_callback(), but invoked synchronously at completion
    (or right now, if the Future is already done)."""
    if self._done:
      callback(*args, **kwds)
    else:
      self._immediate_callbacks.append((callback, args, kwds))

  def set_result(self, result):
    """Complete the Future successfully and fire all callbacks.

    Raises:
      RuntimeError: if the Future is already done.
    """
    if self._done:
      raise RuntimeError('Result cannot be set twice.')
    self._result = result
    self._done = True
    _state.remove_pending(self)
    # Immediate callbacks run synchronously; regular ones go through
    # the event loop.
    for callback, args, kwds in self._immediate_callbacks:
      callback(*args, **kwds)
    for callback, args, kwds in self._callbacks:
      eventloop.queue_call(None, callback, *args, **kwds)

  def set_exception(self, exc, tb=None):
    """Complete the Future with an exception and fire all callbacks.

    Args:
      exc: A BaseException instance.
      tb: Optional traceback object preserved for later re-raising.

    Raises:
      TypeError: if exc is not an exception instance.
      RuntimeError: if the Future is already done.
    """
    if not isinstance(exc, BaseException):
      raise TypeError('exc must be an Exception; received %r' % exc)
    if self._done:
      raise RuntimeError('Exception cannot be set twice.')
    self._exception = exc
    self._traceback = tb
    self._done = True
    _state.remove_pending(self, status='fail')
    for callback, args, kwds in self._immediate_callbacks:
      callback(*args, **kwds)
    for callback, args, kwds in self._callbacks:
      eventloop.queue_call(None, callback, *args, **kwds)

  def done(self):
    """Return True if the result or exception is available."""
    return self._done

  @property
  def state(self):
    # This is just for compatibility with UserRPC and MultiRpc.
    # A Future is considered running as soon as it is created.
    if self._done:
      return self.FINISHING
    else:
      return self.RUNNING

  def wait(self):
    """Block, running the event loop, until this Future is done.

    If the event loop runs dry before completion, the Future is failed
    with a RuntimeError describing the deadlock.
    """
    if self._done:
      return
    ev = eventloop.get_event_loop()
    while not self._done:
      if not ev.run1():
        logging.info('Deadlock in %s', self)
        logging.info('All pending Futures:\n%s', _state.dump_all_pending())
        _logging_debug('All pending Futures (verbose):\n%s',
                       _state.dump_all_pending(verbose=True))
        self.set_exception(RuntimeError('Deadlock waiting for %s' % self))

  def get_exception(self):
    """Wait, then return the exception (None on success)."""
    self.wait()
    return self._exception

  def get_traceback(self):
    """Wait, then return the traceback (None if there is none)."""
    self.wait()
    return self._traceback

  def check_success(self):
    """Wait, then re-raise the stored exception, if any."""
    self.wait()
    if self._exception is not None:
      # Python 2 three-expression raise: preserves the original traceback.
      raise self._exception.__class__, self._exception, self._traceback

  def get_result(self):
    """Wait, then return the result; re-raises on failure."""
    self.check_success()
    return self._result

  # TODO: Have a tasklet that does this
  @classmethod
  def wait_any(cls, futures):
    """Run the event loop until at least one of futures is done.

    Returns:
      The first completed Future found, or None if futures is empty.
    """
    # TODO: Flatten MultiRpcs.
    waiting_on = set(futures)
    ev = eventloop.get_event_loop()
    while waiting_on:
      for f in waiting_on:
        if f.state == cls.FINISHING:
          return f
      ev.run1()
    return None

  # TODO: Have a tasklet that does this
  @classmethod
  def wait_all(cls, futures):
    """Run the event loop until all of futures are done."""
    # TODO: Flatten MultiRpcs.
    waiting_on = set(futures)
    ev = eventloop.get_event_loop()
    while waiting_on:
      waiting_on = set(f for f in waiting_on if f.state == cls.RUNNING)
      ev.run1()

  def _help_tasklet_along(self, gen, val=None, exc=None, tb=None):
    """Advance the tasklet generator gen by one step.

    Sends val into the generator (or throws exc/tb into it) with this
    Future's saved context active, then dispatches on what gen yields:
    an RPC, another Future, or a tuple/list of them.  StopIteration
    completes this Future with the tasklet's return value; any other
    exception fails this Future.
    """
    info = utils.gen_info(gen)
    __ndb_debug__ = info
    try:
      save_context = get_context()
      try:
        set_context(self._context)
        if exc is not None:
          _logging_debug('Throwing %s(%s) into %s',
                         exc.__class__.__name__, exc, info)
          value = gen.throw(exc.__class__, exc, tb)
        else:
          _logging_debug('Sending %r to %s', val, info)
          value = gen.send(val)
        self._context = get_context()
      finally:
        set_context(save_context)
    except StopIteration, err:
      result = get_return_value(err)
      _logging_debug('%s returned %r', info, result)
      self.set_result(result)
      return
    except GeneratorExit:
      # In Python 2.5, this derives from Exception, but we don't want
      # to handle it like other Exception instances.  So we catch and
      # re-raise it immediately.  See issue 127.  http://goo.gl/2p5Pn
      # TODO: Remove when Python 2.5 is no longer supported.
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      if isinstance(err, _flow_exceptions):
        # Flow exceptions aren't logged except in "heavy debug" mode,
        # and then only at DEBUG level, without a traceback.
        _logging_debug('%s raised %s(%s)',
                       info, err.__class__.__name__, err)
      elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
        # In "heavy debug" mode, log a warning with traceback.
        # (This is the same condition as used in utils.logging_debug().)
        logging.warning('%s raised %s(%s)',
                        info, err.__class__.__name__, err, exc_info=True)
      else:
        # Otherwise, log a warning without a traceback.
        logging.warning('%s raised %s(%s)', info, err.__class__.__name__, err)
      self.set_exception(err, tb)
      return
    else:
      _logging_debug('%s yielded %r', info, value)
      if isinstance(value, (apiproxy_stub_map.UserRPC,
                            datastore_rpc.MultiRpc)):
        # A yielded RPC: resume the tasklet when the RPC completes.
        # TODO: Tail recursion if the RPC is already complete.
        eventloop.queue_rpc(value, self._on_rpc_completion, value, gen)
        return
      if isinstance(value, Future):
        # A yielded Future: block this tasklet on it.
        # TODO: Tail recursion if the Future is already done.
        if self._next:
          raise RuntimeError('Future has already completed yet next is %r' %
                             self._next)
        self._next = value
        self._geninfo = utils.gen_info(gen)
        _logging_debug('%s is now blocked waiting for %s', self, value)
        value.add_callback(self._on_future_completion, value, gen)
        return
      if isinstance(value, (tuple, list)):
        # Arrange for yield to return a list of results (not Futures).
        info = 'multi-yield from %s' % utils.gen_info(gen)
        mfut = MultiFuture(info)
        try:
          for subfuture in value:
            mfut.add_dependent(subfuture)
          mfut.complete()
        except GeneratorExit:
          raise
        except Exception, err:
          _, _, tb = sys.exc_info()
          mfut.set_exception(err, tb)
        mfut.add_callback(self._on_future_completion, mfut, gen)
        return
      if _is_generator(value):
        # TODO: emulate PEP 380 here?
        raise NotImplementedError('Cannot defer to another generator.')
      raise RuntimeError('A tasklet should not yield plain values.')

  def _on_rpc_completion(self, rpc, gen):
    """Callback for a yielded RPC: feed its result (or error) to gen."""
    try:
      result = rpc.get_result()
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self._help_tasklet_along(gen, exc=err, tb=tb)
    else:
      self._help_tasklet_along(gen, result)

  def _on_future_completion(self, future, gen):
    """Callback for a yielded Future: feed its outcome to gen."""
    if self._next is future:
      self._next = None
      self._geninfo = None
      _logging_debug('%s is no longer blocked waiting for %s', self, future)
    exc = future.get_exception()
    if exc is not None:
      self._help_tasklet_along(gen, exc=exc, tb=future.get_traceback())
    else:
      val = future.get_result()  # This won't raise an exception.
      self._help_tasklet_along(gen, val)
def sleep(dt):
  """Public function to sleep some time.

  Args:
    dt: Seconds to sleep, as a number.

  Returns:
    A Future whose result (None) is set after dt seconds.

  Example:
    yield tasklets.sleep(0.5)  # Sleep for half a sec.
  """
  fut = Future('sleep(%.3f)' % dt)
  eventloop.queue_call(dt, fut.set_result, None)
  return fut
class MultiFuture(Future):
  """A Future that depends on multiple other Futures.

  This is used internally by 'v1, v2, ... = yield f1, f2, ...'; the
  semantics (e.g. error handling) are constrained by that use case.

  The protocol from the caller's POV is:

    mf = MultiFuture()
    mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
    mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
      .
      . (More mf.add_dependent() and/or mf.putq() calls)
      .
    mf.complete()  # No more dependents will be added.
      .
      . (Time passes)
      .
    results = mf.get_result()

  Now, results is a list of results from all dependent Futures in
  the order in which they were added.

  It is legal to add the same dependent multiple times.

  Callbacks can be added at any point.

  From a dependent Future POV, there's nothing to be done: a callback
  is automatically added to each dependent Future which will signal
  its completion to the MultiFuture.

  Error handling: if any dependent future raises an error, it is
  propagated to mf.  To force an early error, you can call
  mf.set_exception() instead of mf.complete().  After this you can't
  call mf.add_dependent() or mf.putq() any more.
  """

  def __init__(self, info=None):
    __ndb_debug__ = 'SKIP'  # Hide this frame from self._where
    self._full = False  # True once complete() or set_exception() is called.
    self._dependents = set()  # Dependents that are not yet done.
    self._results = []  # All dependents, in insertion order.
    super(MultiFuture, self).__init__(info=info)

  def __repr__(self):
    # TODO: This may be invoked before __init__() returns,
    # from Future.__init__().  Beware.
    line = super(MultiFuture, self).__repr__()
    lines = [line]
    for fut in self._results:
      lines.append(fut.dump_stack().replace('\n', '\n '))
    return '\n waiting for '.join(lines)

  # TODO: Maybe rename this method, since completion of a Future/RPC
  # already means something else.  But to what?
  def complete(self):
    """Signal that no more dependents will be added.

    Raises:
      RuntimeError: if called more than once.
    """
    if self._full:
      raise RuntimeError('MultiFuture cannot complete twice.')
    self._full = True
    if not self._dependents:
      self._finish()

  # TODO: Maybe don't overload set_exception() with this?
  def set_exception(self, exc, tb=None):
    """Fail the MultiFuture early; also closes it to new dependents."""
    self._full = True
    super(MultiFuture, self).set_exception(exc, tb)

  def _finish(self):
    """Collect all dependents' results into the final list result.

    If any dependent failed, its exception is propagated instead.
    """
    if not self._full:
      raise RuntimeError('MultiFuture cannot finish until completed.')
    if self._dependents:
      raise RuntimeError('MultiFuture cannot finish whilst waiting for '
                         'dependents %r' % self._dependents)
    if self._done:
      raise RuntimeError('MultiFuture done before finishing.')
    try:
      result = [r.get_result() for r in self._results]
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self.set_exception(err, tb)
    else:
      self.set_result(result)

  def putq(self, value):
    """Add a dependent; a plain value is wrapped in a done Future."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Register a dependent Future (a list becomes a nested MultiFuture).

    Raises:
      TypeError: if fut is neither a Future nor a list.
      RuntimeError: if complete() has already been called.
    """
    if isinstance(fut, list):
      mfut = MultiFuture()
      map(mfut.add_dependent, fut)
      mfut.complete()
      fut = mfut
    elif not isinstance(fut, Future):
      raise TypeError('Expected Future received %r' % fut)
    if self._full:
      raise RuntimeError('MultiFuture cannot add a dependent once complete.')
    self._results.append(fut)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    """Callback run when a dependent completes; finish when all done."""
    self._dependents.remove(fut)
    if self._full and not self._dependents and not self._done:
      self._finish()
class QueueFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However, instead of returning results as a list, it lets you
retrieve results as soon as they are ready, one at a time, using
getq(). The Future itself finishes with a result of None when the
last result is ready (regardless of whether it was retrieved).
The getq() method returns a Future which blocks until the next
result is ready, and then returns that result. Each getq() call
retrieves one unique result. Extra getq() calls after the last
result is already returned return EOFError as their Future's
exception. (I.e., q.getq() returns a Future as always, but yielding
that Future raises EOFError.)
NOTE: Values can also be pushed directly via .putq(value). However
there is no flow control -- if the producer is faster than the
consumer, the queue will grow unbounded.
"""
  # TODO: Refactor to share code with MultiFuture.

  def __init__(self, info=None):
    """Constructor.  info is an optional label for debugging."""
    self._full = False  # True once complete() or set_exception() is called.
    self._dependents = set()  # Added-but-not-yet-done Futures.
    self._completed = collections.deque()  # Ready (exc, tb, val) triples.
    self._waiting = collections.deque()  # getq() Futures awaiting a result.
    # Invariant: at least one of _completed and _waiting is empty.
    # Also: _full and not _dependents <==> _done.
    super(QueueFuture, self).__init__(info=info)

  # TODO: __repr__
def complete(self):
if self._full:
raise RuntimeError('MultiFuture cannot complete twice.')
self._full = True
if not self._dependents:
self.set_result(None)
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
super(QueueFuture, self).set_exception(exc, tb)
if not self._dependents:
self._mark_finished()
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._full:
raise RuntimeError('QueueFuture add dependent once complete.')
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
raise RuntimeError('Future not done before signalling dependant done.')
self._dependents.remove(fut)
exc = fut.get_exception()
tb = fut.get_traceback()
val = None
if exc is None:
val = fut.get_result()
if self._waiting:
waiter = self._waiting.popleft()
self._pass_result(waiter, exc, tb, val)
else:
self._completed.append((exc, tb, val))
if self._full and not self._dependents and not self._done:
self.set_result(None)
self._mark_finished()
def _mark_finished(self):
if not self.done():
raise RuntimeError('Future not done before marking as finished.')
while self._waiting:
waiter = self._waiting.popleft()
self._pass_eof(waiter)
def getq(self):
fut = Future()
if self._completed:
exc, tb, val = self._completed.popleft()
self._pass_result(fut, exc, tb, val)
elif self._full and not self._dependents:
self._pass_eof(fut)
else:
self._waiting.append(fut)
return fut
def _pass_eof(self, fut):
if not self._done:
raise RuntimeError('QueueFuture cannot pass EOF until done.')
exc = self.get_exception()
if exc is not None:
tb = self.get_traceback()
else:
exc = EOFError('Queue is empty')
tb = None
self._pass_result(fut, exc, tb, None)
def _pass_result(self, fut, exc, tb, val):
if exc is not None:
fut.set_exception(exc, tb)
else:
fut.set_result(val)
class SerialQueueFuture(Future):
  """Like QueueFuture but maintains the order of insertion.

  This class is used by Query operations.

  Invariants:

  - At least one of _queue and _waiting is empty.
  - The Futures in _waiting are always pending.

  (The Futures in _queue may be pending or completed.)

  In the discussion below, add_dependent() is treated the same way as
  putq().

  If putq() is ahead of getq(), the situation is like this:

                          putq()
                          v
    _queue: [f1, f2, ...]; _waiting: []
            ^
            getq()

  Here, putq() appends a Future to the right of _queue, and getq()
  removes one from the left.

  If getq() is ahead of putq(), it's like this:

              putq()
              v
    _queue: []; _waiting: [f1, f2, ...]
                                       ^
                                       getq()

  Here, putq() removes a Future from the left of _waiting, and getq()
  appends one to the right.

  When both are empty, putq() appends a Future to the right of _queue,
  while getq() appends one to the right of _waiting.

  The _full flag means that no more calls to putq() will be made; it
  is set by calling either complete() or set_exception().

  Calling complete() signals that no more putq() calls will be made.
  If getq() is behind, subsequent getq() calls will eat up _queue
  until it is empty, and after that will return a Future that passes
  EOFError (note that getq() itself never raises EOFError). If getq()
  is ahead when complete() is called, the Futures in _waiting are all
  passed an EOFError exception (thereby eating up _waiting).

  If, instead of complete(), set_exception() is called, the exception
  and traceback set there will be used instead of EOFError.
  """

  def __init__(self, info=None):
    self._full = False  # True once complete()/set_exception() was called.
    self._queue = collections.deque()  # Futures waiting to be retrieved.
    self._waiting = collections.deque()  # Pending Futures handed to getq().
    super(SerialQueueFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more putq() calls will be made."""
    if self._full:
      raise RuntimeError('SerialQueueFuture cannot complete twice.')
    self._full = True
    # Anyone still waiting gets EOF.
    while self._waiting:
      waiter = self._waiting.popleft()
      waiter.set_exception(EOFError('Queue is empty'))
    if not self._queue:
      self.set_result(None)

  def set_exception(self, exc, tb=None):
    """Fail the queue; pending getq() Futures receive the same exception."""
    self._full = True
    super(SerialQueueFuture, self).set_exception(exc, tb)
    while self._waiting:
      waiter = self._waiting.popleft()
      waiter.set_exception(exc, tb)

  def putq(self, value):
    """Enqueue a value or a Future producing one, preserving order."""
    if isinstance(value, Future):
      fut = value
    else:
      # Fast path: hand a plain value directly to an already-waiting
      # consumer without allocating an intermediate Future.
      if self._waiting:
        waiter = self._waiting.popleft()
        waiter.set_result(value)
        return
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future; its result goes to the next getq() caller."""
    if not isinstance(fut, Future):
      raise TypeError('fut must be a Future instance; received %r' % fut)
    if self._full:
      raise RuntimeError('SerialQueueFuture cannot add dependent '
                         'once complete.')
    if self._waiting:
      # Wire fut's eventual outcome straight into the waiting Future.
      waiter = self._waiting.popleft()
      fut.add_callback(_transfer_result, fut, waiter)
    else:
      self._queue.append(fut)

  def getq(self):
    """Return a Future for the next queued result (in insertion order)."""
    if self._queue:
      fut = self._queue.popleft()
      # TODO: Isn't it better to call self.set_result(None) in complete()?
      if not self._queue and self._full and not self._done:
        self.set_result(None)
    else:
      fut = Future()
      if self._full:
        if not self._done:
          raise RuntimeError('self._queue should be non-empty.')
        err = self.get_exception()
        if err is not None:
          tb = self.get_traceback()
        else:
          err = EOFError('Queue is empty')
          tb = None
        fut.set_exception(err, tb)
      else:
        self._waiting.append(fut)
    return fut
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val)
class ReducingFuture(Future):
  """A Queue following the same protocol as MultiFuture.

  However the result, instead of being a list of results of dependent
  Futures, is computed by calling a 'reducer' tasklet. The reducer tasklet
  takes a list of values and returns a single value. It may be called
  multiple times on sublists of values and should behave like
  e.g. sum().

  NOTE: The reducer input values may be reordered compared to the
  order in which they were added to the queue.
  """
  # TODO: Refactor to reuse some code with MultiFuture.

  def __init__(self, reducer, info=None, batch_size=20):
    self._reducer = reducer  # Callable reducing a list to a single value.
    self._batch_size = batch_size  # Reduce once this many values accumulate.
    self._full = False  # True once complete()/set_exception() was called.
    self._dependents = set()  # Futures whose results are still pending.
    self._completed = collections.deque()  # NOTE(review): appears unused here.
    self._queue = collections.deque()  # Values awaiting reduction.
    super(ReducingFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more dependents will be added."""
    if self._full:
      raise RuntimeError('ReducingFuture cannot complete twice.')
    self._full = True
    if not self._dependents:
      self._mark_finished()

  def set_exception(self, exc, tb=None):
    """Fail the future; discards any values queued for reduction."""
    self._full = True
    self._queue.clear()
    super(ReducingFuture, self).set_exception(exc, tb)

  def putq(self, value):
    """Enqueue a value or a Future producing one."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future feeding values into the reducer."""
    if self._full:
      raise RuntimeError('ReducingFuture cannot add dependent once complete.')
    self._internal_add_dependent(fut)

  def _internal_add_dependent(self, fut):
    # Also used after _full is set, for Futures returned by the reducer.
    if not isinstance(fut, Future):
      raise TypeError('fut must be a Future; received %r' % fut)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Collect fut's value; reduce eagerly once a full batch accumulates.
    if not fut.done():
      raise RuntimeError('Future not done before signalling dependant done.')
    self._dependents.remove(fut)
    if self._done:
      return  # Already done.
    try:
      val = fut.get_result()
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self.set_exception(err, tb)
      return
    self._queue.append(val)
    if len(self._queue) >= self._batch_size:
      todo = list(self._queue)
      self._queue.clear()
      try:
        nval = self._reducer(todo)
      except GeneratorExit:
        raise
      except Exception, err:
        _, _, tb = sys.exc_info()
        self.set_exception(err, tb)
        return
      if isinstance(nval, Future):
        # Reducer returned a Future: treat it as another dependent.
        self._internal_add_dependent(nval)
      else:
        self._queue.append(nval)
    if self._full and not self._dependents:
      self._mark_finished()

  def _mark_finished(self):
    # Perform the final reduction (if needed) and set the result.
    if not self._queue:
      self.set_result(None)
    elif len(self._queue) == 1:
      self.set_result(self._queue.pop())
    else:
      todo = list(self._queue)
      self._queue.clear()
      try:
        nval = self._reducer(todo)
      except GeneratorExit:
        raise
      except Exception, err:
        _, _, tb = sys.exc_info()
        self.set_exception(err, tb)
        return
      if isinstance(nval, Future):
        self._internal_add_dependent(nval)
      else:
        self.set_result(nval)
# Alias for StopIteration used to mark return values.
# To use this, raise Return(<your return value>). The semantics
# are exactly the same as raise StopIteration(<your return value>)
# but using Return clarifies that you are intending this to be the
# return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone. Should I worry?
# (Generators cannot use a plain 'return value' statement in Python 2,
# hence this exception-based idiom.)
Return = StopIteration
def get_return_value(err):
  """Extract the return value carried by a Return/StopIteration exception.

  No args means None, a single arg is returned as-is, and multiple
  args are returned as a tuple.
  """
  if not err.args:
    return None
  if len(err.args) == 1:
    return err.args[0]
  return err.args
def tasklet(func):
  """Decorator turning a function into a tasklet that returns a Future.

  Calling the decorated function returns a Future immediately.  If the
  function is a generator, the event loop pumps it to completion; if it
  is a plain function, it is run synchronously and the returned Future
  is already done.
  """
  @utils.wrapping(func)
  def tasklet_wrapper(*args, **kwds):
    # Create the Future up front so the current context is captured.
    # TODO: make most of this a public function so you can take a bare
    # generator and turn it into a tasklet dynamically. (Monocle has
    # this I believe.)
    # __ndb_debug__ = utils.func_info(func)
    fut = Future('tasklet %s' % utils.func_info(func))
    fut._context = get_context()
    try:
      result = func(*args, **kwds)
    except StopIteration, err:
      # Just in case the function is not a generator but still uses
      # the "raise Return(...)" idiom, we'll extract the return value.
      result = get_return_value(err)
    if _is_generator(result):
      # Generator: schedule it to be advanced by the event loop.
      eventloop.queue_call(None, fut._help_tasklet_along, result)
    else:
      # Plain value: the tasklet is already finished.
      fut.set_result(result)
    return fut
  return tasklet_wrapper
def synctasklet(func):
  """Decorator to run a function as a tasklet when called.

  Use this to wrap a request handler function that will be called by
  some web application framework (e.g. a Django view function or a
  webapp.RequestHandler.get method).
  """
  @utils.wrapping(func)
  def synctasklet_wrapper(*args, **kwds):
    # __ndb_debug__ is inspected by the tasklet debugging machinery.
    __ndb_debug__ = utils.func_info(func)
    # Run the wrapped function as a tasklet and block on its result.
    return tasklet(func)(*args, **kwds).get_result()
  return synctasklet_wrapper
def toplevel(func):
  """A sync tasklet that sets a fresh default Context.

  Use this for toplevel view functions such as
  webapp.RequestHandler.get() or Django view functions.
  """
  @utils.wrapping(func)
  def add_context_wrapper(*args, **kwds):
    __ndb_debug__ = utils.func_info(func)
    # Drop any callbacks left over from a previous request.
    _state.clear_all_pending()
    # Create and install a new context.
    ctx = make_default_context()
    try:
      set_context(ctx)
      return synctasklet(func)(*args, **kwds)
    finally:
      # Always tear down: uninstall the context, then flush its
      # pending work before returning to the framework.
      set_context(None)
      ctx.flush().check_success()
      eventloop.run()  # Ensure writes are flushed, etc.
  return add_context_wrapper
_CONTEXT_KEY = '__CONTEXT__'
def get_context():
  """Return the current Context, creating a default one if none is set."""
  ctx = None
  # Only trust _state.current_context if the environment marker shows a
  # context was installed during this request.
  if os.getenv(_CONTEXT_KEY):
    ctx = _state.current_context
  if ctx is None:
    ctx = make_default_context()
    set_context(ctx)
  return ctx
def make_default_context():
  """Create a new Context using default connection and configuration."""
  return make_context()
@utils.positional(0)
def make_context(conn=None, config=None):
  """Create a new Context; conn and config must be passed as keywords."""
  from . import context  # Late import to deal with circular imports.
  return context.Context(conn=conn, config=config)
def set_context(new_context):
  """Install new_context as the current Context."""
  # Mark the environment so get_context() knows a context was set
  # during this request.
  os.environ[_CONTEXT_KEY] = '1'
  _state.current_context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
# (Actually, not. @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
# - Check (poll) its status via f.done.
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
# This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self._done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
| Python |
"""Tests for polymodel.py.
See issue 35. http://goo.gl/iHkCm
"""
import pickle
import unittest
from .google_imports import namespace_manager
from .google_imports import datastore_types
from . import polymodel
from . import model
from . import test_utils
PolyModel = polymodel.PolyModel
class PolyModelTests(test_utils.NDBTest):
  """Unit tests for PolyModel class hierarchies and polymorphic queries."""

  def setUp(self):
    super(PolyModelTests, self).setUp()

  # Module under test; consumed by the NDBTest base class.
  the_module = polymodel

  def testBasics(self):
    # Test basic PolyModel functionality.
    class Shoe(PolyModel):
      color = model.StringProperty()
    class Moccasin(Shoe):
      leather = model.StringProperty()
    class Sneaker(Shoe):
      pump = model.BooleanProperty()
    # Class names/keys reflect the inheritance chain from the root.
    self.assertEqual(Shoe._class_name(), 'Shoe')
    self.assertEqual(Shoe._class_key(), ['Shoe'])
    self.assertEqual(Moccasin._class_name(), 'Moccasin')
    self.assertEqual(Moccasin._class_key(), ['Shoe', 'Moccasin'])
    self.assertEqual(Sneaker._class_name(), 'Sneaker')
    self.assertEqual(Sneaker._class_key(), ['Shoe', 'Sneaker'])
    # All subclasses are stored under the root kind 'Shoe'.
    s_key = model.Key('Shoe', 1)
    self.assertEqual(Shoe().put(), s_key)
    s = s_key.get()
    self.assertEqual(s._get_kind(), 'Shoe')
    self.assertEqual(s._class_key(), ['Shoe'])
    self.assertEqual(s.class_, ['Shoe'])
    m_key = model.Key('Shoe', 2)
    self.assertEqual(Moccasin(color='brown', leather='cattlehide').put(),
                     m_key)
    m = m_key.get()
    self.assertEqual(m._get_kind(), 'Shoe')
    self.assertEqual(m.class_, ['Shoe', 'Moccasin'])
    snkr_key = model.Key('Shoe', 3)
    self.assertEqual(Sneaker(color='red', pump=False).put(), snkr_key)
    snkr = snkr_key.get()
    self.assertEqual(snkr._get_kind(), 'Shoe')
    self.assertEqual(snkr.class_, ['Shoe', 'Sneaker'])
    # Base-class queries see everything; subclass queries filter by class.
    self.assertEqual(Shoe.query().fetch(), [s, m, snkr])
    self.assertEqual(Shoe.query(Sneaker.pump == False).fetch(), [snkr])
    self.assertEqual(Moccasin.query().fetch(), [m])
    self.assertEqual(Sneaker.query().fetch(), [snkr])

  def testBlobKeyProperty(self):
    # BlobKeyProperty round-trips through put()/get() on a subclass.
    class MyModel(PolyModel):
      pass
    class MyDerivedModel(MyModel):
      image = model.BlobKeyProperty()
    test_blobkey = datastore_types.BlobKey('testkey123')
    m = MyDerivedModel()
    m.image = test_blobkey
    m.put()
    m = m.key.get()
    m.image = test_blobkey
    m.put()
    self.assertTrue(isinstance(m.image, datastore_types.BlobKey))
    self.assertEqual(str(m.image), str(test_blobkey))

  def testClassKeyProperty(self):
    # Tests for the class_ property.
    class Animal(PolyModel):
      pass
    class Dog(Animal):
      pass
    fido = Dog()
    self.assertEqual(fido.class_, ['Animal', 'Dog'])
    # class_ is computed and must be read-only.
    self.assertRaises(TypeError, setattr, fido, 'class_', ['Animal', 'Dog'])

  def testPolyExpando(self):
    # Test that PolyModel can be combined with Expando.
    # (See also testExpandoPoly, and the Ghoul class in testInheritance.)
    class Animal(PolyModel, model.Expando):
      pass
    class Mammal(Animal):
      pass
    cat = Mammal(name='Tom', naps=18, sound='purr')
    cat1 = cat.put().get()
    self.assertFalse(cat1 is cat)
    self.assertEqual(cat1, cat)
    self.assertEqual(cat1.name, 'Tom')
    self.assertEqual(cat1.naps, 18)
    self.assertEqual(cat1.sound, 'purr')

  def testExpandoPoly(self):
    # Like testPolyExpando, but switch the order of the base classes.
    # It should work either way.
    class Animal(model.Expando, PolyModel):
      pass
    class Mammal(Animal):
      pass
    cat = Mammal(name='Tom', naps=18, sound='purr')
    cat1 = cat.put().get()
    self.assertFalse(cat1 is cat)
    self.assertEqual(cat1, cat)
    self.assertEqual(cat1.name, 'Tom')
    self.assertEqual(cat1.naps, 18)
    self.assertEqual(cat1.sound, 'purr')

  def testInheritance(self):
    # Tests focused on the inheritance model, including diamond inheritance.
    class NamedThing(model.Model):
      name = model.StringProperty()
    class Animal(PolyModel, NamedThing):
      legs = model.IntegerProperty(default=4)
    class Canine(Animal):
      pass
    class Dog(Canine):
      breed = model.StringProperty(default='mutt')
    class Wolf(Canine):
      mythical = model.BooleanProperty(default=False)
    class Feline(Animal):
      sound = model.StringProperty()
    class Cat(Feline):
      naps = model.IntegerProperty()
    class Panther(Feline):
      pass
    # Monster introduces diamond inheritance (Dog and Cat share Animal).
    class Monster(Dog, Cat):
      ancestry = model.StringProperty()
    class Ghoul(Monster, model.Expando):
      pass
    k9 = Canine(name='Reynard')
    self.assertEqual(k9.legs, 4)
    self.assertEqual(k9._get_kind(), 'Animal')
    self.assertEqual(k9._class_name(), 'Canine')
    self.assertEqual(k9._class_key(), ['Animal', 'Canine'])
    tom = Cat(name='Tom', naps=12, sound='purr')
    self.assertTrue(isinstance(tom, Cat))
    self.assertTrue(isinstance(tom, Feline))
    self.assertTrue(isinstance(tom, Animal))
    self.assertTrue(isinstance(tom, PolyModel))
    self.assertEqual(tom.naps, 12)
    self.assertEqual(tom.sound, 'purr')
    self.assertEqual(tom.legs, 4)
    self.assertEqual(tom._get_kind(), 'Animal')
    self.assertEqual(tom._class_name(), 'Cat')
    self.assertEqual(tom._class_key(), ['Animal', 'Feline', 'Cat'])
    fido = Wolf(name='Warg')
    self.assertEqual(fido._get_kind(), 'Animal')
    self.assertEqual(fido._class_name(), 'Wolf')
    self.assertEqual(fido._class_key(), ['Animal', 'Canine', 'Wolf'])
    # breed belongs to Dog, not Wolf.
    self.assertRaises(AttributeError, lambda: fido.breed)
    scary = Ghoul(name='Westminster', book='The Graveyard Book')
    self.assertEqual(scary.ancestry, None)
    self.assertEqual(scary._get_kind(), 'Animal')
    self.assertEqual(scary._class_name(), 'Ghoul')
    self.assertEqual(scary._class_key(), ['Animal',
                                          'Feline', 'Cat',
                                          'Canine', 'Dog',
                                          'Monster', 'Ghoul'])
    k91 = k9.put().get()
    self.assertTrue(isinstance(k9, Canine))
    self.assertEqual(k9.name, 'Reynard')
    self.assertEqual(k9._get_kind(), 'Animal')
    self.assertEqual(k9._class_name(), 'Canine')
    self.assertEqual(k9._class_key(), ['Animal', 'Canine'])
    self.assertTrue(isinstance(k91, Canine))
    self.assertEqual(k91.name, 'Reynard')
    self.assertEqual(k91._get_kind(), 'Animal')
    self.assertEqual(k91._class_name(), 'Canine')
    self.assertEqual(k91._class_key(), ['Animal', 'Canine'])
    self.assertEqual(k91, k9)
    tom1 = tom.put().get()
    self.assertEqual(tom1, tom)
    fido1 = fido.put().get()
    self.assertEqual(fido1, fido)
    scary1 = scary.put().get()
    self.assertEqual(scary1, scary)
    self.assertEqual(scary1.book, 'The Graveyard Book')

  def testPickling(self):
    # Test that PolyModel instances are pickled and unpickled properly.
    # The classes must be module globals for pickle to find them.
    global Animal, Dog
    class Animal(PolyModel):
      name = model.StringProperty()
    class Dog(Animal):
      breed = model.StringProperty()
    for proto in 0, 1, 2:
      fido = Dog(name='Fido', breed='chihuahua')
      s = pickle.dumps(fido, proto)
      fido1 = pickle.loads(s)
      self.assertEqual(fido1.name, 'Fido')
      self.assertEqual(fido1.breed, 'chihuahua')
      self.assertEqual(fido1.class_, ['Animal', 'Dog'])
      self.assertEqual(fido, fido1)

  def testClassNameOverride(self):
    # Test that overriding _class_name() works.
    class Animal(PolyModel):
      pass
    class Feline(Animal):
      pass
    class Cat(Feline):
      @classmethod
      def _class_name(cls):
        return 'Pussycat'
    tom = Cat()
    self.assertEqual(tom.class_, ['Animal', 'Feline', 'Pussycat'])
    tom.put()
    self.assertEqual(Cat.query().fetch(), [tom])

  def testEdgeCases(self):
    # Test some edge cases.
    self.assertEqual(PolyModel._get_kind(), 'PolyModel')
# Expected text form of the EntityProto produced for the Cat entity
# built in CompatibilityTests.testCompatibility below; note the
# multiple 'class' properties encoding the class key.
TOM_PB = """\
key <
app: "_"
path <
Element {
type: "Animal"
id: 0
}
>
>
entity_group <
>
property <
name: "class"
value <
stringValue: "Animal"
>
multiple: true
>
property <
name: "class"
value <
stringValue: "Feline"
>
multiple: true
>
property <
name: "class"
value <
stringValue: "Cat"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Tom"
>
multiple: false
>
property <
name: "purr"
value <
stringValue: "loud"
>
multiple: false
>
property <
name: "whiskers"
value <
booleanValue: true
>
multiple: false
>
"""
class CompatibilityTests(test_utils.NDBTest):
  """Checks the wire format stays compatible with old db polymodels."""

  def testCompatibility(self):
    class Animal(PolyModel):
      name = model.StringProperty()
    class Feline(Animal):
      whiskers = model.BooleanProperty()
    class Cat(Feline):
      purr = model.StringProperty()
    tom = Cat(name='Tom', purr='loud', whiskers=True)
    tom._prepare_for_put()
    # The serialized protobuf must match the golden value exactly.
    self.assertEqual(str(tom._to_pb()), TOM_PB)
def main():
  """Command-line entry point: run all tests in this module."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""Prospective Search for NDB.
This reimplements all of the standard APIs with the following changes:
- A document_class argument must be an NDB Model class.
- A document must be an NDB Model instance.
- get_document() always returns an NDB Model instance.
The exceptions and public constants exported by the standard module
are re-exported here.
"""
# TODO: Ideally prospective search would support NDB natively, or
# support protobufs natively (in addition to ENTITY and MODEL).
# TODO: Should we try to support async calls as well? That can't be
# done without rewriting the standard prospective_search API module.
import base64
from .google_imports import datastore
from .google_imports import datastore_types
from .google_imports import prospective_search
from .google_imports import prospective_search_pb
from .google_imports import entity_pb
from . import model
from . import tasklets
# Re-export constants and exceptions from prospective_search.
DEFAULT_RESULT_BATCH_SIZE = prospective_search.DEFAULT_RESULT_BATCH_SIZE
DEFAULT_LEASE_DURATION_SEC = prospective_search.DEFAULT_LEASE_DURATION_SEC
DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS = \
    prospective_search.DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS
DEFAULT_LIST_TOPICS_MAX_RESULTS = \
    prospective_search.DEFAULT_LIST_TOPICS_MAX_RESULTS
Error = prospective_search.Error
DocumentTypeError = prospective_search.DocumentTypeError
QuerySyntaxError = prospective_search.QuerySyntaxError
SchemaError = prospective_search.SchemaError
SubscriptionDoesNotExist = prospective_search.SubscriptionDoesNotExist
TopicNotSpecified = prospective_search.TopicNotSpecified
SubscriptionState = prospective_search.SubscriptionState
subscription_state_name = prospective_search.subscription_state_name

__all__ = ['get_document',
           'get_subscription',
           'list_subscriptions',
           'list_topics',
           'match',
           'unsubscribe',
           'subscribe',
           'subscription_state_name',
           'DEFAULT_RESULT_BATCH_SIZE',
           'DEFAULT_LEASE_DURATION_SEC',
           'DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS',
           'DEFAULT_LIST_TOPICS_MAX_RESULTS',
           'DocumentTypeError',
           'Error',
           'QuerySyntaxError',
           'SchemaError',
           'SubscriptionDoesNotExist',
           'SubscriptionState',
           'TopicNotSpecified']

_doc_class = prospective_search_pb.MatchRequest  # For testing get_document().

# Maps NDB property classes to the Python types used when building a
# prospective_search schema; property classes not listed here are
# silently skipped by _add_schema_entry().
_MODEL_TYPE_TO_PYTHON_TYPE = {
    model.StringProperty: str,
    model.IntegerProperty: int,
    model.BooleanProperty: bool,
    model.FloatProperty: float,
    model.TextProperty: str,
}
def _add_schema_entry(prop_class, name, schema):
  """Add single entry to SchemaEntries by invoking add_entry."""
  python_type = _MODEL_TYPE_TO_PYTHON_TYPE.get(prop_class)
  if python_type:
    # Group property names under their mapped Python type; unsupported
    # property classes are silently ignored.
    schema.setdefault(python_type, []).append(name)
def _model_to_entity_schema(document_class):
  """Produce schema from NDB Model class."""
  schema = {}
  # Bucket every declared property by its Python type; properties whose
  # class has no mapping are skipped inside _add_schema_entry.
  for name, prop in document_class._properties.iteritems():
    _add_schema_entry(prop.__class__, name, schema)
  return schema
def _get_document_topic(document_class, topic):
assert issubclass(document_class, model.Model)
if topic:
return topic
return document_class._get_kind()
def subscribe(document_class,
              query,
              sub_id,
              schema=None,
              topic=None,
              lease_duration_sec=DEFAULT_LEASE_DURATION_SEC):
  """Subscribe a query."""
  # The schema parameter exists only for signature compatibility with
  # the standard API; here it is always derived from the model class.
  assert schema is None
  topic = _get_document_topic(document_class, topic)
  schema = _model_to_entity_schema(document_class)
  return prospective_search.subscribe(
      datastore.Entity,
      query,
      sub_id,
      schema=schema,
      topic=topic,
      lease_duration_sec=lease_duration_sec)
def unsubscribe(document_class, sub_id, topic=None):
  """Remove the subscription identified by sub_id on the class's topic."""
  topic = _get_document_topic(document_class, topic)
  prospective_search.unsubscribe(datastore.Entity, sub_id, topic=topic)
def get_subscription(document_class, sub_id, topic=None):
  """Get subscription information."""
  topic = _get_document_topic(document_class, topic)
  return prospective_search.get_subscription(datastore.Entity, sub_id,
                                             topic=topic)
def list_subscriptions(document_class,
                       sub_id_start='',
                       topic=None,
                       max_results=DEFAULT_LIST_SUBSCRIPTIONS_MAX_RESULTS,
                       expires_before=None):
  """List subscriptions on a topic."""
  topic = _get_document_topic(document_class, topic)
  return prospective_search.list_subscriptions(
      datastore.Entity,
      sub_id_start=sub_id_start,
      topic=topic,
      max_results=max_results,
      expires_before=expires_before)

# list_topics needs no NDB-specific adaptation; re-export as-is.
list_topics = prospective_search.list_topics
def match(document,
          topic=None,
          result_key=None,
          result_relative_url='/_ah/prospective_search',
          result_task_queue='default',
          result_batch_size=DEFAULT_RESULT_BATCH_SIZE,
          result_return_document=True):
  """Match document with all subscribed queries on specified topic."""
  # Convert document to datastore.Entity.
  topic = _get_document_topic(document.__class__, topic)
  pb = document._to_pb()
  # The kind used here is irrelevant for matching; it is overwritten by
  # FromPb with the key information in the protobuf.
  entity = datastore.Entity('temp-kind').FromPb(pb)
  return prospective_search.match(
      entity,
      topic=topic,
      result_key=result_key,
      result_relative_url=result_relative_url,
      result_task_queue=result_task_queue,
      result_batch_size=result_batch_size,
      result_return_document=result_return_document)
def get_document(request):
  """Decodes document from prospective_search result POST request.

  Args:
    request: received POST request

  Returns:
    document: original NDB Model document from match call.

  Raises:
    DocumentTypeError: if document class is not recognized.
  """
  doc_class = request.get('python_document_class')
  if not doc_class:
    return None
  entity = entity_pb.EntityProto()
  entity.ParseFromString(base64.urlsafe_b64decode(
      request.get('document').encode('utf-8')))
  # NOTE(review): the converted value is unused below; int() appears to
  # serve only as validation of the field -- confirm before removing.
  doc_class = int(doc_class)
  ctx = tasklets.get_context()
  adapter = ctx._conn.adapter
  return adapter.pb_to_entity(entity)
| Python |
"""Context class."""
import logging
import sys
from .google_imports import datastore # For taskqueue coordination
from .google_imports import datastore_errors
from .google_imports import memcache
from .google_imports import namespace_manager
from .google_imports import urlfetch
from .google_imports import datastore_rpc
from .google_imports import entity_pb
from .google_imports import ProtocolBuffer
from . import key as key_module
from . import model
from . import tasklets
from . import eventloop
from . import utils
__all__ = ['Context', 'ContextOptions', 'AutoBatcher',
           'EVENTUAL_CONSISTENCY',
           ]

_LOCK_TIME = 32  # Time to lock out memcache.add() after datastore updates.
_LOCKED = 0  # Special value to store in memcache indicating locked value.

# Constant for read_policy.
EVENTUAL_CONSISTENCY = datastore_rpc.Configuration.EVENTUAL_CONSISTENCY
class ContextOptions(datastore_rpc.TransactionOptions):
  """Configuration options that may be passed along with get/put/delete."""

  # Each ConfigOption below is a validator: it receives the proposed
  # value and must either return it or raise BadArgumentError.

  @datastore_rpc.ConfigOption
  def use_cache(value):
    # Whether to use the per-Context (in-process) cache.
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'use_cache should be a bool (%r)' % (value,))
    return value

  @datastore_rpc.ConfigOption
  def use_memcache(value):
    # Whether to use memcache as a second-level cache.
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'use_memcache should be a bool (%r)' % (value,))
    return value

  @datastore_rpc.ConfigOption
  def use_datastore(value):
    # Whether to read/write the underlying datastore at all.
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'use_datastore should be a bool (%r)' % (value,))
    return value

  @datastore_rpc.ConfigOption
  def memcache_timeout(value):
    # Expiration (seconds) for values written to memcache.
    if not isinstance(value, (int, long)):
      raise datastore_errors.BadArgumentError(
          'memcache_timeout should be an integer (%r)' % (value,))
    return value

  @datastore_rpc.ConfigOption
  def max_memcache_items(value):
    # Batch-size limit for the memcache auto-batchers.
    if not isinstance(value, (int, long)):
      raise datastore_errors.BadArgumentError(
          'max_memcache_items should be an integer (%r)' % (value,))
    return value
# options and config can be used interchangeably.
_OPTION_TRANSLATIONS = {
    'options': 'config',
}
def _make_ctx_options(ctx_options):
  """Helper to construct a ContextOptions object from keyword arguments.

  Args:
    ctx_options: a dict of keyword arguments.

  Note that either 'options' or 'config' can be used to pass another
  ContextOptions object, but not both. If another ContextOptions
  object is given it provides default values.

  Returns:
    A ContextOptions object, or None if ctx_options is empty.
  """
  if not ctx_options:
    return None
  # Normalize alias keys ('options' -> 'config') in place.
  for key in list(ctx_options):
    canonical = _OPTION_TRANSLATIONS.get(key)
    if not canonical:
      continue
    if canonical in ctx_options:
      raise ValueError('Cannot specify %s and %s at the same time' %
                       (key, canonical))
    ctx_options[canonical] = ctx_options.pop(key)
  return ContextOptions(**ctx_options)
class AutoBatcher(object):
  """Batches individual operations into larger calls to a tasklet.

  Work items are queued per-options; a full queue (>= limit items) is
  dispatched immediately, otherwise dispatch happens from the event
  loop's idle callback.
  """

  def __init__(self, todo_tasklet, limit):
    # todo_tasklet is a tasklet to be called with list of (future, arg) pairs
    self._todo_tasklet = todo_tasklet
    self._limit = limit  # No more than this many per callback
    self._queues = {}  # Map options to lists of (future, arg) tuples
    self._running = []  # Currently running tasklets
    self._cache = {}  # Cache of in-flight todo_tasklet futures

  def __repr__(self):
    return '%s(%s)' % (self.__class__.__name__, self._todo_tasklet.__name__)

  def run_queue(self, options, todo):
    """Dispatch one batch of work to the todo tasklet."""
    utils.logging_debug('AutoBatcher(%s): %d items',
                        self._todo_tasklet.__name__, len(todo))
    fut = self._todo_tasklet(todo, options)
    self._running.append(fut)
    # Add a callback when we're done.
    fut.add_callback(self._finished_callback, fut)

  def _on_idle(self):
    # Event-loop idle hook: returning True keeps us registered.
    if not self.action():
      return None
    return True

  def add(self, arg, options=None):
    """Queue one work item; returns a Future for its individual result."""
    fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options))
    todo = self._queues.get(options)
    if todo is None:
      utils.logging_debug('AutoBatcher(%s): creating new queue for %r',
                          self._todo_tasklet.__name__, options)
      if not self._queues:
        # First queue: register with the event loop to flush when idle.
        eventloop.add_idle(self._on_idle)
      todo = self._queues[options] = []
    todo.append((fut, arg))
    if len(todo) >= self._limit:
      # Queue is full -- dispatch it right away.
      del self._queues[options]
      self.run_queue(options, todo)
    return fut

  def add_once(self, arg, options=None):
    """Like add(), but de-duplicates identical in-flight requests."""
    cache_key = (arg, options)
    fut = self._cache.get(cache_key)
    if fut is None:
      fut = self.add(arg, options)
      self._cache[cache_key] = fut
      # Drop the cache entry as soon as the batch completes.
      fut.add_immediate_callback(self._cache.__delitem__, cache_key)
    return fut

  def action(self):
    """Dispatch one pending queue; returns False when nothing is queued."""
    queues = self._queues
    if not queues:
      return False
    options, todo = queues.popitem()  # TODO: Should this use FIFO ordering?
    self.run_queue(options, todo)
    return True

  def _finished_callback(self, fut):
    # A batch finished; propagate any exception it raised.
    self._running.remove(fut)
    fut.check_success()

  @tasklets.tasklet
  def flush(self):
    """Tasklet: dispatch everything queued and wait for it to finish."""
    while self._running or self.action():
      if self._running:
        yield self._running  # A list of Futures
class Context(object):
  """NDB operation context: in-process caching, memcache, auto-batching.

  Wraps a datastore connection and routes get/put/delete traffic through
  AutoBatcher instances and (policy permitting) memcache and an
  in-process cache.
  """

  def __init__(self, conn=None, auto_batcher_class=AutoBatcher, config=None):
    # NOTE: If conn is not None, config is only used to get the
    # auto-batcher limits.
    if conn is None:
      conn = model.make_connection(config)
    self._conn = conn
    self._auto_batcher_class = auto_batcher_class
    # Get the get/put/delete limits (defaults 1000, 500, 500).
    # Note that the explicit config passed in overrides the config
    # attached to the connection, if it was passed in.
    max_get = (datastore_rpc.Configuration.max_get_keys(config, conn.config) or
               datastore_rpc.Connection.MAX_GET_KEYS)
    max_put = (datastore_rpc.Configuration.max_put_entities(config,
                                                            conn.config) or
               datastore_rpc.Connection.MAX_PUT_ENTITIES)
    max_delete = (datastore_rpc.Configuration.max_delete_keys(config,
                                                              conn.config) or
                  datastore_rpc.Connection.MAX_DELETE_KEYS)
    # Create the get/put/delete auto-batchers.
    self._get_batcher = auto_batcher_class(self._get_tasklet, max_get)
    self._put_batcher = auto_batcher_class(self._put_tasklet, max_put)
    self._delete_batcher = auto_batcher_class(self._delete_tasklet, max_delete)
    # We only have a single limit for memcache (default 1000).
    max_memcache = (ContextOptions.max_memcache_items(config, conn.config) or
                    datastore_rpc.Connection.MAX_GET_KEYS)
    # Create the memcache auto-batchers.
    self._memcache_get_batcher = auto_batcher_class(self._memcache_get_tasklet,
                                                    max_memcache)
    self._memcache_set_batcher = auto_batcher_class(self._memcache_set_tasklet,
                                                    max_memcache)
    self._memcache_del_batcher = auto_batcher_class(self._memcache_del_tasklet,
                                                    max_memcache)
    self._memcache_off_batcher = auto_batcher_class(self._memcache_off_tasklet,
                                                    max_memcache)
    # Create a list of batchers for flush().
    self._batchers = [self._get_batcher,
                      self._put_batcher,
                      self._delete_batcher,
                      self._memcache_get_batcher,
                      self._memcache_set_batcher,
                      self._memcache_del_batcher,
                      self._memcache_off_batcher,
                      ]
    # In-process cache: Key -> entity, or None for a known miss.
    self._cache = {}
    self._memcache = memcache.Client()

  # NOTE: The default memcache prefix is altered if an incompatible change is
  # required. Remember to check release notes when using a custom prefix.
  _memcache_prefix = 'NDB9:'  # TODO: Might make this configurable.
  @tasklets.tasklet
  def flush(self):
    """Flush all batchers until none has pending or running work."""
    # Rinse and repeat until all batchers are completely out of work.
    more = True
    while more:
      yield [batcher.flush() for batcher in self._batchers]
      more = False
      for batcher in self._batchers:
        # One batcher's flush may have queued new work on another.
        if batcher._running or batcher._queues:
          more = True
          break
@tasklets.tasklet
def _get_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
# Make the datastore RPC call.
datastore_keys = []
for unused_fut, key in todo:
datastore_keys.append(key)
# Now wait for the datastore RPC(s) and pass the results to the futures.
entities = yield self._conn.async_get(options, datastore_keys)
for ent, (fut, unused_key) in zip(entities, todo):
fut.set_result(ent)
  @tasklets.tasklet
  def _put_tasklet(self, todo, options):
    """Batched Put: one async_put RPC for all queued (future, entity) pairs."""
    if not todo:
      raise RuntimeError('Nothing to do.')
    # TODO: What if the same entity is being put twice?
    # TODO: What if two entities with the same key are being put?
    datastore_entities = []
    for unused_fut, ent in todo:
      datastore_entities.append(ent)
    # Wait for datastore RPC(s).
    keys = yield self._conn.async_put(options, datastore_entities)
    for key, (fut, ent) in zip(keys, todo):
      if key != ent._key:
        if ent._has_complete_key():
          # A complete key must round-trip unchanged; a mismatch is an error.
          raise datastore_errors.BadKeyError(
              'Entity key differs from the one returned by the datastore. '
              'Expected %r, got %r' % (key, ent._key))
        # Incomplete key: adopt the id assigned by the datastore.
        ent._key = key
      fut.set_result(key)
@tasklets.tasklet
def _delete_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
futures = []
datastore_keys = []
for fut, key in todo:
futures.append(fut)
datastore_keys.append(key)
# Wait for datastore RPC(s).
yield self._conn.async_delete(options, datastore_keys)
# Send a dummy result to all original Futures.
for fut in futures:
fut.set_result(None)
# TODO: Unify the policy docstrings (they're getting too verbose).
# All the policy functions may also:
# - be a constant of the right type (instead of a function);
# - return None (instead of a value of the right type);
# - be None (instead of a function or constant).
# Model classes may define class variables or class methods
# _use_{cache,memcache,datastore} or _memcache_timeout to set the
# default policy of that type for that class.
@staticmethod
def default_cache_policy(key):
"""Default cache policy.
This defers to _use_cache on the Model class.
Args:
key: Key instance.
Returns:
A bool or None.
"""
flag = None
if key is not None:
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_use_cache', None)
if policy is not None:
if isinstance(policy, bool):
flag = policy
else:
flag = policy(key)
return flag
_cache_policy = default_cache_policy
def get_cache_policy(self):
"""Return the current context cache policy function.
Returns:
A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
return self._cache_policy
def set_cache_policy(self, func):
"""Set the context cache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
if func is None:
func = self.default_cache_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._cache_policy = func
def _use_cache(self, key, options=None):
"""Return whether to use the context cache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached, False otherwise.
"""
flag = ContextOptions.use_cache(options)
if flag is None:
flag = self._cache_policy(key)
if flag is None:
flag = ContextOptions.use_cache(self._conn.config)
if flag is None:
flag = True
return flag
@staticmethod
def default_memcache_policy(key):
"""Default memcache policy.
This defers to _use_memcache on the Model class.
Args:
key: Key instance.
Returns:
A bool or None.
"""
flag = None
if key is not None:
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_use_memcache', None)
if policy is not None:
if isinstance(policy, bool):
flag = policy
else:
flag = policy(key)
return flag
_memcache_policy = default_memcache_policy
def get_memcache_policy(self):
"""Return the current memcache policy function.
Returns:
A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
return self._memcache_policy
def set_memcache_policy(self, func):
"""Set the memcache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
"""
if func is None:
func = self.default_memcache_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._memcache_policy = func
def _use_memcache(self, key, options=None):
"""Return whether to use memcache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached in memcache, False otherwise.
"""
flag = ContextOptions.use_memcache(options)
if flag is None:
flag = self._memcache_policy(key)
if flag is None:
flag = ContextOptions.use_memcache(self._conn.config)
if flag is None:
flag = True
return flag
@staticmethod
def default_datastore_policy(key):
"""Default datastore policy.
This defers to _use_datastore on the Model class.
Args:
key: Key instance.
Returns:
A bool or None.
"""
flag = None
if key is not None:
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_use_datastore', None)
if policy is not None:
if isinstance(policy, bool):
flag = policy
else:
flag = policy(key)
return flag
_datastore_policy = default_datastore_policy
def get_datastore_policy(self):
"""Return the current context datastore policy function.
Returns:
A function that accepts a Key instance as argument and returns
a bool indicating if it should use the datastore. May be None.
"""
return self._datastore_policy
def set_datastore_policy(self, func):
"""Set the context datastore policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should use the datastore. May be None.
"""
if func is None:
func = self.default_datastore_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._datastore_policy = func
def _use_datastore(self, key, options=None):
"""Return whether to use the datastore for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the datastore should be used, False otherwise.
"""
flag = ContextOptions.use_datastore(options)
if flag is None:
flag = self._datastore_policy(key)
if flag is None:
flag = ContextOptions.use_datastore(self._conn.config)
if flag is None:
flag = True
return flag
@staticmethod
def default_memcache_timeout_policy(key):
"""Default memcache timeout policy.
This defers to _memcache_timeout on the Model class.
Args:
key: Key instance.
Returns:
Memcache timeout to use (integer), or None.
"""
timeout = None
if key is not None and isinstance(key, model.Key):
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_memcache_timeout', None)
if policy is not None:
if isinstance(policy, (int, long)):
timeout = policy
else:
timeout = policy(key)
return timeout
_memcache_timeout_policy = default_memcache_timeout_policy
def set_memcache_timeout_policy(self, func):
"""Set the policy function for memcache timeout (expiration).
Args:
func: A function that accepts a key instance as argument and returns
an integer indicating the desired memcache timeout. May be None.
If the function returns 0 it implies the default timeout.
"""
if func is None:
func = self.default_memcache_timeout_policy
elif isinstance(func, (int, long)):
func = lambda unused_key, flag=func: flag
self._memcache_timeout_policy = func
def get_memcache_timeout_policy(self):
"""Return the current policy function for memcache timeout (expiration)."""
return self._memcache_timeout_policy
def _get_memcache_timeout(self, key, options=None):
"""Return the memcache timeout (expiration) for this key."""
timeout = ContextOptions.memcache_timeout(options)
if timeout is None:
timeout = self._memcache_timeout_policy(key)
if timeout is None:
timeout = ContextOptions.memcache_timeout(self._conn.config)
if timeout is None:
timeout = 0
return timeout
# TODO: What about conflicting requests to different autobatchers,
# e.g. tasklet A calls get() on a given key while tasklet B calls
# delete()? The outcome is nondeterministic, depending on which
# autobatcher gets run first. Maybe we should just flag such
# conflicts as errors, with an overridable policy to resolve them
# differently?
  @tasklets.tasklet
  def get(self, key, **ctx_options):
    """Return a Model instance given the entity key.

    It will use the context cache if the cache policy for the given
    key is enabled.

    Args:
      key: Key instance.
      **ctx_options: Context options.

    Returns:
      A Model instance if the key exists in the datastore; None otherwise.
    """
    options = _make_ctx_options(ctx_options)
    use_cache = self._use_cache(key, options)
    if use_cache:
      if key in self._cache:
        entity = self._cache[key]  # May be None, meaning "doesn't exist".
        if entity is None or entity._key == key:
          # If entity's key didn't change later, it is ok.
          # See issue 13.  http://goo.gl/jxjOP
          raise tasklets.Return(entity)
    use_datastore = self._use_datastore(key, options)
    if (use_datastore and
        isinstance(self._conn, datastore_rpc.TransactionalConnection)):
      # Never read through memcache inside a transaction.
      use_memcache = False
    else:
      use_memcache = self._use_memcache(key, options)
    ns = key.namespace()
    if use_memcache:
      mkey = self._memcache_prefix + key.urlsafe()
      # Request a CAS id when the datastore may later be consulted, so the
      # datastore result can be written back safely with memcache_cas().
      mvalue = yield self.memcache_get(mkey, for_cas=use_datastore,
                                       namespace=ns, use_cache=True)
      # _LOCKED means a writer reserved the key; treat it as a miss.
      if mvalue not in (_LOCKED, None):
        cls = model.Model._kind_map.get(key.kind())
        if cls is None:
          raise TypeError('Cannot find model class for kind %s' % key.kind())
        pb = entity_pb.EntityProto()
        try:
          pb.MergePartialFromString(mvalue)
        except ProtocolBuffer.ProtocolBufferDecodeError:
          logging.warning('Corrupt memcache entry found '
                          'with key %s and namespace %s' % (mkey, ns))
          mvalue = None
        else:
          entity = cls._from_pb(pb)
          # Store the key on the entity since it wasn't written to memcache.
          entity._key = key
          if use_cache:
            # Update in-memory cache.
            self._cache[key] = entity
          raise tasklets.Return(entity)
      if mvalue is None and use_datastore:
        # Miss: write a lock marker, then fetch it with gets() for the CAS id.
        yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns,
                                use_cache=True)
        yield self.memcache_gets(mkey, namespace=ns, use_cache=True)
    if not use_datastore:
      # NOTE: Do not cache this miss.  In some scenarios this would
      # prevent an app from working properly.
      raise tasklets.Return(None)
    if use_cache:
      entity = yield self._get_batcher.add_once(key, options)
    else:
      entity = yield self._get_batcher.add(key, options)
    if entity is not None:
      if use_memcache and mvalue != _LOCKED:
        # Don't serialize the key since it's already the memcache key.
        pbs = entity._to_pb(set_key=False).SerializePartialToString()
        timeout = self._get_memcache_timeout(key, options)
        # Don't yield -- this can run in the background.
        # TODO: See issue 105 though.
        self.memcache_cas(mkey, pbs, time=timeout, namespace=ns)
    if use_cache:
      # Cache hit or miss.  NOTE: In this case it is okay to cache a
      # miss; the datastore is the ultimate authority.
      self._cache[key] = entity
    raise tasklets.Return(entity)
  @tasklets.tasklet
  def put(self, entity, **ctx_options):
    """Store an entity, writing through memcache and the context cache
    according to the applicable policies.

    Args:
      entity: Model instance to store.
      **ctx_options: Context options.

    Returns:
      (Via tasklets.Return) the entity's key, possibly completed by the
      datastore.
    """
    options = _make_ctx_options(ctx_options)
    # TODO: What if the same entity is being put twice?
    # TODO: What if two entities with the same key are being put?
    key = entity._key
    if key is None:
      # Pass a dummy Key to _use_datastore().
      key = model.Key(entity.__class__, None)
    use_datastore = self._use_datastore(key, options)
    use_memcache = None
    if entity._has_complete_key():
      use_memcache = self._use_memcache(key, options)
      if use_memcache:
        # Wait for memcache operations before starting datastore RPCs.
        mkey = self._memcache_prefix + key.urlsafe()
        ns = key.namespace()
        if use_datastore:
          # Lock the key in memcache so readers fall through to datastore.
          yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME,
                                  namespace=ns, use_cache=True)
        else:
          # Memcache-only write: store the serialized entity directly.
          pbs = entity._to_pb(set_key=False).SerializePartialToString()
          timeout = self._get_memcache_timeout(key, options)
          yield self.memcache_set(mkey, pbs, time=timeout, namespace=ns)
    if use_datastore:
      key = yield self._put_batcher.add(entity, options)
      if not isinstance(self._conn, datastore_rpc.TransactionalConnection):
        if use_memcache is None:
          # The key was incomplete above; re-evaluate with the real key.
          use_memcache = self._use_memcache(key, options)
        if use_memcache:
          mkey = self._memcache_prefix + key.urlsafe()
          ns = key.namespace()
          # TODO: Maybe don't yield here, like it get()?
          yield self.memcache_delete(mkey, namespace=ns)
    if key is not None:
      if entity._key != key:
        logging.info('replacing key %s with %s', entity._key, key)
        entity._key = key
      # TODO: For updated entities, could we update the cache first?
      if self._use_cache(key, options):
        # TODO: What if by now the entity is already in the cache?
        self._cache[key] = entity
    raise tasklets.Return(key)
  @tasklets.tasklet
  def delete(self, key, **ctx_options):
    """Delete the entity for key, honoring the memcache/datastore/cache
    policies for that key."""
    options = _make_ctx_options(ctx_options)
    if self._use_memcache(key, options):
      mkey = self._memcache_prefix + key.urlsafe()
      ns = key.namespace()
      # TODO: If not use_datastore, delete instead of lock?
      # Lock the memcache entry so concurrent readers don't resurrect it.
      yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns,
                              use_cache=True)
    if self._use_datastore(key, options):
      yield self._delete_batcher.add(key, options)
      # TODO: Delete from memcache here?
    if self._use_cache(key, options):
      # Record a known miss in the context cache.
      self._cache[key] = None
@tasklets.tasklet
def allocate_ids(self, key, size=None, max=None, **ctx_options):
options = _make_ctx_options(ctx_options)
lo_hi = yield self._conn.async_allocate_ids(options, key, size, max)
raise tasklets.Return(lo_hi)
@tasklets.tasklet
def get_indexes(self, **ctx_options):
options = _make_ctx_options(ctx_options)
index_list = yield self._conn.async_get_indexes(options)
raise tasklets.Return(index_list)
  @utils.positional(3)
  def map_query(self, query, callback, pass_batch_into_callback=None,
                options=None, merge_future=None):
    """Run a query, applying callback to each result.

    Args:
      query: Query instance to run.
      callback: Function applied to each result, or None for identity.
      pass_batch_into_callback: If true, invoke callback(batch, i, ent)
        instead of callback(ent).
      options: Query options, passed to run_to_queue().
      merge_future: Optional future collecting the callback values;
        defaults to a new MultiFuture.

    Returns:
      The merge future.
    """
    mfut = merge_future
    if mfut is None:
      mfut = tasklets.MultiFuture('map_query')

    @tasklets.tasklet
    def helper():
      try:
        inq = tasklets.SerialQueueFuture()
        query.run_to_queue(inq, self._conn, options)
        # NOTE(review): is_ancestor_query is never used below -- confirm
        # before removing.
        is_ancestor_query = query.ancestor is not None
        while True:
          try:
            batch, i, ent = yield inq.getq()
          except EOFError:
            break
          ent = self._update_cache_from_query_result(ent, options)
          if ent is None:
            # Superseded by a cached "known miss"; skip it.
            continue
          if callback is None:
            val = ent
          else:
            # TODO: If the callback raises, log and ignore.
            if pass_batch_into_callback:
              val = callback(batch, i, ent)
            else:
              val = callback(ent)
          mfut.putq(val)
      except GeneratorExit:
        raise
      except Exception, err:
        # Propagate the failure (with traceback) to the merge future too.
        _, _, tb = sys.exc_info()
        mfut.set_exception(err, tb)
        raise
      else:
        mfut.complete()

    helper()
    return mfut
def _update_cache_from_query_result(self, ent, options):
if isinstance(ent, model.Key):
return ent # It was a keys-only query and ent is really a Key.
key = ent._key
if not self._use_cache(key, options):
return ent # This key should not be cached.
# Check the cache. If there is a valid cached entry, substitute
# that for the result, even if the cache has an explicit None.
if key in self._cache:
cached_ent = self._cache[key]
if (cached_ent is None or
cached_ent.key == key and cached_ent.__class__ is ent.__class__):
return cached_ent
# Update the cache.
self._cache[key] = ent
return ent
@utils.positional(2)
def iter_query(self, query, callback=None, pass_batch_into_callback=None,
options=None):
return self.map_query(query, callback=callback, options=options,
pass_batch_into_callback=pass_batch_into_callback,
merge_future=tasklets.SerialQueueFuture())
  @tasklets.tasklet
  def transaction(self, callback, **ctx_options):
    """Run callback() inside a datastore transaction, retrying on failure.

    Will invoke callback() one or more times with the default context
    set to a new, transactional Context.  Returns a Future.  Callback
    may be a tasklet.
    """
    options = _make_ctx_options(ctx_options)
    app = ContextOptions.app(options) or key_module._DefaultAppId()
    # Note: zero retries means try it once.
    retries = ContextOptions.retries(options)
    if retries is None:
      retries = 3
    # Flush our own batchers before entering the transaction.
    yield self.flush()
    for _ in xrange(1 + max(0, retries)):
      transaction = yield self._conn.async_begin_transaction(options, app)
      tconn = datastore_rpc.TransactionalConnection(
          adapter=self._conn.adapter,
          config=self._conn.config,
          transaction=transaction)
      old_ds_conn = datastore._GetConnection()
      tctx = self.__class__(conn=tconn,
                            auto_batcher_class=self._auto_batcher_class)
      try:
        # Copy memcache policies.  Note that get() will never use
        # memcache in a transaction, but put and delete should do their
        # memcache thing (which is to mark the key as deleted for
        # _LOCK_TIME seconds).  Also note that the in-process cache and
        # datastore policies keep their default (on) state.
        tctx.set_memcache_policy(self.get_memcache_policy())
        tctx.set_memcache_timeout_policy(self.get_memcache_timeout_policy())
        tasklets.set_context(tctx)
        datastore._SetConnection(tconn)  # For taskqueue coordination
        try:
          try:
            result = callback()
            if isinstance(result, tasklets.Future):
              result = yield result
          finally:
            # Always flush the transactional context's batchers.
            yield tctx.flush()
        except GeneratorExit:
          raise
        except Exception:
          t, e, tb = sys.exc_info()
          yield tconn.async_rollback(options)  # TODO: Don't block???
          if issubclass(t, datastore_errors.Rollback):
            # Rollback is a deliberate abort, not an error.
            # TODO: Raise value using tasklets.get_return_value(t)?
            return
          else:
            raise t, e, tb
        else:
          ok = yield tconn.async_commit(options)
          if ok:
            # TODO: This is questionable when self is transactional.
            self._cache.update(tctx._cache)
            yield self._clear_memcache(tctx._cache)
            raise tasklets.Return(result)
          # Commit failed: loop around and retry the whole transaction.
      finally:
        datastore._SetConnection(old_ds_conn)
    # Out of retries
    raise datastore_errors.TransactionFailedError(
        'The transaction could not be committed. Please try again.')
def in_transaction(self):
"""Return whether a transaction is currently active."""
return isinstance(self._conn, datastore_rpc.TransactionalConnection)
def clear_cache(self):
"""Clears the in-memory cache.
NOTE: This does not affect memcache.
"""
self._cache.clear()
@tasklets.tasklet
def _clear_memcache(self, keys):
keys = set(key for key in keys if self._use_memcache(key))
futures = []
for key in keys:
mkey = self._memcache_prefix + key.urlsafe()
ns = key.namespace()
fut = self.memcache_delete(mkey, namespace=ns)
futures.append(fut)
yield futures
@tasklets.tasklet
def _memcache_get_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
for_cas, namespace = options
keys = set()
for unused_fut, key in todo:
keys.add(key)
results = yield self._memcache.get_multi_async(keys, for_cas=for_cas,
namespace=namespace)
for fut, key in todo:
fut.set_result(results.get(key))
@tasklets.tasklet
def _memcache_set_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
opname, time, namespace = options
methodname = opname + '_multi_async'
method = getattr(self._memcache, methodname)
mapping = {}
for unused_fut, (key, value) in todo:
mapping[key] = value
results = yield method(mapping, time=time, namespace=namespace)
for fut, (key, unused_value) in todo:
if results is None:
status = memcache.MemcacheSetResponse.ERROR
else:
status = results.get(key)
fut.set_result(status == memcache.MemcacheSetResponse.STORED)
  @tasklets.tasklet
  def _memcache_del_tasklet(self, todo, options):
    """Batched memcache delete: one delete_multi_async for all queued keys."""
    if not todo:
      raise RuntimeError('Nothing to do.')
    seconds, namespace = options
    keys = set()
    for unused_fut, key in todo:
      keys.add(key)
    # NOTE: the zip(keys, statuses) below relies on delete_multi_async
    # returning statuses in the iteration order of this same set object.
    statuses = yield self._memcache.delete_multi_async(keys, seconds=seconds,
                                                       namespace=namespace)
    status_key_mapping = {}
    if statuses:  # On network error, statuses is None.
      for key, status in zip(keys, statuses):
        status_key_mapping[key] = status
    for fut, key in todo:
      status = status_key_mapping.get(key, memcache.DELETE_NETWORK_FAILURE)
      fut.set_result(status)
@tasklets.tasklet
def _memcache_off_tasklet(self, todo, options):
if not todo:
raise RuntimeError('Nothing to do.')
initial_value, namespace = options
mapping = {} # {key: delta}
for unused_fut, (key, delta) in todo:
mapping[key] = delta
results = yield self._memcache.offset_multi_async(mapping,
initial_value=initial_value, namespace=namespace)
for fut, (key, unused_delta) in todo:
result = results.get(key)
if isinstance(result, basestring):
# See http://code.google.com/p/googleappengine/issues/detail?id=2012
# We can fix this without waiting for App Engine to fix it.
result = int(result)
fut.set_result(result)
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False):
"""An auto-batching wrapper for memcache.get() or .get_multi().
Args:
key: Key to set. This must be a string; no prefix is applied.
for_cas: If True, request and store CAS ids on the Context.
namespace: Optional namespace.
Returns:
A Future (!) whose return value is the value retrieved from
memcache, or None.
"""
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(for_cas, bool):
raise TypeError('for_cas must be a bool; received %r' % for_cas)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = (for_cas, namespace)
batcher = self._memcache_get_batcher
if use_cache:
return batcher.add_once(key, options)
else:
return batcher.add(key, options)
  # TODO: Add full docstrings to the remaining memcache_* wrappers below.
def memcache_gets(self, key, namespace=None, use_cache=False):
return self.memcache_get(key, for_cas=True, namespace=namespace,
use_cache=use_cache)
def memcache_set(self, key, value, time=0, namespace=None, use_cache=False):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
options = ('set', time, namespace)
batcher = self._memcache_set_batcher
if use_cache:
return batcher.add_once((key, value), options)
else:
return batcher.add((key, value), options)
def memcache_add(self, key, value, time=0, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_set_batcher.add((key, value),
('add', time, namespace))
def memcache_replace(self, key, value, time=0, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_set_batcher.add((key, value),
('replace', time, namespace))
def memcache_cas(self, key, value, time=0, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(time, (int, long)):
raise TypeError('time must be a number; received %r' % time)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_set_batcher.add((key, value),
('cas', time, namespace))
def memcache_delete(self, key, seconds=0, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(seconds, (int, long)):
raise TypeError('seconds must be a number; received %r' % seconds)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_del_batcher.add(key, (seconds, namespace))
def memcache_incr(self, key, delta=1, initial_value=None, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_off_batcher.add((key, delta),
(initial_value, namespace))
def memcache_decr(self, key, delta=1, initial_value=None, namespace=None):
if not isinstance(key, str):
raise TypeError('key must be a string; received %r' % key)
if not isinstance(delta, (int, long)):
raise TypeError('delta must be a number; received %r' % delta)
if initial_value is not None and not isinstance(initial_value, (int, long)):
raise TypeError('initial_value must be a number or None; received %r' %
initial_value)
if namespace is None:
namespace = namespace_manager.get_namespace()
return self._memcache_off_batcher.add((key, -delta),
(initial_value, namespace))
@tasklets.tasklet
def urlfetch(self, url, payload=None, method='GET', headers={},
allow_truncated=False, follow_redirects=True,
validate_certificate=None, deadline=None, callback=None):
rpc = urlfetch.create_rpc(deadline=deadline, callback=callback)
urlfetch.make_fetch_call(rpc, url,
payload=payload,
method=method,
headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects,
validate_certificate=validate_certificate)
result = yield rpc
raise tasklets.Return(result)
| Python |
"""Tests for stats.py."""
import datetime
import os
import unittest
from .google_imports import datastore
from . import stats
from . import test_utils
class StatsTests(test_utils.NDBTest):
  """Tests for the NDB wrappers around datastore statistics entities."""

  def setUp(self):
    """Setup test infrastructure."""
    super(StatsTests, self).setUp()
    # Seed the test datastore with one entity per stat kind under test.
    self.PopulateStatEntities()

  # Module under test (consumed by the NDBTest base class).
  the_module = stats
  def PopulateStatEntities(self):
    """Insert stat entities into the datastore.

    Creates representative entities for every stat kind exercised by
    the tests: singletons for GlobalStat, and several instances per
    keyed kind so query filtering can be verified.
    """
    # GlobalStat
    self.CreateStatEntity(stats.GlobalStat.STORED_KIND_NAME,
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # NamespaceStat
    self.CreateStatEntity(stats.NamespaceStat.STORED_KIND_NAME,
                          subject_namespace='name-space',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # KindStat
    self.CreateStatEntity(stats.KindStat.STORED_KIND_NAME, 'foo',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)
    self.CreateStatEntity(stats.KindStat.STORED_KIND_NAME, 'foo2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # KindRootEntityStat
    self.CreateStatEntity(stats.KindRootEntityStat.STORED_KIND_NAME, 'foo3',
                          has_entity_bytes=True)
    self.CreateStatEntity(stats.KindRootEntityStat.STORED_KIND_NAME, 'foo4',
                          has_entity_bytes=True)

    # KindNonRootEntityStat
    self.CreateStatEntity(stats.KindNonRootEntityStat.STORED_KIND_NAME, 'foo5',
                          has_entity_bytes=True)
    self.CreateStatEntity(stats.KindNonRootEntityStat.STORED_KIND_NAME, 'foo6',
                          has_entity_bytes=True)

    # PropertyTypeStat
    self.CreateStatEntity(stats.PropertyTypeStat.STORED_KIND_NAME,
                          property_type='pt1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.PropertyTypeStat.STORED_KIND_NAME,
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyTypeStat
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo1',
                          property_type='pt1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo1',
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo2',
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyNameStat
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo11',
                          property_name='pn1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo11',
                          property_name='pn2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo21',
                          property_name='pn2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyNamePropertyTypeStat
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo12',
        property_type='pt1',
        property_name='pn1',
        has_entity_bytes=True,
        has_builtin_index_stats=True)
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo12',
        property_type='pt2',
        property_name='pn2',
        has_entity_bytes=True,
        has_builtin_index_stats=True)
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo22',
        property_type='pt2',
        property_name='pn2',
        has_entity_bytes=True,
        has_builtin_index_stats=True)

    # KindCompositeIndexStat
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo12',
        composite_index_id=1)
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo12',
        composite_index_id=2)
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo22',
        composite_index_id=3)
def CreateStatEntity(self,
                     kind,
                     kind_name=None,
                     property_type=None,
                     property_name=None,
                     subject_namespace=None,
                     composite_index_id=None,
                     has_entity_bytes=None,
                     has_builtin_index_stats=None,
                     has_composite_index_stats=None):
  """Create and store a single Statistic datastore entity.

  Args:
    kind: The name of the kind to store.
    kind_name: The value of the 'kind_name' property to set on the entity.
    property_type: The value of the 'property_type' property to set on the
      entity.
    property_name: The value of the 'property_name' property to set on the
      entity.
    subject_namespace: The namespace for NamespaceStat entities.
    composite_index_id: The index id of the composite index.
    has_entity_bytes: If true, set the entity_bytes property.
    has_builtin_index_stats: If true, set builtin_index_bytes and
      builtin_index_count.
    has_composite_index_stats: If true, set composite_index_bytes and
      composite_index_count.
  """
  entity = datastore.Entity(kind)
  # Every stat entity carries these three baseline fields.
  entity['bytes'] = 4
  entity['count'] = 2
  entity['timestamp'] = datetime.datetime.utcfromtimestamp(40)
  if has_entity_bytes:
    entity['entity_bytes'] = 2
  if has_builtin_index_stats:
    entity['builtin_index_count'] = 3
    entity['builtin_index_bytes'] = 1
  if has_composite_index_stats:
    entity['composite_index_count'] = 2
    entity['composite_index_bytes'] = 1
  # Optional scalar properties: set each one only when a value was given.
  optional_props = [('kind_name', kind_name),
                    ('property_type', property_type),
                    ('property_name', property_name),
                    ('subject_namespace', subject_namespace),
                    ('index_id', composite_index_id)]
  for prop, value in optional_props:
    if value is not None:
      entity[prop] = value
  datastore.Put(entity)
def testGlobalStat(self):
  """Test fetching the global stat singleton."""
  res = stats.GlobalStat.query().fetch()
  self.assertEqual(1, len(res))
  stat = res[0]
  self.assertEqual(4, stat.bytes)
  self.assertEqual(2, stat.entity_bytes)
  self.assertEqual(3, stat.builtin_index_count)
  self.assertEqual(1, stat.builtin_index_bytes)
  self.assertEqual(2, stat.composite_index_count)
  self.assertEqual(1, stat.composite_index_bytes)
def testNamespaceStat(self):
  """Test fetching the per-namespace stat entity."""
  res = stats.NamespaceStat.query().fetch()
  self.assertEqual(1, len(res))
  stat = res[0]
  self.assertEqual(4, stat.bytes)
  self.assertEqual('name-space', stat.subject_namespace)
  self.assertEqual(2, stat.entity_bytes)
  self.assertEqual(3, stat.builtin_index_count)
  self.assertEqual(1, stat.builtin_index_bytes)
  self.assertEqual(2, stat.composite_index_count)
  self.assertEqual(1, stat.composite_index_bytes)
def testKindStat(self):
  """Test fetching the Kind stats."""
  res = stats.KindStat.query().fetch()
  self.assertEqual(2, len(res))
  self.assertEqual(['foo', 'foo2'], [s.kind_name for s in res])
  self.assertEqual(2, res[0].entity_bytes)
  self.assertEqual(3, res[0].builtin_index_count)
  self.assertEqual(1, res[0].builtin_index_bytes)
  self.assertEqual(2, res[0].composite_index_count)
  self.assertEqual(1, res[0].composite_index_bytes)
def testKindRootEntityStat(self):
  """Test fetching the Kind root entity stats."""
  res = stats.KindRootEntityStat.query().fetch()
  self.assertEqual(2, len(res))
  self.assertEqual(['foo3', 'foo4'], [s.kind_name for s in res])
  self.assertEqual(2, res[0].entity_bytes)
def testKindNonRootEntityStat(self):
  """Test fetching the Kind non-root entity stats."""
  res = stats.KindNonRootEntityStat.query().fetch()
  self.assertEqual(2, len(res))
  self.assertEqual(['foo5', 'foo6'], [s.kind_name for s in res])
  self.assertEqual(2, res[0].entity_bytes)
def testPropertyTypeStat(self):
  """Test fetching the property type stats."""
  res = stats.PropertyTypeStat.query().fetch()
  self.assertEqual(2, len(res))
  self.assertEqual(['pt1', 'pt2'], [s.property_type for s in res])
  self.assertEqual(2, res[0].entity_bytes)
  self.assertEqual(3, res[0].builtin_index_count)
  self.assertEqual(1, res[0].builtin_index_bytes)
def testKindPropertyTypeStat(self):
  """Test fetching the (kind, property type) stats."""
  res = stats.KindPropertyTypeStat.query().fetch()
  self.assertEqual(3, len(res))
  # Results come back ordered by (kind_name, property_type).
  self.assertEqual([('foo1', 'pt1'), ('foo1', 'pt2'), ('foo2', 'pt2')],
                   [(s.kind_name, s.property_type) for s in res])
  self.assertEqual(2, res[0].entity_bytes)
  self.assertEqual(3, res[0].builtin_index_count)
  self.assertEqual(1, res[0].builtin_index_bytes)
  # A kind_name filter narrows the result set to the matching kind.
  query = stats.KindPropertyTypeStat.query(
      stats.KindPropertyTypeStat.kind_name == 'foo2')
  res = query.fetch()
  self.assertEqual(1, len(res))
  self.assertEqual('foo2', res[0].kind_name)
def testKindPropertyNameStat(self):
  """Test fetching the (kind, property name) stats."""
  res = stats.KindPropertyNameStat.query().fetch()
  self.assertEqual(3, len(res))
  # Results come back ordered by (kind_name, property_name).
  self.assertEqual([('foo11', 'pn1'), ('foo11', 'pn2'), ('foo21', 'pn2')],
                   [(s.kind_name, s.property_name) for s in res])
  self.assertEqual(2, res[0].entity_bytes)
  self.assertEqual(3, res[0].builtin_index_count)
  self.assertEqual(1, res[0].builtin_index_bytes)
  # A kind_name filter narrows the result set to the matching kind.
  query = stats.KindPropertyNameStat.query(
      stats.KindPropertyNameStat.kind_name == 'foo21')
  res = query.fetch()
  self.assertEqual(1, len(res))
  self.assertEqual('foo21', res[0].kind_name)
def testKindPropertyNamePropertyTypeStat(self):
  """Test fetching the (kind, property name, property type) stats."""
  res = stats.KindPropertyNamePropertyTypeStat.query().fetch()
  self.assertEqual(3, len(res))
  # Results come back ordered by (kind_name, property_name, property_type).
  self.assertEqual([('foo12', 'pn1', 'pt1'),
                    ('foo12', 'pn2', 'pt2'),
                    ('foo22', 'pn2', 'pt2')],
                   [(s.kind_name, s.property_name, s.property_type)
                    for s in res])
  self.assertEqual(2, res[0].entity_bytes)
  self.assertEqual(3, res[0].builtin_index_count)
  self.assertEqual(1, res[0].builtin_index_bytes)
  # A kind_name filter narrows the result set to the matching kind.
  query = stats.KindPropertyNamePropertyTypeStat.query(
      stats.KindPropertyNamePropertyTypeStat.kind_name == 'foo22')
  res = query.fetch()
  self.assertEqual(1, len(res))
  self.assertEqual('foo22', res[0].kind_name)
def testKindCompositeIndex(self):
  """Test fetching the (kind, composite index id) stats."""
  res = stats.KindCompositeIndexStat.query().fetch()
  self.assertEqual(3, len(res))
  # Results come back ordered by (kind_name, index_id).
  self.assertEqual([('foo12', 1), ('foo12', 2), ('foo22', 3)],
                   [(s.kind_name, s.index_id) for s in res])
  self.assertEqual(4, res[0].bytes)
  self.assertEqual(2, res[0].count)
def main():
  """Run all test cases defined in this module."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
  """Django middleware for NDB.

  To use NDB with django, add

    'ndb.NdbDjangoMiddleware',

  to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
  Or, if you are using the ndb version from the SDK, use

    'google.appengine.ext.ndb.NdbDjangoMiddleware',

  It's best to insert it in front of any other middleware classes,
  since some other middleware may make datastore calls and those won't be
  handled properly if that middleware is invoked before this middleware.

  See http://docs.djangoproject.com/en/dev/topics/http/middleware/.
  """

  def process_request(self, unused_request):
    """Called by Django before deciding which view to execute."""
    # As in the first half of toplevel() in context.py: discard any async
    # work still pending from an earlier request, then install a brand
    # new default context for this one.
    tasklets._state.clear_all_pending()
    tasklets.set_context(tasklets.make_default_context())

  @staticmethod
  def _finish():
    # As in the finally clause in toplevel() in context.py: detach the
    # current context before flushing so nothing new can be scheduled on
    # it, then drain the event loop (ensures writes are flushed, etc.).
    current = tasklets.get_context()
    tasklets.set_context(None)
    current.flush().check_success()
    eventloop.run()

  def process_response(self, request, response):
    """Called by Django just before returning a response."""
    self._finish()
    return response

  def process_exception(self, unused_request, unused_exception):
    """Called by Django when a view raises an exception."""
    self._finish()
    return None
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
  """Django middleware for NDB.

  To use NDB with django, add

    'ndb.NdbDjangoMiddleware',

  to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
  Or, if you are using the ndb version from the SDK, use

    'google.appengine.ext.ndb.NdbDjangoMiddleware',

  It's best to insert it in front of any other middleware classes,
  since some other middleware may make datastore calls and those won't be
  handled properly if that middleware is invoked before this middleware.

  See http://docs.djangoproject.com/en/dev/topics/http/middleware/.
  """

  def process_request(self, unused_request):
    """Called by Django before deciding which view to execute."""
    # Compare to the first half of toplevel() in context.py.
    # Discard any async work left pending by a previous request so it
    # cannot leak into this one.
    tasklets._state.clear_all_pending()
    # Create and install a new context.
    ctx = tasklets.make_default_context()
    tasklets.set_context(ctx)

  @staticmethod
  def _finish():
    # Compare to the finally clause in toplevel() in context.py.
    # Detach the context first so no new work can be scheduled on it,
    # then flush it and wait for the flush to succeed.
    ctx = tasklets.get_context()
    tasklets.set_context(None)
    ctx.flush().check_success()
    eventloop.run()  # Ensure writes are flushed, etc.

  def process_response(self, request, response):
    """Called by Django just before returning a response.

    Returns the response unchanged; this hook only flushes pending
    NDB work before the response goes out.
    """
    self._finish()
    return response

  def process_exception(self, unused_request, unused_exception):
    """Called by Django when a view raises an exception.

    Returns None so Django applies its default exception handling.
    """
    self._finish()
    return None
| Python |
"""Polymorphic models and queries.
The standard NDB Model class only supports 'functional polymorphism'.
That is, you can create a subclass of Model, and then subclass that
class, as many generations as necessary, and those classes will share
all the same properties and behaviors of their base classes. However,
subclassing Model in this way gives each subclass its own kind. This
means that it is not possible to do 'polymorphic queries'. Building a
query on a base class will only return entities whose kind matches
that base class's kind, and exclude entities that are instances of
some subclass of that base class.
The PolyModel class defined here lets you create class hierarchies
that support polymorphic queries. Simply subclass PolyModel instead
of Model.
"""
from . import model
__all__ = ['PolyModel']
_CLASS_KEY_PROPERTY = 'class'
class _ClassKeyProperty(model.StringProperty):
  """Property to store the 'class key' of a polymorphic class.

  The class key is a list of strings describing a polymorphic entity's
  place within its class hierarchy.  This property is automatically
  calculated.  For example:

    class Foo(PolyModel): ...
    class Bar(Foo): ...
    class Baz(Bar): ...

    Foo().class_ == ['Foo']
    Bar().class_ == ['Foo', 'Bar']
    Baz().class_ == ['Foo', 'Bar', 'Baz']
  """

  def __init__(self, name=_CLASS_KEY_PROPERTY, indexed=True):
    """Constructor.

    If you really want to you can give this a different datastore name
    or make it unindexed.  For example:

      class Foo(PolyModel):
        class_ = _ClassKeyProperty(indexed=False)
    """
    # Always repeated: the value is the full list of class names.
    super(_ClassKeyProperty, self).__init__(
        name=name, indexed=indexed, repeated=True)

  def _set_value(self, entity, value):
    """Reject assignment: class_ is read-only from the user's perspective."""
    raise TypeError('%s is a read-only property' % self._code_name)

  def _get_value(self, entity):
    """Return the class key, computing and caching it on first access."""
    class_key = super(_ClassKeyProperty, self)._get_value(entity)
    if not class_key:
      class_key = entity._class_key()
      self._store_value(entity, class_key)
    return class_key

  def _prepare_for_put(self, entity):
    """Make sure class_ is initialized before the entity is serialized."""
    self._get_value(entity)  # Called only for its caching side effect.
class PolyModel(model.Model):
  """Base class for class hierarchies supporting polymorphic queries.

  Use this class to build hierarchies that can be queried based on
  their types.

  Example:

    Consider the following model hierarchy:

             +------+
             |Animal|
             +------+
                |
        +-----------------+
        |                 |
     +------+          +------+
     |Canine|          |Feline|
     +------+          +------+
        |                 |
     +-------+         +-------+
     |       |         |       |
   +---+  +----+     +---+ +-------+
   |Dog|  |Wolf|     |Cat| |Panther|
   +---+  +----+     +---+ +-------+

    This class hierarchy has three levels.  The first is the 'root
    class'.  All models in a single class hierarchy must inherit from
    this root.  All models in the hierarchy are stored as the same
    kind as the root class.  For example, Panther entities when stored
    to the datastore are of the kind 'Animal'.  Querying against the
    Animal kind will retrieve Cats, Dogs and Canines, for example,
    that match your query.  Different classes stored in the root
    class' kind are identified by their class key.  When loaded from
    the datastore, it is mapped to the appropriate implementation
    class.

  Polymorphic properties:

    Properties that are defined in a given base class within a
    hierarchy are stored in the datastore for all subclasses only.
    So, if the Feline class had a property called 'whiskers', the Cat
    and Panther entities would also have whiskers, but not Animal,
    Canine, Dog or Wolf.

  Polymorphic queries:

    When written to the datastore, all polymorphic objects
    automatically have a property called 'class' that you can query
    against.  Using this property it is possible to easily write a
    query against any sub-hierarchy.  For example, to fetch only
    Canine objects, including all Dogs and Wolves:

      Canine.query()

    The 'class' property is not meant to be used by your code other
    than for queries.  Since it is supposed to represent the real
    Python class it is intended to be hidden from view.  (Although if
    you feel the need, it is accessible as the 'class_' attribute.)

  Root class:

    The root class is the class from which all other classes of the
    hierarchy inherit.  Each hierarchy has a single root class.  A
    class is a root class if it is an immediate child of PolyModel.
    The subclasses of the root class are all the same kind as the root
    class.  In other words:

      Animal.kind() == Feline.kind() == Panther.kind() == 'Animal'

  Note:

    All classes in a given hierarchy must have unique names, since
    the class name is used to identify the appropriate subclass.
  """

  # Automatically computed list of class names, e.g. ['Animal', 'Feline'].
  class_ = _ClassKeyProperty()

  _class_map = {}  # Map class key (tuple of class names) -> suitable subclass.

  @classmethod
  def _update_kind_map(cls):
    """Override; called by Model._fix_up_properties().

    Update the kind map as well as the class map, except for PolyModel
    itself.
    """
    bases = cls._get_hierarchy()
    if bases:
      # Every class in the hierarchy registers under the root's kind, so
      # kind-based lookups always resolve to the root class.
      cls._kind_map[cls._get_kind()] = bases[0]
      cls._class_map[tuple(cls._class_key())] = cls

  @classmethod
  def _from_pb(cls, pb, set_key=True, ent=None, key=None):
    """Override.

    Use the class map to give the entity the correct subclass.
    """
    prop_name = cls.class_._name
    class_name = []
    # The class key may have been stored as indexed or unindexed (raw)
    # properties, so scan both property lists of the protobuf.
    for plist in [pb.property_list(), pb.raw_property_list()]:
      for p in plist:
        if p.name() == prop_name:
          class_name.append(p.value().stringvalue())
    # Fall back to cls itself when the class key is unknown.
    cls = cls._class_map.get(tuple(class_name), cls)
    return super(PolyModel, cls)._from_pb(pb, set_key, ent, key)

  @classmethod
  def _class_key(cls):
    """Return the class key.

    This is a list of class names, e.g. ['Animal', 'Feline', 'Cat'].
    """
    return [c._class_name() for c in cls._get_hierarchy()]

  @classmethod
  def _get_kind(cls):
    """Override.

    Make sure that the kind returned is the root class of the
    polymorphic hierarchy.
    """
    bases = cls._get_hierarchy()
    if not bases:
      # We have to jump through some hoops to call the superclass'
      # _get_kind() method.  First, this is called by the metaclass
      # before the PolyModel name is defined, so it can't use
      # super(PolyModel, cls)._get_kind().  Second, we can't just call
      # Model._get_kind() because that always returns 'Model'.  Hence
      # the 'im_func' hack.
      return model.Model._get_kind.im_func(cls)
    else:
      return bases[0]._class_name()

  @classmethod
  def _class_name(cls):
    """Return the class name.

    This is overridable in case you want to use a different class
    name.  The main use case is probably to maintain backwards
    compatibility with datastore contents after renaming a class.

    NOTE: When overriding this for an intermediate class in your
    hierarchy (as opposed to a leaf class), make sure to test
    cls.__name__, or else all subclasses will appear to have the
    same class name.
    """
    return cls.__name__

  @classmethod
  def _get_hierarchy(cls):
    """Internal helper to return the list of polymorphic base classes.

    This returns a list of class objects, e.g. [Animal, Feline, Cat].
    """
    bases = []
    # Walk the MRO until we pass PolyModel (the first class in the MRO
    # without _get_hierarchy is PolyModel's own base).
    for base in cls.mro():  # pragma: no branch
      if not hasattr(base, '_get_hierarchy'):
        break
      bases.append(base)
    del bases[-1]  # Delete PolyModel itself.
    bases.reverse()  # Root class first, cls last.
    return bases

  @classmethod
  def _query(cls, *args, **kwds):
    """Override.

    This inserts an implicit filter on the class property.
    """
    qry = super(PolyModel, cls)._query(**kwds)
    # Restrict results to this class and its subclasses; user filters
    # are applied on top of the implicit class filter.
    qry = qry.filter(cls.class_ == cls._class_name())
    if args:
      qry = qry.filter(*args)
    return qry

  query = _query
| Python |
"""Models to be used when accessing app specific datastore usage statistics.
These entities cannot be created by users, but are populated in the
application's datastore by offline processes run by the Google App Engine team.
"""
# NOTE: All constant strings in this file should be kept in sync with
# those in google/appengine/ext/db/stats.py.
from . import model
__all__ = ['BaseKindStatistic',
'BaseStatistic',
'GlobalStat',
'KindCompositeIndexStat',
'KindNonRootEntityStat',
'KindPropertyNamePropertyTypeStat',
'KindPropertyNameStat',
'KindPropertyTypeStat',
'KindRootEntityStat',
'KindStat',
'NamespaceGlobalStat',
'NamespaceKindCompositeIndexStat',
'NamespaceKindNonRootEntityStat',
'NamespaceKindPropertyNamePropertyTypeStat',
'NamespaceKindPropertyNameStat',
'NamespaceKindPropertyTypeStat',
'NamespaceKindRootEntityStat',
'NamespaceKindStat',
'NamespacePropertyTypeStat',
'NamespaceStat',
'PropertyTypeStat',
]
class BaseStatistic(model.Model):
  """Base Statistic Model class.

  Attributes:
    bytes: the total number of bytes taken up in the datastore for the
      statistic instance.
    count: the total number of occurrences of the statistic in the
      datastore.
    timestamp: the time the statistic instance was written to the datastore.
  """
  # This is necessary for the _get_kind() classmethod override.
  STORED_KIND_NAME = '__BaseStatistic__'

  # The number of bytes that is taken up.
  bytes = model.IntegerProperty()

  # The number of entity records.
  count = model.IntegerProperty()

  # When this statistic was inserted into the datastore.
  timestamp = model.DateTimeProperty()

  @classmethod
  def _get_kind(cls):
    """Kind name override: each subclass stores under its STORED_KIND_NAME."""
    return cls.STORED_KIND_NAME
class BaseKindStatistic(BaseStatistic):
  """Base Statistic Model class for stats associated with kinds.

  Attributes:
    kind_name: the name of the kind associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
      in the datastore minus the cost of storing indices.
  """
  # This is necessary for the _get_kind() classmethod override.
  STORED_KIND_NAME = '__BaseKindStatistic__'

  # The name of the kind.
  kind_name = model.StringProperty()

  # The number of bytes that is taken up in the entity table.  entity_bytes
  # does not reflect the storage allocated for indexes, either built-in or
  # composite indexes.
  entity_bytes = model.IntegerProperty(default=0L)
class GlobalStat(BaseStatistic):
  """An aggregate of all entities across the entire application.

  This statistic only has a single instance in the datastore that contains
  the total number of entities stored and the total number of bytes they
  take up.

  Attributes:
    entity_bytes: the number of bytes taken up to store the statistic
      in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
      index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Total__'

  # The number of bytes that is taken up in entity storage.
  entity_bytes = model.IntegerProperty(default=0L)

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)

  # The number of bytes taken up for composite index entries.
  composite_index_bytes = model.IntegerProperty(default=0L)

  # The number of composite index entries.
  composite_index_count = model.IntegerProperty(default=0L)
class NamespaceStat(BaseStatistic):
  """An aggregate of all entities across an entire namespace.

  This statistic has one instance per namespace.  The key_name is the
  represented namespace.  NamespaceStat entities will only be found
  in the namespace "" (empty string).  It contains the total
  number of entities stored and the total number of bytes they take up.

  Attributes:
    subject_namespace: the namespace associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
      in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
      index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Namespace__'

  # The namespace name this NamespaceStat refers to.
  subject_namespace = model.StringProperty()

  # The number of bytes that is taken up in entity storage.
  entity_bytes = model.IntegerProperty(default=0L)

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)

  # The number of bytes taken up for composite index entries.
  composite_index_bytes = model.IntegerProperty(default=0L)

  # The number of composite index entries.
  composite_index_count = model.IntegerProperty(default=0L)
class KindStat(BaseKindStatistic):
  """An aggregate of all entities at the granularity of their Kind.

  There is an instance of the KindStat for every Kind that is in the
  application's datastore.  This stat contains per-Kind statistics.

  Attributes:
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
      index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Kind__'

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)

  # The number of bytes taken up for composite index entries.
  composite_index_bytes = model.IntegerProperty(default=0L)

  # The number of composite index entries.
  composite_index_count = model.IntegerProperty(default=0L)
class KindRootEntityStat(BaseKindStatistic):
  """Statistics of the number of root entities in the datastore by Kind.

  There is an instance of the KindRootEntityStat for every Kind that is in
  the application's datastore and has an instance that is a root entity.
  This stat contains statistics regarding these root entity instances.
  """
  STORED_KIND_NAME = '__Stat_Kind_IsRootEntity__'
class KindNonRootEntityStat(BaseKindStatistic):
  """Statistics of the number of non-root entities in the datastore by Kind.

  There is an instance of the KindNonRootEntityStat for every Kind that is
  in the application's datastore that is not a root entity.  This stat
  contains statistics regarding these non-root entity instances.
  """
  STORED_KIND_NAME = '__Stat_Kind_NotRootEntity__'
class PropertyTypeStat(BaseStatistic):
  """An aggregate of all properties across the entire application by type.

  There is an instance of the PropertyTypeStat for every property type
  (google.appengine.api.datastore_types._PROPERTY_TYPES) in use by the
  application in its datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
      in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType__'

  # The name of the property type.
  property_type = model.StringProperty()

  # The number of bytes that is taken up in entity storage.
  entity_bytes = model.IntegerProperty(default=0L)

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyTypeStat(BaseKindStatistic):
  """Statistics on (kind, property_type) tuples in the app's datastore.

  There is an instance of the KindPropertyTypeStat for every
  (kind, property_type) tuple in the application's datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType_Kind__'

  # The name of the property type.
  property_type = model.StringProperty()

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNameStat(BaseKindStatistic):
  """Statistics on (kind, property_name) tuples in the app's datastore.

  There is an instance of the KindPropertyNameStat for every
  (kind, property_name) tuple in the application's datastore.

  Attributes:
    property_name: the name of the property associated with the statistic
      instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyName_Kind__'

  # The name of the property.
  property_name = model.StringProperty()

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
  """Statistic on (kind, property_name, property_type) tuples in the
  datastore.

  There is an instance of the KindPropertyNamePropertyTypeStat for every
  (kind, property_name, property_type) tuple in the application's datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    property_name: the name of the property associated with the statistic
      instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
      index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType_PropertyName_Kind__'

  # The name of the property type.
  property_type = model.StringProperty()

  # The name of the property.
  property_name = model.StringProperty()

  # The number of bytes taken up for built-in index entries.
  builtin_index_bytes = model.IntegerProperty(default=0L)

  # The number of built-in index entries.
  builtin_index_count = model.IntegerProperty(default=0L)
class KindCompositeIndexStat(BaseStatistic):
  """Statistic on (kind, composite_index_id) tuples in the datastore.

  There is an instance of the KindCompositeIndexStat for every unique
  (kind, composite_index_id) tuple in the application's datastore indexes.

  Attributes:
    index_id: the id of the composite index associated with the statistic
      instance.
    kind_name: the name of the kind associated with the statistic instance.
  """
  STORED_KIND_NAME = '__Stat_Kind_CompositeIndex__'

  # The id of the composite index.
  index_id = model.IntegerProperty()

  # The name of the kind.
  kind_name = model.StringProperty()
# The following specify namespace-specific stats.
# These types are specific to the datastore namespace they are located
# within. These will only be produced if datastore entities exist
# in a namespace other than the empty namespace (i.e. namespace="").
class NamespaceGlobalStat(GlobalStat):
  """GlobalStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_Total__'
class NamespaceKindStat(KindStat):
  """KindStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_Kind__'
class NamespaceKindRootEntityStat(KindRootEntityStat):
  """KindRootEntityStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_Kind_IsRootEntity__'
class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
  """KindNonRootEntityStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_Kind_NotRootEntity__'
class NamespacePropertyTypeStat(PropertyTypeStat):
  """PropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_PropertyType__'
class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
  """KindPropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_PropertyType_Kind__'
class NamespaceKindPropertyNameStat(KindPropertyNameStat):
  """KindPropertyNameStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_PropertyName_Kind__'
class NamespaceKindPropertyNamePropertyTypeStat(
    KindPropertyNamePropertyTypeStat):
  """KindPropertyNamePropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  STORED_KIND_NAME = '__Stat_Ns_PropertyType_PropertyName_Kind__'
class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
  """Per-namespace counterpart of KindCompositeIndexStat.

  One of these is stored inside each individual namespace, holding the
  composite-index statistics for that namespace alone.
  """
  STORED_KIND_NAME = '__Stat_Ns_Kind_CompositeIndex__'
# Maps a datastore stat entity kind name to its respective model class.
# Derived from the classes themselves so the key is always each class's
# own STORED_KIND_NAME.
# NOTE: Any new stats added to this module should also be added here.
_DATASTORE_STATS_CLASSES_BY_KIND = dict(
    (stat_class.STORED_KIND_NAME, stat_class)
    for stat_class in (
        GlobalStat,
        NamespaceStat,
        KindStat,
        KindRootEntityStat,
        KindNonRootEntityStat,
        PropertyTypeStat,
        KindPropertyTypeStat,
        KindPropertyNameStat,
        KindPropertyNamePropertyTypeStat,
        KindCompositeIndexStat,
        NamespaceGlobalStat,
        NamespaceKindStat,
        NamespaceKindRootEntityStat,
        NamespaceKindNonRootEntityStat,
        NamespacePropertyTypeStat,
        NamespaceKindPropertyTypeStat,
        NamespaceKindPropertyNameStat,
        NamespaceKindPropertyNamePropertyTypeStat,
        NamespaceKindCompositeIndexStat,
    ))
| Python |
"""Models and helper functions for access to app's datastore metadata.
These entities cannot be created by users, but are created as results of
__namespace__, __kind__ and __property__ metadata queries.
A simplified API is also offered:
ndb.metadata.get_namespaces(): A list of namespace names.
ndb.metadata.get_kinds(): A list of kind names.
ndb.metadata.get_properties_of_kind(kind):
A list of property names for the given kind name.
ndb.metadata.get_representations_of_kind(kind):
A dict mapping property names to lists of representation ids.
All but get_namespaces() implicitly apply to the current namespace.
All have optional start and end arguments to limit the query to a
range of names, such that start <= name < end.
"""
from . import model
__all__ = ['Namespace', 'Kind', 'Property',
'get_namespaces', 'get_kinds',
'get_properties_of_kind', 'get_representations_of_kind',
]
class _BaseMetadata(model.Model):
  """Common base for the metadata model classes.

  Subclasses override KIND_NAME so queries against them target the
  corresponding reserved metadata kind.
  """

  # Never instantiate this class directly; always use a subclass.
  KIND_NAME = ''

  @classmethod
  def _get_kind(cls):
    """Return the reserved metadata kind targeted by this model class."""
    return cls.KIND_NAME
class Namespace(_BaseMetadata):
  """Model for __namespace__ metadata query results."""

  KIND_NAME = '__namespace__'
  EMPTY_NAMESPACE_ID = 1  # == datastore_types._EMPTY_NAMESPACE_ID

  @property
  def namespace_name(self):
    """The namespace name encoded in this entity's key."""
    return self.key_to_namespace(self.key)

  @classmethod
  def key_for_namespace(cls, namespace):
    """Return the __namespace__ Key for a namespace name.

    Args:
      namespace: A string giving the namespace whose key is requested.

    Returns:
      The Key for the namespace.
    """
    # The empty (default) namespace is stored under a reserved numeric
    # id instead of an empty string name.
    if not namespace:
      return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)
    return model.Key(cls.KIND_NAME, namespace)

  @classmethod
  def key_to_namespace(cls, key):
    """Return the namespace name encoded in a __namespace__ key.

    Args:
      key: key whose name is requested.

    Returns:
      The namespace specified by key; '' for the empty namespace.
    """
    return key.string_id() or ''
class Kind(_BaseMetadata):
  """Model for __kind__ metadata query results."""

  KIND_NAME = '__kind__'

  @property
  def kind_name(self):
    """The kind name encoded in this entity's key."""
    return self.key_to_kind(self.key)

  @classmethod
  def key_for_kind(cls, kind):
    """Return the __kind__ key for a kind name.

    Args:
      kind: kind whose key is requested.

    Returns:
      The key for kind.
    """
    return model.Key(cls.KIND_NAME, kind)

  @classmethod
  def key_to_kind(cls, key):
    """Return the kind name encoded in a __kind__ key.

    Args:
      key: key whose name is requested.

    Returns:
      The kind specified by key.
    """
    return key.id()
class Property(_BaseMetadata):
  """Model for __property__ metadata query results."""

  KIND_NAME = '__property__'

  @property
  def property_name(self):
    """The property name encoded in this entity's key."""
    return self.key_to_property(self.key)

  @property
  def kind_name(self):
    """The kind name encoded in this entity's key."""
    return self.key_to_kind(self.key)

  property_representation = model.StringProperty(repeated=True)

  @classmethod
  def key_for_kind(cls, kind):
    """Return the ancestor key for all __property__ keys of a kind.

    Args:
      kind: kind whose key is requested.

    Returns:
      The parent key for __property__ keys of kind.
    """
    return model.Key(Kind.KIND_NAME, kind)

  @classmethod
  def key_for_property(cls, kind, property):
    """Return the __property__ key for property of kind.

    Args:
      kind: kind whose key is requested.
      property: property whose key is requested.

    Returns:
      The key for property of kind.
    """
    return model.Key(Kind.KIND_NAME, kind, Property.KIND_NAME, property)

  @classmethod
  def key_to_kind(cls, key):
    """Return the kind encoded in a __property__ (or __kind__) key.

    Args:
      key: key whose kind name is requested.

    Returns:
      The kind specified by key.
    """
    # A full property key carries the kind in its parent; a bare kind
    # key carries it directly.
    if key.kind() != Kind.KIND_NAME:
      return key.parent().id()
    return key.id()

  @classmethod
  def key_to_property(cls, key):
    """Return the property name encoded in a __property__ key.

    Args:
      key: key whose property name is requested.

    Returns:
      property specified by key, or None if the key specified only a kind.
    """
    if key.kind() != Kind.KIND_NAME:
      return key.id()
    return None
def get_namespaces(start=None, end=None):
  """Return all namespaces in the specified range.

  Args:
    start: only return namespaces >= start if start is not None.
    end: only return namespaces < end if end is not None.

  Returns:
    A list of namespace names between the (optional) start and end values.
  """
  query = Namespace.query()
  if start is not None:
    query = query.filter(Namespace.key >= Namespace.key_for_namespace(start))
  if end is not None:
    query = query.filter(Namespace.key < Namespace.key_for_namespace(end))
  return [entity.namespace_name for entity in query]
def get_kinds(start=None, end=None):
  """Return all kinds in the specified range, for the current namespace.

  Args:
    start: only return kinds >= start if start is not None.
    end: only return kinds < end if end is not None.

  Returns:
    A list of kind names between the (optional) start and end values.
  """
  query = Kind.query()
  if start is not None and start != '':
    query = query.filter(Kind.key >= Kind.key_for_kind(start))
  if end is not None:
    if end == '':
      # Every kind name sorts at or above '', so the range is empty.
      return []
    query = query.filter(Kind.key < Kind.key_for_kind(end))
  return [entity.kind_name for entity in query]
def get_properties_of_kind(kind, start=None, end=None):
  """Return all properties of kind in the specified range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A list of property names of kind between the (optional) start and end
    values.
  """
  query = Property.query(ancestor=Property.key_for_kind(kind))
  if start is not None and start != '':
    query = query.filter(Property.key >= Property.key_for_property(kind, start))
  if end is not None:
    if end == '':
      # Every property name sorts at or above '', so the range is empty.
      return []
    query = query.filter(Property.key < Property.key_for_property(kind, end))
  # A keys-only query suffices: the property name lives in the key.
  return [Property.key_to_property(key) for key in query.iter(keys_only=True)]
def get_representations_of_kind(kind, start=None, end=None):
  """Return all representations of properties of kind in the specified range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A dictionary mapping property names to its list of representations.
  """
  query = Property.query(ancestor=Property.key_for_kind(kind))
  if start is not None and start != '':
    query = query.filter(Property.key >= Property.key_for_property(kind, start))
  if end is not None:
    if end == '':
      # Empty upper bound excludes everything.
      return {}
    query = query.filter(Property.key < Property.key_for_property(kind, end))
  representations = {}
  for prop in query:
    representations[prop.property_name] = prop.property_representation
  return representations
| Python |
"""Dynamically decide from where to import Google App Engine modules.
All other NDB code should import its Google App Engine modules from
this module. If necessary, add new imports here (in both places).
"""
try:
from google.appengine import api
normal_environment = True
except ImportError:
from google3.apphosting import api
normal_environment = False
if normal_environment:
from google.appengine.api.blobstore import blobstore as api_blobstore
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api import prospective_search
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.api.prospective_search import prospective_search_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import entity_pb
from google.appengine.ext.blobstore import blobstore as ext_blobstore
from google.appengine.ext import db
from google.appengine.ext import gql
from google.appengine.runtime import apiproxy_errors
from google.net.proto import ProtocolBuffer
else:
from google3.apphosting.api.blobstore import blobstore as api_blobstore
from google3.apphosting.api import apiproxy_rpc
from google3.apphosting.api import apiproxy_stub_map
from google3.apphosting.api import datastore
from google3.apphosting.api import datastore_errors
from google3.apphosting.api import datastore_types
from google3.apphosting.api import memcache
from google3.apphosting.api import namespace_manager
from google3.apphosting.api import taskqueue
from google3.apphosting.api import urlfetch
from google3.apphosting.api import users
from google3.apphosting.datastore import datastore_query
from google3.apphosting.datastore import datastore_rpc
from google3.storage.onestore.v3 import entity_pb
from google3.apphosting.ext.blobstore import blobstore as ext_blobstore
from google3.apphosting.ext import db
from google3.apphosting.ext import gql
from google3.apphosting.runtime import apiproxy_errors
from google3.net.proto import ProtocolBuffer
# Prospective search is optional.
try:
from google3.apphosting.api import prospective_search
from google3.apphosting.api.prospective_search import prospective_search_pb
except ImportError:
pass
| Python |
"""Higher-level Query wrapper.
There are perhaps too many query APIs in the world.
The fundamental API here overloads the 6 comparisons operators to
represent filters on property values, and supports AND and OR
operations (implemented as functions -- Python's 'and' and 'or'
operators cannot be overloaded, and the '&' and '|' operators have a
priority that conflicts with the priority of comparison operators).
For example:
class Employee(Model):
name = StringProperty()
age = IntegerProperty()
rank = IntegerProperty()
@classmethod
def demographic(cls, min_age, max_age):
return cls.query().filter(AND(cls.age >= min_age, cls.age <= max_age))
@classmethod
def ranked(cls, rank):
return cls.query(cls.rank == rank).order(cls.age)
for emp in Employee.seniors(42, 5):
print emp.name, emp.age, emp.rank
The 'in' operator cannot be overloaded, but is supported through the
IN() method. For example:
Employee.query().filter(Employee.rank.IN([4, 5, 6]))
Sort orders are supported through the order() method; unary minus is
overloaded on the Property class to represent a descending order:
Employee.query().order(Employee.name, -Employee.age)
Besides using AND() and OR(), filters can also be combined by
repeatedly calling .filter():
q1 = Employee.query() # A query that returns all employees
q2 = q1.filter(Employee.age >= 30) # Only those over 30
q3 = q2.filter(Employee.age < 40) # Only those in their 30s
A further shortcut is calling .filter() with multiple arguments; this
implies AND():
q1 = Employee.query() # A query that returns all employees
q3 = q1.filter(Employee.age >= 30,
Employee.age < 40) # Only those in their 30s
And finally you can also pass one or more filter expressions directly
to the .query() method:
q3 = Employee.query(Employee.age >= 30,
Employee.age < 40) # Only those in their 30s
Query objects are immutable, so these methods always return a new
Query object; the above calls to filter() do not affect q1. (On the
other hand, operations that are effectively no-ops may return the
original Query object.)
Sort orders can also be combined this way, and .filter() and .order()
calls may be intermixed:
q4 = q3.order(-Employee.age)
q5 = q4.order(Employee.name)
q6 = q5.filter(Employee.rank == 5)
Again, multiple .order() calls can be combined:
q5 = q3.order(-Employee.age, Employee.name)
The simplest way to retrieve Query results is a for-loop:
for emp in q3:
print emp.name, emp.age
Some other methods to run a query and access its results:
q.iter() # Return an iterator; same as iter(q) but more flexible
q.map(callback) # Call the callback function for each query result
q.fetch(N) # Return a list of the first N results
q.get() # Return the first result
q.count(N) # Return the number of results, with a maximum of N
q.fetch_page(N, start_cursor=cursor) # Return (results, cursor, has_more)
All of the above methods take a standard set of additional query
options, either in the form of keyword arguments such as
keys_only=True, or as QueryOptions object passed with
options=QueryOptions(...). The most important query options are:
keys_only: bool, if set the results are keys instead of entities
limit: int, limits the number of results returned
offset: int, skips this many results first
start_cursor: Cursor, start returning results after this position
end_cursor: Cursor, stop returning results after this position
batch_size: int, hint for the number of results returned per RPC
prefetch_size: int, hint for the number of results in the first RPC
produce_cursors: bool, return Cursor objects with the results
For additional (obscure) query options and more details on them,
including an explanation of Cursors, see datastore_query.py.
All of the above methods except for iter() have asynchronous variants
as well, which return a Future; to get the operation's ultimate
result, yield the Future (when inside a tasklet) or call the Future's
get_result() method (outside a tasklet):
q.map_async(callback) # Callback may be a task or a plain function
q.fetch_async(N)
q.get_async()
q.count_async(N)
q.fetch_page_async(N, start_cursor=cursor)
Finally, there's an idiom to efficiently loop over the Query results
in a tasklet, properly yielding when appropriate:
it = q.iter()
while (yield it.has_next_async()):
emp = it.next()
print emp.name, emp.age
"""
from __future__ import with_statement
del with_statement # No need to export this.
__author__ = 'guido@google.com (Guido van Rossum)'
import datetime
import heapq
import itertools
import sys
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import datastore_query
from . import model
from . import tasklets
from . import utils
__all__ = ['Query', 'QueryOptions', 'Cursor', 'QueryIterator',
'RepeatedStructuredPropertyPredicate',
'AND', 'OR', 'ConjunctionNode', 'DisjunctionNode',
'FilterNode', 'PostFilterNode', 'FalseNode', 'Node',
'ParameterNode', 'ParameterizedThing', 'Parameter',
'ParameterizedFunction', 'gql',
]
# Re-export some useful classes from the lower-level module so callers
# can get them from this module directly.
QueryOptions = datastore_query.QueryOptions
Cursor = datastore_query.Cursor

# Some local renamings (private shorthands for lower-level constants).
_ASC = datastore_query.PropertyOrder.ASCENDING
_DESC = datastore_query.PropertyOrder.DESCENDING
_AND = datastore_query.CompositeFilter.AND
_KEY = datastore_types._KEY_SPECIAL_PROPERTY

# Table of supported comparison operators.
_OPS = frozenset(['=', '!=', '<', '<=', '>', '>=', 'in'])

# Default limit value.  (Yes, the datastore uses int32!)
_MAX_LIMIT = 2 ** 31 - 1
class RepeatedStructuredPropertyPredicate(datastore_query.FilterPredicate):
  """In-memory predicate matching a repeated structured property row-wise.

  Used by model.py.
  """

  def __init__(self, match_keys, pb, key_prefix):
    """Constructor.

    Args:
      match_keys: Full property names to match on; each must start with
        key_prefix.
      pb: Source of the values to match against, passed to
        datastore_query._make_key_value_map().  (Presumably an entity
        protocol buffer -- confirm against callers in model.py.)
      key_prefix: Common prefix stripped from match_keys before looking
        up values in pb.

    Raises:
      ValueError: if any match key does not start with key_prefix.
    """
    super(RepeatedStructuredPropertyPredicate, self).__init__()
    self.match_keys = match_keys
    stripped_keys = []
    for key in match_keys:
      if not key.startswith(key_prefix):
        raise ValueError('key %r does not begin with the specified prefix of %s'
                         % (key, key_prefix))
      stripped_keys.append(key[len(key_prefix):])
    value_map = datastore_query._make_key_value_map(pb, stripped_keys)
    # Only the first value of each stripped key is kept; together these
    # form the single "row" that must be matched exactly.
    self.match_values = tuple(value_map[key][0] for key in stripped_keys)

  def _get_prop_names(self):
    """Return the (full) property names this predicate examines."""
    return frozenset(self.match_keys)

  def _apply(self, key_value_map):
    """Apply the filter to values extracted from an entity.

    Think of self.match_keys and self.match_values as representing a
    table with one row.  For example:

      match_keys = ('name', 'age', 'rank')
      match_values = ('Joe', 24, 5)

    (Except that in reality, the values are represented by tuples
    produced by datastore_types.PropertyValueToKeyValue().)

    represents this table:

      | name    | age   | rank   |
      +---------+-------+--------+
      | 'Joe'   |  24   |  5     |

    Think of key_value_map as a table with the same structure but
    (potentially) many rows.  This represents a repeated structured
    property of a single entity.  For example:

      {'name': ['Joe', 'Jane', 'Dick'],
       'age': [24, 21, 23],
       'rank': [5, 1, 2]}

    represents this table:

      | name    | age   | rank   |
      +---------+-------+--------+
      | 'Joe'   |  24   |  5     |
      | 'Jane'  |  21   |  1     |
      | 'Dick'  |  23   |  2     |

    We must determine whether at least one row of the second table
    exactly matches the first table.  We need this class because the
    datastore, when asked to find an entity with name 'Joe', age 24
    and rank 5, will include entities that have 'Joe' somewhere in the
    name column, 24 somewhere in the age column, and 5 somewhere in
    the rank column, but not all aligned on a single row.  Such an
    entity should not be considered a match.
    """
    columns = []
    for key in self.match_keys:
      column = key_value_map.get(key)
      if not column:  # None, or an empty list.
        return False  # If any column is empty there can be no match.
      columns.append(column)
    # Use izip to transpose the columns into rows (Python 2's lazy zip).
    return self.match_values in itertools.izip(*columns)

  # Don't implement _prune()!  It would mess up the row correspondence
  # within columns.
class ParameterizedThing(object):
  """Base class for Parameter and ParameterizedFunction.

  This exists purely for isinstance() checks.
  """

  def __eq__(self, other):
    # Subclasses must provide a real implementation.
    raise NotImplementedError

  def __ne__(self, other):
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result
class Parameter(ParameterizedThing):
  """Represents a bound variable in a GQL query.

  Parameter(1) corresponds to a slot labeled ":1" in a GQL query.
  Parameter('xyz') corresponds to a slot labeled ":xyz".

  The value must be set (bound) separately by calling .set(value).
  """

  def __init__(self, key):
    """Constructor.

    Args:
      key: The Parameter key, must be either an integer or a string.
    """
    if not isinstance(key, (int, long, basestring)):
      raise TypeError('Parameter key must be an integer or string, not %s' %
                      (key,))
    self.__key = key

  def __repr__(self):
    return '%s(%r)' % (self.__class__.__name__, self.__key)

  def __eq__(self, other):
    if isinstance(other, Parameter):
      return self.__key == other.__key
    return NotImplemented

  @property
  def key(self):
    """Retrieve the key."""
    return self.__key

  def resolve(self, bindings, used):
    """Return the value bound to this parameter, recording its use.

    Args:
      bindings: A dict mapping parameter keys to values.
      used: A dict in which use of the binding is recorded.

    Raises:
      datastore_errors.BadArgumentError: if the parameter is unbound.
    """
    key = self.__key
    try:
      value = bindings[key]
    except KeyError:
      raise datastore_errors.BadArgumentError(
          'Parameter :%s is not bound.' % key)
    used[key] = True
    return value
class ParameterizedFunction(ParameterizedThing):
  """Represents a GQL function with parameterized arguments.

  For example, ParameterizedFunction('key', [Parameter(1)]) stands for
  the GQL syntax KEY(:1).
  """

  def __init__(self, func, values):
    """Constructor.

    Args:
      func: GQL function name, e.g. 'key', 'time', 'date', 'nop'.
      values: List of arguments; each may be a literal value or a
        Parameter to be bound later.
    """
    from .google_imports import gql  # Late import, to avoid name conflict.
    self.__func = func
    self.__values = values
    # NOTE: A horrible hack using GQL private variables so we can
    # reuse GQL's implementations of its built-in functions.
    gqli = gql.GQL('SELECT * FROM Dummy')
    gql_method = gqli._GQL__cast_operators[func]
    self.__method = getattr(gqli, '_GQL' + gql_method.__name__)

  def __repr__(self):
    return 'ParameterizedFunction(%r, %r)' % (self.__func, self.__values)

  def __eq__(self, other):
    if not isinstance(other, ParameterizedFunction):
      return NotImplemented
    return (self.__func == other.__func and
            self.__values == other.__values)

  @property
  def func(self):
    """The GQL function name (a string)."""
    return self.__func

  @property
  def values(self):
    """The argument list; may contain unbound Parameters."""
    return self.__values

  def is_parameterized(self):
    """Return True if any argument is an (unbound) Parameter."""
    for val in self.__values:
      if isinstance(val, Parameter):
        return True
    return False

  def resolve(self, bindings, used):
    """Evaluate the function with all Parameters replaced by their values."""
    values = []
    for val in self.__values:
      if isinstance(val, Parameter):
        val = val.resolve(bindings, used)
      values.append(val)
    result = self.__method(values)
    # The gql module returns slightly different types in some cases;
    # convert them to the types NDB uses.
    if self.__func == 'key' and isinstance(result, datastore_types.Key):
      result = model.Key.from_old_key(result)
    elif self.__func == 'time' and isinstance(result, datetime.datetime):
      # Drop the (dummy) date part, keeping only the time of day.
      result = datetime.time(result.hour, result.minute,
                             result.second, result.microsecond)
    elif self.__func == 'date' and isinstance(result, datetime.datetime):
      # Drop the (dummy) time part, keeping only the calendar date.
      result = datetime.date(result.year, result.month, result.day)
    return result
class Node(object):
  """Abstract base class for filter expression tree nodes.

  Tree nodes are considered immutable, even though they can contain
  Parameter instances, which are not.  In particular, two identical
  trees may be represented by the same Node object in different
  contexts.
  """

  def __new__(cls):
    # This class is abstract; only subclasses may be instantiated.
    if cls is Node:
      raise TypeError('Cannot instantiate Node, only a subclass.')
    return super(Node, cls).__new__(cls)

  def __eq__(self, other):
    raise NotImplementedError

  def __ne__(self, other):
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result

  def __unordered(self, unused_other):
    raise TypeError('Nodes cannot be ordered')
  __le__ = __lt__ = __ge__ = __gt__ = __unordered

  def _to_filter(self, post=False):
    """Helper to convert to datastore_query.Filter, or None."""
    raise NotImplementedError

  def _post_filters(self):
    """Helper to extract post-filter Nodes, if any."""
    return None

  def resolve(self, bindings, used):
    """Return a Node with Parameters replaced by the selected values.

    Args:
      bindings: A dict mapping integers and strings to values.
      used: A dict into which use of a binding is recorded.

    Returns:
      A Node instance.
    """
    # A plain Node has no parameters, so there is nothing to do.
    return self
class FalseNode(Node):
  """Tree node for an always-failing filter."""

  def __eq__(self, other):
    # All FalseNodes are interchangeable.
    if isinstance(other, FalseNode):
      return True
    return NotImplemented

  def _to_filter(self, post=False):
    if post:
      return None
    # Because there's no point submitting a query that will never
    # return anything.
    raise datastore_errors.BadQueryError(
        'Cannot convert FalseNode to predicate')
class ParameterNode(Node):
  """Tree node for a filter whose comparison value is still a Parameter.

  Calling resolve() with bindings substitutes the bound value and
  yields a concrete filter node.
  """

  def __new__(cls, prop, op, param):
    if not isinstance(prop, model.Property):
      raise TypeError('Expected a Property, got %r' % (prop,))
    if op not in _OPS:
      raise TypeError('Expected a valid operator, got %r' % (op,))
    if not isinstance(param, ParameterizedThing):
      raise TypeError('Expected a ParameterizedThing, got %r' % (param,))
    obj = super(ParameterNode, cls).__new__(cls)
    obj.__prop = prop
    obj.__op = op
    obj.__param = param
    return obj

  def __repr__(self):
    return 'ParameterNode(%r, %r, %r)' % (self.__prop, self.__op, self.__param)

  def __eq__(self, other):
    if not isinstance(other, ParameterNode):
      return NotImplemented
    return (self.__prop._name == other.__prop._name and
            self.__op == other.__op and
            self.__param == other.__param)

  def _to_filter(self, post=False):
    # An unresolved parameter can never be turned into a datastore filter.
    raise datastore_errors.BadArgumentError(
        'Parameter :%s is not bound.' % (self.__param.key,))

  def resolve(self, bindings, used):
    """Substitute the bound value, producing a concrete filter node."""
    value = self.__param.resolve(bindings, used)
    if self.__op == 'in':
      return self.__prop._IN(value)
    return self.__prop._comparison(self.__op, value)
class FilterNode(Node):
  """Tree node for a single filter expression.

  Note that the constructor does not always return a FilterNode: the
  '!=' and 'in' operators are rewritten into disjunctions of primitive
  comparisons (see __new__).
  """

  def __new__(cls, name, opsymbol, value):
    if isinstance(value, model.Key):
      # The lower datastore layers expect old-style keys.
      value = value.to_old_key()
    if opsymbol == '!=':
      # a != b is rewritten as (a < b) OR (a > b).
      n1 = FilterNode(name, '<', value)
      n2 = FilterNode(name, '>', value)
      return DisjunctionNode(n1, n2)
    if opsymbol == 'in':
      if not isinstance(value, (list, tuple, set, frozenset)):
        raise TypeError('in expected a list, tuple or set of values; '
                        'received %r' % value)
      # a in (x, y) is rewritten as (a == x) OR (a == y).
      nodes = [FilterNode(name, '=', v) for v in value]
      if not nodes:
        # 'in' over an empty collection can never match anything.
        return FalseNode()
      if len(nodes) == 1:
        return nodes[0]
      return DisjunctionNode(*nodes)
    self = super(FilterNode, cls).__new__(cls)
    self.__name = name
    self.__opsymbol = opsymbol
    self.__value = value
    return self

  def __repr__(self):
    return '%s(%r, %r, %r)' % (self.__class__.__name__,
                               self.__name, self.__opsymbol, self.__value)

  def __eq__(self, other):
    if not isinstance(other, FilterNode):
      return NotImplemented
    # TODO: Should nodes with values that compare equal but have
    # different types really be considered equal?  IIUC the datastore
    # doesn't consider 1 equal to 1.0 when it compares property values.
    return (self.__name == other.__name and
            self.__opsymbol == other.__opsymbol and
            self.__value == other.__value)

  def _to_filter(self, post=False):
    if post:
      # FilterNodes always run datastore-side, never as post-filters.
      return None
    if self.__opsymbol in ('!=', 'in'):
      # Normally rewritten away by __new__; reaching here means the
      # node was constructed through some other path.
      raise NotImplementedError('Inequality filters are not single filter '
                                'expressions and therefore cannot be converted '
                                'to a single filter (%r)' % self.__opsymbol)
    value = self.__value
    # NOTE(review): .decode('utf-8') assumes self.__name is a Python 2
    # byte string -- confirm before porting this code.
    return datastore_query.make_filter(self.__name.decode('utf-8'),
                                       self.__opsymbol, value)
class PostFilterNode(Node):
  """Tree node representing an in-memory filtering operation.

  This is used to represent filters that cannot be executed by the
  datastore, for example a query for a structured value.
  """

  def __new__(cls, predicate):
    self = super(PostFilterNode, cls).__new__(cls)
    self.predicate = predicate
    return self

  def __repr__(self):
    return '%s(%s)' % (self.__class__.__name__, self.predicate)

  def __eq__(self, other):
    if not isinstance(other, PostFilterNode):
      return NotImplemented
    # Predicates have no value equality, so compare by identity.
    return self is other

  def _to_filter(self, post=False):
    # Only participates in the in-memory (post) filtering phase.
    return self.predicate if post else None
class ConjunctionNode(Node):
  """Tree node representing a Boolean AND operator on two or more nodes.

  The constructor normalizes its arguments: nested conjunctions are
  flattened and DisjunctionNode arguments are multiplied out via the
  distributive law, so __new__ may actually return a single node, a
  FalseNode, or a DisjunctionNode of ConjunctionNodes.
  """

  def __new__(cls, *nodes):
    if not nodes:
      raise TypeError('ConjunctionNode() requires at least one node.')
    elif len(nodes) == 1:
      # AND of a single node is just that node.
      return nodes[0]
    clauses = [[]]  # Outer: Disjunction; inner: Conjunction.
    # TODO: Remove duplicates?
    for node in nodes:
      if not isinstance(node, Node):
        raise TypeError('ConjunctionNode() expects Node instances as arguments;'
                        ' received a non-Node instance %r' % node)
      if isinstance(node, DisjunctionNode):
        # Apply the distributive law: (X or Y) and (A or B) becomes
        # (X and A) or (X and B) or (Y and A) or (Y and B).
        new_clauses = []
        for clause in clauses:
          for subnode in node:
            new_clause = clause + [subnode]
            new_clauses.append(new_clause)
        clauses = new_clauses
      elif isinstance(node, ConjunctionNode):
        # A nested AND is flattened into every pending clause.
        for clause in clauses:
          clause.extend(node.__nodes)
      else:
        # A simple node is appended to every pending clause.
        for clause in clauses:
          clause.append(node)
    if not clauses:
      return FalseNode()
    if len(clauses) > 1:
      # Multiple clauses mean the result is a disjunction of conjunctions.
      return DisjunctionNode(*[ConjunctionNode(*clause) for clause in clauses])
    self = super(ConjunctionNode, cls).__new__(cls)
    self.__nodes = clauses[0]
    return self

  def __iter__(self):
    return iter(self.__nodes)

  def __repr__(self):
    return 'AND(%s)' % (', '.join(map(str, self.__nodes)))

  def __eq__(self, other):
    if not isinstance(other, ConjunctionNode):
      return NotImplemented
    return self.__nodes == other.__nodes

  def _to_filter(self, post=False):
    # Combine the sub-filters belonging to the requested phase:
    # datastore-side when post is False, in-memory when post is True.
    filters = filter(None,
                     (node._to_filter(post=post)
                      for node in self.__nodes
                      if isinstance(node, PostFilterNode) == post))
    if not filters:
      return None
    if len(filters) == 1:
      return filters[0]
    return datastore_query.CompositeFilter(_AND, filters)

  def _post_filters(self):
    # Extract only the in-memory (post) filter nodes, if any.
    post_filters = [node for node in self.__nodes
                    if isinstance(node, PostFilterNode)]
    if not post_filters:
      return None
    if len(post_filters) == 1:
      return post_filters[0]
    if post_filters == self.__nodes:
      return self
    return ConjunctionNode(*post_filters)

  def resolve(self, bindings, used):
    """Return a Node with Parameters replaced by the selected values."""
    nodes = [node.resolve(bindings, used) for node in self.__nodes]
    if nodes == self.__nodes:
      # Nothing changed; nodes are immutable, so reuse self.
      return self
    return ConjunctionNode(*nodes)
class DisjunctionNode(Node):
  """Tree node representing a Boolean OR operator on two or more nodes."""

  def __new__(cls, *nodes):
    if not nodes:
      raise TypeError('DisjunctionNode() requires at least one node')
    if len(nodes) == 1:
      # OR of a single node is just that node.
      return nodes[0]
    self = super(DisjunctionNode, cls).__new__(cls)
    self.__nodes = []
    # TODO: Remove duplicates?
    for node in nodes:
      if not isinstance(node, Node):
        raise TypeError('DisjunctionNode() expects Node instances as arguments;'
                        ' received a non-Node instance %r' % node)
      if isinstance(node, DisjunctionNode):
        # Flatten nested ORs: OR(OR(a, b), c) == OR(a, b, c).
        self.__nodes.extend(node.__nodes)
      else:
        self.__nodes.append(node)
    return self

  def __iter__(self):
    return iter(self.__nodes)

  def __repr__(self):
    return 'OR(%s)' % (', '.join(map(str, self.__nodes)))

  def __eq__(self, other):
    if not isinstance(other, DisjunctionNode):
      return NotImplemented
    return self.__nodes == other.__nodes

  def resolve(self, bindings, used):
    """Return a Node with Parameters replaced by the selected values."""
    resolved = [node.resolve(bindings, used) for node in self.__nodes]
    if resolved == self.__nodes:
      # Nothing changed; nodes are immutable, so reuse self.
      return self
    return DisjunctionNode(*resolved)
# 'AND' and 'OR' are the preferred public aliases for the conjunction
# and disjunction node classes (both are exported via __all__).
AND = ConjunctionNode
OR = DisjunctionNode
def _args_to_val(func, args):
  """Helper for GQL parsing to extract values from GQL expressions.

  This can extract the value from a GQL literal, return a Parameter
  for a GQL bound parameter (:1 or :foo), and interprets casts like
  KEY(...) and plain lists of values like (1, 2, 3).

  Args:
    func: A string indicating what kind of thing this is.
    args: One or more GQL values, each integer, string, or GQL literal.

  Returns:
    A plain value, a Parameter, or a ParameterizedFunction.

  Raises:
    TypeError: if an argument has an unexpected type, or if 'nop' does
      not receive exactly one value.
  """
  from .google_imports import gql  # Late import, to avoid name conflict.
  values = []
  for arg in args:
    if isinstance(arg, (int, long, basestring)):
      # Bare ints and strings are GQL bound-parameter labels.
      values.append(Parameter(arg))
    elif isinstance(arg, gql.Literal):
      values.append(arg.Get())
    else:
      raise TypeError('Unexpected arg (%r)' % arg)
  if func == 'nop':
    if len(values) != 1:
      raise TypeError('"nop" requires exactly one value')
    return values[0]  # May be a Parameter
  pfunc = ParameterizedFunction(func, values)
  if not pfunc.is_parameterized():
    # No unbound parameters inside: evaluate the function eagerly.
    return pfunc.resolve({}, {})
  return pfunc
def _get_prop_from_modelclass(modelclass, name):
  """Helper for GQL parsing to turn a property name into a property object.

  Args:
    modelclass: The model class specified in the query.
    name: The property name.  This may contain dots which indicate
      sub-properties of structured properties.

  Returns:
    A Property object.

  Raises:
    TypeError: if a path component does not name a property on a
      non-Expando model, or names a non-structured property while more
      components remain.
    KeyError: if a sub-property doesn't exist and the structured
      property's model class doesn't derive from Expando.
  """
  if name == '__key__':
    return modelclass._key

  parts = name.split('.')
  part, more = parts[0], parts[1:]
  prop = modelclass._properties.get(part)
  if prop is None:
    if issubclass(modelclass, model.Expando):
      # Expando models accept arbitrary property names.
      prop = model.GenericProperty(part)
    else:
      raise TypeError('Model %s has no property named %r' %
                      (modelclass._get_kind(), part))

  while more:
    part = more.pop(0)
    # Only structured properties have sub-properties to descend into.
    if not isinstance(prop, model.StructuredProperty):
      raise TypeError('Model %s has no property named %r' %
                      (modelclass._get_kind(), part))
    maybe = getattr(prop, part, None)
    if isinstance(maybe, model.Property) and maybe._name == part:
      prop = maybe
    else:
      maybe = prop._modelclass._properties.get(part)
      if maybe is not None:
        # Must get it this way to get the copy with the long name.
        # (See StructuredProperty.__getattr__() for details.)
        prop = getattr(prop, maybe._code_name)
      else:
        if issubclass(prop._modelclass, model.Expando) and not more:
          prop = model.GenericProperty()
          prop._name = name  # Bypass the restriction on dots.
        else:
          raise KeyError('Model %s has no property named %r' %
                         (prop._modelclass._get_kind(), part))

  return prop
class Query(object):
  """Query object.

  Usually constructed by calling Model.query().

  See module docstring for examples.

  Note that not all operations on Queries are supported by _MultiQuery
  instances; the latter are generated as necessary when any of the
  operators !=, IN or OR is used.
  """

  @utils.positional(1)
  def __init__(self, kind=None, ancestor=None, filters=None, orders=None,
               app=None, namespace=None, default_options=None):
    """Constructor.

    Args:
      kind: Optional kind string.
      ancestor: Optional ancestor Key.
      filters: Optional Node representing a filter expression tree.
      orders: Optional datastore_query.Order object.
      app: Optional app id.
      namespace: Optional namespace.
      default_options: Optional QueryOptions object.
    """
    # The ancestor may be a ParameterizedThing (a GQL bound parameter or
    # a KEY() function call); in that case validation is deferred to bind().
    if ancestor is not None:
      if isinstance(ancestor, ParameterizedThing):
        if isinstance(ancestor, ParameterizedFunction):
          if ancestor.func != 'key':
            raise TypeError('ancestor cannot be a GQL function other than KEY')
      else:
        if not isinstance(ancestor, model.Key):
          raise TypeError('ancestor must be a Key')
        if not ancestor.id():
          raise ValueError('ancestor cannot be an incomplete key')
        if app is not None:
          if app != ancestor.app():
            raise TypeError('app/ancestor mismatch')
        if namespace is not None:
          if namespace != ancestor.namespace():
            raise TypeError('namespace/ancestor mismatch')
    if filters is not None:
      if not isinstance(filters, Node):
        raise TypeError('filters must be a query Node or None; received %r' %
                        filters)
    if orders is not None:
      if not isinstance(orders, datastore_query.Order):
        raise TypeError('orders must be an Order instance or None; received %r'
                        % orders)
    self.__kind = kind  # String
    self.__ancestor = ancestor  # Key
    self.__filters = filters  # None or Node subclass
    self.__orders = orders  # None or datastore_query.Order instance
    self.__app = app
    self.__namespace = namespace
    self.__default_options = default_options

  def __repr__(self):
    """Return a constructor-style representation, omitting default values."""
    args = []
    if self.kind is not None:
      args.append('kind=%r' % self.kind)
    if self.ancestor is not None:
      args.append('ancestor=%r' % self.ancestor)
    if self.filters is not None:
      args.append('filters=%r' % self.filters)
    if self.orders is not None:
      # TODO: Format orders better.
      args.append('orders=...')  # PropertyOrder doesn't have a good repr().
    if self.app is not None:
      args.append('app=%r' % self.app)
    if self.namespace is not None:
      args.append('namespace=%r' % self.namespace)
    if self.default_options is not None:
      args.append('default_options=%r' % self.default_options)
    return '%s(%s)' % (self.__class__.__name__, ', '.join(args))

  def _get_query(self, connection):
    """Build the low-level datastore_query.Query for this Query."""
    self.bind()  # Raises an exception if there are unbound parameters.
    kind = self.kind
    ancestor = self.ancestor
    if ancestor is not None:
      ancestor = connection.adapter.key_to_pb(ancestor)
    filters = self.filters
    post_filters = None
    if filters is not None:
      # Filters that can't be executed by the datastore are applied
      # in memory after the results come back ("post filters").
      post_filters = filters._post_filters()
      filters = filters._to_filter()
    dsquery = datastore_query.Query(app=self.app,
                                    namespace=self.namespace,
                                    kind=kind.decode('utf-8') if kind else None,
                                    ancestor=ancestor,
                                    filter_predicate=filters,
                                    order=self.orders)
    if post_filters is not None:
      dsquery = datastore_query._AugmentedQuery(
          dsquery,
          in_memory_filter=post_filters._to_filter(post=True))
    return dsquery

  @tasklets.tasklet
  def run_to_queue(self, queue, conn, options=None, dsquery=None):
    """Run this query, putting entities into the given queue."""
    try:
      multiquery = self._maybe_multi_query()
      if multiquery is not None:
        yield multiquery.run_to_queue(queue, conn, options=options)
        return
      if dsquery is None:
        dsquery = self._get_query(conn)
      rpc = dsquery.run_async(conn, options)
      while rpc is not None:
        batch = yield rpc
        rpc = batch.next_batch_async(options)
        for i, result in enumerate(batch.results):
          queue.putq((batch, i, result))
      queue.complete()
    except GeneratorExit:
      raise
    except Exception:
      # Propagate the failure into the queue so consumers see it too.
      if not queue.done():
        _, e, tb = sys.exc_info()
        queue.set_exception(e, tb)
      raise

  @tasklets.tasklet
  def _run_to_list(self, results, options=None):
    # Internal version of run_to_queue(), without a queue.
    ctx = tasklets.get_context()
    conn = ctx._conn
    dsquery = self._get_query(conn)
    rpc = dsquery.run_async(conn, options)
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      for result in batch.results:
        result = ctx._update_cache_from_query_result(result, options)
        if result is not None:
          results.append(result)
    raise tasklets.Return(results)

  def _needs_multi_query(self):
    """Return True if this query must be emulated via _MultiQuery."""
    filters = self.filters
    return filters is not None and isinstance(filters, DisjunctionNode)

  def _maybe_multi_query(self):
    """Return a _MultiQuery equivalent to this query, or None if not needed."""
    if not self._needs_multi_query():
      return None
    # Switch to a _MultiQuery.
    filters = self.filters
    subqueries = []
    for subfilter in filters:
      subquery = self.__class__(kind=self.kind, ancestor=self.ancestor,
                                filters=subfilter, orders=self.orders,
                                app=self.app, namespace=self.namespace,
                                default_options=self.default_options)
      subqueries.append(subquery)
    return _MultiQuery(subqueries)

  @property
  def kind(self):
    """Accessor for the kind (a string or None)."""
    return self.__kind

  @property
  def ancestor(self):
    """Accessor for the ancestor (a Key or None)."""
    return self.__ancestor

  @property
  def filters(self):
    """Accessor for the filters (a Node or None)."""
    return self.__filters

  @property
  def orders(self):
    """Accessor for the orders (a datastore_query.Order or None)."""
    return self.__orders

  @property
  def app(self):
    """Accessor for the app (a string or None)."""
    return self.__app

  @property
  def namespace(self):
    """Accessor for the namespace (a string or None)."""
    return self.__namespace

  @property
  def default_options(self):
    """Accessor for the default_options (a QueryOptions instance or None)."""
    return self.__default_options

  def filter(self, *args):
    """Return a new Query with additional filter(s) applied."""
    if not args:
      return self
    preds = []
    f = self.filters
    if f:
      preds.append(f)
    for arg in args:
      if not isinstance(arg, Node):
        raise TypeError('Cannot filter a non-Node argument; received %r' % arg)
      preds.append(arg)
    if not preds:
      pred = None
    elif len(preds) == 1:
      pred = preds[0]
    else:
      pred = ConjunctionNode(*preds)
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          filters=pred, orders=self.orders,
                          app=self.app, namespace=self.namespace,
                          default_options=self.default_options)

  def order(self, *args):
    """Return a new Query with additional sort order(s) applied."""
    # q.order(Employee.name, -Employee.age)
    if not args:
      return self
    orders = []
    o = self.orders
    if o:
      orders.append(o)
    for arg in args:
      if isinstance(arg, model.Property):
        orders.append(datastore_query.PropertyOrder(arg._name, _ASC))
      elif isinstance(arg, datastore_query.Order):
        orders.append(arg)
      else:
        raise TypeError('order() expects a Property or query Order; '
                        'received %r' % arg)
    if not orders:
      orders = None
    elif len(orders) == 1:
      orders = orders[0]
    else:
      orders = datastore_query.CompositeOrder(orders)
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          filters=self.filters, orders=orders,
                          app=self.app, namespace=self.namespace,
                          default_options=self.default_options)

  # Datastore API using the default context.

  def iter(self, **q_options):
    """Construct an iterator over the query.

    Args:
      **q_options: All query options keyword arguments are supported.

    Returns:
      A QueryIterator object.
    """
    self.bind()  # Raises an exception if there are unbound parameters.
    return QueryIterator(self, **q_options)

  __iter__ = iter

  @utils.positional(2)
  def map(self, callback, pass_batch_into_callback=None,
          merge_future=None, **q_options):
    """Map a callback function or tasklet over the query results.

    Args:
      callback: A function or tasklet to be applied to each result; see below.
      merge_future: Optional Future subclass; see below.
      **q_options: All query options keyword arguments are supported.

    Callback signature: The callback is normally called with an entity
    as argument.  However if keys_only=True is given, it is called
    with a Key.  Also, when pass_batch_into_callback is True, it is
    called with three arguments: the current batch, the index within
    the batch, and the entity or Key at that index.  The callback can
    return whatever it wants.  If the callback is None, a trivial
    callback is assumed that just returns the entity or key passed in
    (ignoring produce_cursors).

    Optional merge future: The merge_future is an advanced argument
    that can be used to override how the callback results are combined
    into the overall map() return value.  By default a list of
    callback return values is produced.  By substituting one of a
    small number of specialized alternatives you can arrange
    otherwise.  See tasklets.MultiFuture for the default
    implementation and a description of the protocol the merge_future
    object must implement the default.  Alternatives from the same
    module include QueueFuture, SerialQueueFuture and ReducingFuture.

    Returns:
      When the query has run to completion and all callbacks have
      returned, map() returns a list of the results of all callbacks.
      (But see 'optional merge future' above.)
    """
    return self.map_async(callback,
                          pass_batch_into_callback=pass_batch_into_callback,
                          merge_future=merge_future,
                          **q_options).get_result()

  @utils.positional(2)
  def map_async(self, callback, pass_batch_into_callback=None,
                merge_future=None, **q_options):
    """Map a callback function or tasklet over the query results.

    This is the asynchronous version of Query.map().
    """
    return tasklets.get_context().map_query(
        self,
        callback,
        pass_batch_into_callback=pass_batch_into_callback,
        options=self._make_options(q_options),
        merge_future=merge_future)

  @utils.positional(2)
  def fetch(self, limit=None, **q_options):
    """Fetch a list of query results, up to a limit.

    Args:
      limit: How many results to retrieve at most.
      **q_options: All query options keyword arguments are supported.

    Returns:
      A list of results.
    """
    return self.fetch_async(limit, **q_options).get_result()

  @utils.positional(2)
  def fetch_async(self, limit=None, **q_options):
    """Fetch a list of query results, up to a limit.

    This is the asynchronous version of Query.fetch().
    """
    if limit is None:
      default_options = self._make_options(q_options)
      if default_options is not None and default_options.limit is not None:
        limit = default_options.limit
      else:
        limit = _MAX_LIMIT
    q_options['limit'] = limit
    q_options.setdefault('batch_size', limit)
    if self._needs_multi_query():
      return self.map_async(None, **q_options)
    # Optimization using direct batches.
    options = self._make_options(q_options)
    return self._run_to_list([], options=options)

  def get(self, **q_options):
    """Get the first query result, if any.

    This is similar to calling q.fetch(1) and returning the first item
    of the list of results, if any, otherwise None.

    Args:
      **q_options: All query options keyword arguments are supported.

    Returns:
      A single result, or None if there are no results.
    """
    return self.get_async(**q_options).get_result()

  @tasklets.tasklet
  def get_async(self, **q_options):
    """Get the first query result, if any.

    This is the asynchronous version of Query.get().
    """
    res = yield self.fetch_async(1, **q_options)
    if not res:
      raise tasklets.Return(None)
    raise tasklets.Return(res[0])

  @utils.positional(2)
  def count(self, limit=None, **q_options):
    """Count the number of query results, up to a limit.

    This returns the same result as len(q.fetch(limit)) but more
    efficiently.

    Note that you must pass a maximum value to limit the amount of
    work done by the query.

    Args:
      limit: How many results to count at most.
      **q_options: All query options keyword arguments are supported.

    Returns:
      The number of results, as an integer (at most limit).
    """
    return self.count_async(limit, **q_options).get_result()

  @tasklets.tasklet
  @utils.positional(2)
  def count_async(self, limit=None, **q_options):
    """Count the number of query results, up to a limit.

    This is the asynchronous version of Query.count().
    """
    # TODO: Support offset by incorporating it to the limit.
    if 'offset' in q_options:
      raise NotImplementedError('.count() and .count_async() do not support '
                                'offsets at present.')
    if 'limit' in q_options:
      raise TypeError('Cannot specify limit as a non-keyword argument and as a '
                      'keyword argument simultaneously.')
    elif limit is None:
      limit = _MAX_LIMIT
    if self._needs_multi_query():
      # _MultiQuery does not support iterating over result batches,
      # so just fetch results and count them.
      # TODO: Use QueryIterator to avoid materializing the results list.
      q_options.setdefault('batch_size', limit)
      q_options.setdefault('keys_only', True)
      results = yield self.fetch_async(limit, **q_options)
      raise tasklets.Return(len(results))
    # Issue a special query requesting 0 results at a given offset.
    # The skipped_results count will tell us how many hits there were
    # before that offset without fetching the items.
    q_options['offset'] = limit
    q_options['limit'] = 0
    options = self._make_options(q_options)
    conn = tasklets.get_context()._conn
    dsquery = self._get_query(conn)
    rpc = dsquery.run_async(conn, options)
    total = 0
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      total += batch.skipped_results
    raise tasklets.Return(total)

  @utils.positional(2)
  def fetch_page(self, page_size, **q_options):
    """Fetch a page of results.

    This is a specialized method for use by paging user interfaces.

    Args:
      page_size: The requested page size.  At most this many results
        will be returned.

    In addition, any keyword argument supported by the QueryOptions
    class is supported.  In particular, to fetch the next page, you
    pass the cursor returned by one call to the next call using
    start_cursor=<cursor>.  A common idiom is to pass the cursor to
    the client using <cursor>.to_websafe_string() and to reconstruct
    that cursor on a subsequent request using
    Cursor.from_websafe_string(<string>).

    Returns:
      A tuple (results, cursor, more) where results is a list of query
      results, cursor is a cursor pointing just after the last result
      returned, and more is a bool indicating whether there are
      (likely) more results after that.
    """
    # NOTE: page_size can't be passed as a keyword.
    return self.fetch_page_async(page_size, **q_options).get_result()

  @tasklets.tasklet
  @utils.positional(2)
  def fetch_page_async(self, page_size, **q_options):
    """Fetch a page of results.

    This is the asynchronous version of Query.fetch_page().
    """
    q_options.setdefault('batch_size', page_size)
    q_options.setdefault('produce_cursors', True)
    # Ask for one extra result so probably_has_next() is accurate.
    it = self.iter(limit=page_size + 1, **q_options)
    results = []
    while (yield it.has_next_async()):
      results.append(it.next())
      if len(results) >= page_size:
        break
    try:
      cursor = it.cursor_after()
    except datastore_errors.BadArgumentError:
      cursor = None
    raise tasklets.Return(results, cursor, it.probably_has_next())

  def _make_options(self, q_options):
    """Helper to construct a QueryOptions object from keyword arguments.

    Args:
      q_options: a dict of keyword arguments.

    Note that either 'options' or 'config' can be used to pass another
    QueryOptions object, but not both.  If another QueryOptions object is
    given it provides default values.

    If self.default_options is set, it is used to provide defaults,
    which have a lower precedence than options set in q_options.

    Returns:
      A QueryOptions object, or None if q_options is empty.
    """
    if not q_options:
      return self.default_options
    if 'options' in q_options:
      # Move 'options' to 'config' since that is what QueryOptions() uses.
      if 'config' in q_options:
        raise TypeError('You cannot use config= and options= at the same time')
      q_options['config'] = q_options.pop('options')
    options = QueryOptions(**q_options)
    if self.default_options is not None:
      options = self.default_options.merge(options)
    return options

  def analyze(self):
    """Return a list giving the parameters required by a query."""
    class MockBindings(dict):
      # Pretends every key is present; resolve() records actual use in 'used'.
      def __contains__(self, key):
        self[key] = None
        return True
    bindings = MockBindings()
    used = {}
    ancestor = self.ancestor
    if isinstance(ancestor, ParameterizedThing):
      ancestor = ancestor.resolve(bindings, used)
    filters = self.filters
    if filters is not None:
      filters = filters.resolve(bindings, used)
    return sorted(used)  # Returns only the keys.

  def bind(self, *args, **kwds):
    """Bind parameter values.  Returns a new Query object."""
    return self._bind(args, kwds)

  def _bind(self, args, kwds):
    """Bind parameter values.  Returns a new Query object."""
    bindings = dict(kwds)
    # Positional arguments are numbered starting at 1, per GQL convention.
    for i, arg in enumerate(args):
      bindings[i + 1] = arg
    used = {}
    ancestor = self.ancestor
    if isinstance(ancestor, ParameterizedThing):
      ancestor = ancestor.resolve(bindings, used)
    filters = self.filters
    if filters is not None:
      filters = filters.resolve(bindings, used)
    unused = []
    for i in xrange(1, 1 + len(args)):
      if i not in used:
        unused.append(i)
    if unused:
      raise datastore_errors.BadArgumentError(
          'Positional arguments %s were given but not used.' %
          ', '.join(str(i) for i in unused))
    return self.__class__(kind=self.kind, ancestor=ancestor,
                          filters=filters, orders=self.orders,
                          app=self.app, namespace=self.namespace,
                          default_options=self.default_options)
def gql(query_string, *args, **kwds):
  """Parse a GQL query string.

  Args:
    query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.
    *args, **kwds: If present, used to call bind().

  Returns:
    An instance of query_class.
  """
  query = _gql(query_string)
  if not (args or kwds):
    return query
  return query._bind(args, kwds)
@utils.positional(1)
def _gql(query_string, query_class=Query):
  """Parse a GQL query string (internal version).

  Args:
    query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.
    query_class: Optional class to use, default Query.

  Returns:
    An instance of query_class.

  Raises:
    datastore_errors.BadQueryError: if the kind has no registered model class.
  """
  from .google_imports import gql  # Late import, to avoid name conflict.
  gql_qry = gql.GQL(query_string)
  kind = gql_qry.kind()
  if kind is None:
    # A kindless query; Expando is the closest model class.
    modelclass = model.Expando
  else:
    ctx = tasklets.get_context()
    default_model = ctx._conn.adapter.default_model
    modelclass = model.Model._kind_map.get(kind, default_model)
    if modelclass is None:
      raise datastore_errors.BadQueryError(
          "No model class found for kind %r. Did you forget to import it?" %
          (kind,))
  ancestor = None
  flt = gql_qry.filters()
  filters = []
  # flt maps (name, op) -> list of (func, args) value specs.
  for name_op in sorted(flt):
    name, op = name_op
    values = flt[name_op]
    op = op.lower()
    if op == 'is' and name == gql.GQL._GQL__ANCESTOR:
      # 'ANCESTOR IS ...' is handled separately from property filters.
      if len(values) != 1:
        raise ValueError('"is" requires exactly one value')
      [(func, args)] = values
      ancestor = _args_to_val(func, args)
      continue
    if op not in _OPS:
      raise NotImplementedError('Operation %r is not supported.' % op)
    for (func, args) in values:
      val = _args_to_val(func, args)
      prop = _get_prop_from_modelclass(modelclass, name)
      if prop._name != name:
        raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) '
                           'returned a property whose name is %r?!' %
                           (modelclass.__name__, name, prop._name))
      if isinstance(val, ParameterizedThing):
        # Bound parameter; filter node is resolved later by bind().
        node = ParameterNode(prop, op, val)
      elif op == 'in':
        node = prop._IN(val)
      else:
        node = prop._comparison(op, val)
      filters.append(node)
  if filters:
    filters = ConjunctionNode(*filters)
  else:
    filters = None
  orders = _orderings_to_orders(gql_qry.orderings(), modelclass)
  offset = gql_qry.offset()
  limit = gql_qry.limit()
  if limit < 0:
    # GQL uses -1 to mean "no limit".
    limit = None
  keys_only = gql_qry._keys_only
  if not keys_only:
    keys_only = None
  options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only)
  qry = query_class(kind=kind,
                    ancestor=ancestor,
                    filters=filters,
                    orders=orders,
                    default_options=options)
  return qry
class QueryIterator(object):
  """This iterator works both for synchronous and async callers!

  For synchronous callers, just use:

    for entity in Account.query():
      <use entity>

  Async callers use this idiom:

    it = iter(Account.query())
    while (yield it.has_next_async()):
      entity = it.next()
      <use entity>

  You can also use q.iter([options]) instead of iter(q); this allows
  passing query options such as keys_only or produce_cursors.

  When keys_only is set, it.next() returns a key instead of an entity.

  When produce_cursors is set, the methods it.cursor_before() and
  it.cursor_after() return Cursor objects corresponding to the query
  position just before and after the item returned by it.next().
  Before it.next() is called for the first time, both raise an
  exception.  Once the loop is exhausted, both return the cursor after
  the last item returned.  Calling it.has_next() does not affect the
  cursors; you must call it.next() before the cursors move.  Note that
  sometimes requesting a cursor requires a datastore roundtrip (but
  not if you happen to request a cursor corresponding to a batch
  boundary).  If produce_cursors is not set, both methods always raise
  an exception.

  Note that queries requiring in-memory merging of multiple queries
  (i.e. queries using the IN, != or OR operators) do not support query
  options.
  """

  # When produce_cursors is set, _lookahead collects (batch, index)
  # pairs passed to _extended_callback(), and (_batch, _index)
  # contain the info pertaining to the current item.
  _lookahead = None
  _batch = None
  _index = None

  # Indicate the loop is exhausted.
  _exhausted = False

  @utils.positional(2)
  def __init__(self, query, **q_options):
    """Constructor.  Takes a Query and query options.

    This is normally called by Query.iter() or Query.__iter__().
    """
    ctx = tasklets.get_context()
    callback = None
    options = query._make_options(q_options)
    callback = self._extended_callback
    self._iter = ctx.iter_query(query,
                                callback=callback,
                                pass_batch_into_callback=True,
                                options=options)
    self._fut = None

  def _extended_callback(self, batch, index, ent):
    # Record the (batch, index) position of each produced item so the
    # cursor methods can find the current position later.
    if self._exhausted:
      raise RuntimeError('QueryIterator is already exhausted')
    # TODO: Make _lookup a deque.
    if self._lookahead is None:
      self._lookahead = []
    self._lookahead.append((batch, index))
    return ent

  def _consume_item(self):
    # Advance (_batch, _index) to the position of the item just returned.
    if self._lookahead:
      self._batch, self._index = self._lookahead.pop(0)
    else:
      self._batch = self._index = None

  def cursor_before(self):
    """Return the cursor before the current item.

    You must pass a QueryOptions object with produce_cursors=True
    for this to work.

    If there is no cursor or no current item, raise BadArgumentError.
    Before next() has returned there is no cursor.  Once the loop is
    exhausted, this returns the cursor after the last item.
    """
    if self._batch is None:
      raise datastore_errors.BadArgumentError('There is no cursor currently')
    # TODO: if cursor_after() was called for the previous item
    # reuse that result instead of computing it from scratch.
    # (Some cursor() calls make a datastore roundtrip.)
    # TODO: reimplement the cursor() call to use NDB async I/O;
    # perhaps even add async versions of cursor_before/after.
    # NOTE: _exhausted (a bool) is added as 0 or 1 so that after
    # exhaustion this returns the cursor *after* the last item.
    return self._batch.cursor(self._index + self._exhausted)

  def cursor_after(self):
    """Return the cursor after the current item.

    You must pass a QueryOptions object with produce_cursors=True
    for this to work.

    If there is no cursor or no current item, raise BadArgumentError.
    Before next() has returned there is no cursor.  Once the loop is
    exhausted, this returns the cursor after the last item.
    """
    if self._batch is None:
      raise datastore_errors.BadArgumentError('There is no cursor currently')
    return self._batch.cursor(self._index + 1)  # TODO: inline this as async.

  def index_list(self):
    """Return the list of indexes used for this query.

    This returns a list of index representations, where an index
    representation is the same as what is returned by get_indexes().

    Before the first result, the information is unavailable, and then
    None is returned.  This is not the same as an empty list -- the
    empty list means that no index was used to execute the query.  (In
    the dev_appserver, an empty list may also mean that only built-in
    indexes were used; metadata queries also return an empty list
    here.)

    Proper use is as follows:
      q = <modelclass>.query(<filters>)
      i = q.iter()
      try:
        i.next()
      except StopIteration:
        pass
      indexes = i.index_list()
      assert isinstance(indexes, list)

    Notes:
    - Forcing produce_cursors=False makes this always return None.
    - This always returns None for a multi-query.
    """
    # TODO: Technically it is possible to implement this for
    # multi-query by merging all the index lists from each subquery.
    # Return None if the batch has no attribute index_list.
    # This also applies when the batch itself is None.
    return getattr(self._batch, 'index_list', None)

  def __iter__(self):
    """Iterator protocol: get the iterator for this iterator, i.e. self."""
    return self

  def probably_has_next(self):
    """Return whether a next item is (probably) available.

    This is not quite the same as has_next(), because when
    produce_cursors is set, some shortcuts are possible.  However, in
    some cases (e.g. when the query has a post_filter) we can get a
    false positive (returns True but next() will raise StopIteration).
    There are no false negatives, if Batch.more_results doesn't lie.
    """
    if self._lookahead:
      return True
    if self._batch is not None:
      return self._batch.more_results
    return self.has_next()

  def has_next(self):
    """Return whether a next item is available.

    See the module docstring for the usage pattern.
    """
    return self.has_next_async().get_result()

  @tasklets.tasklet
  def has_next_async(self):
    """Return a Future whose result will say whether a next item is available.

    See the module docstring for the usage pattern.
    """
    if self._fut is None:
      self._fut = self._iter.getq()
    flag = True
    try:
      yield self._fut
    except EOFError:
      flag = False
    raise tasklets.Return(flag)

  def next(self):
    """Iterator protocol: get next item or raise StopIteration."""
    if self._fut is None:
      self._fut = self._iter.getq()
    try:
      try:
        ent = self._fut.get_result()
        self._consume_item()
        return ent
      except EOFError:
        self._exhausted = True
        raise StopIteration
    finally:
      # Always drop the future so has_next_async()/next() fetch a fresh one.
      self._fut = None
class _SubQueryIteratorState(object):
  """Helper class for _MultiQuery.

  Holds the current (batch, index, entity) position of one subquery's
  iterator, plus the subquery's dsquery and orders, and compares by the
  query's sort order so instances can live in a heap for merge-sorting.
  """

  def __init__(self, batch_i_entity, iterator, dsquery, orders):
    """Constructor.

    Args:
      batch_i_entity: A (batch, index, entity) tuple as produced by
        run_to_queue().
      iterator: The subquery's queue future, used to fetch the next item.
      dsquery: The subquery's low-level datastore_query.Query.
      orders: The shared datastore_query.Order used for comparisons.
    """
    batch, index, entity = batch_i_entity
    self.batch = batch
    self.index = index
    self.entity = entity
    self.iterator = iterator
    self.dsquery = dsquery
    self.orders = orders

  def __cmp__(self, other):
    """Compare two states by their current entity, per the shared orders."""
    if not isinstance(other, _SubQueryIteratorState):
      raise NotImplementedError('Can only compare _SubQueryIteratorState '
                                'instances to other _SubQueryIteratorState '
                                'instances; not %r' % other)
    if not self.orders == other.orders:
      raise NotImplementedError('Cannot compare _SubQueryIteratorStates with '
                                'differing orders (%r != %r)' %
                                (self.orders, other.orders))
    lhs = self.entity._orig_pb
    rhs = other.entity._orig_pb
    lhs_filter = self.dsquery._filter_predicate
    rhs_filter = other.dsquery._filter_predicate
    names = self.orders._get_prop_names()
    # TODO: In some future version, there won't be a need to add the
    # filters' names.
    if lhs_filter is not None:
      names |= lhs_filter._get_prop_names()
    if rhs_filter is not None:
      names |= rhs_filter._get_prop_names()
    lhs_value_map = datastore_query._make_key_value_map(lhs, names)
    rhs_value_map = datastore_query._make_key_value_map(rhs, names)
    # Prune values excluded by each side's own filter before comparing,
    # so repeated-property values that didn't match don't affect ordering.
    if lhs_filter is not None:
      lhs_filter._prune(lhs_value_map)
    if rhs_filter is not None:
      rhs_filter._prune(rhs_value_map)
    return self.orders._cmp(lhs_value_map, rhs_value_map)
class _MultiQuery(object):
  """Helper class to run queries involving !=, IN or OR operators."""

  # This is not instantiated by the user directly, but implicitly when
  # iterating over a query with at least one filter using an IN, OR or
  # != operator.  Note that some options must be interpreted by
  # _MultiQuery instead of passed to the underlying Queries' methods,
  # e.g. offset (though not necessarily limit, and I'm not sure about
  # cursors).

  # TODO: Need a way to specify the unification of two queries that
  # are identical except one has an ancestor and the other doesn't.
  # The HR datastore makes that a useful special case.

  def __init__(self, subqueries):
    """Constructor.

    Args:
      subqueries: A non-empty list of Query instances, all with the same
        kind and orders.
    """
    if not isinstance(subqueries, list):
      raise TypeError('subqueries must be a list; received %r' % subqueries)
    for subq in subqueries:
      if not isinstance(subq, Query):
        raise TypeError('Each subquery must be a Query instances; received %r'
                        % subq)
    first_subquery = subqueries[0]
    kind = first_subquery.kind
    orders = first_subquery.orders
    if not kind:
      raise ValueError('Subquery kind cannot be missing')
    for subq in subqueries[1:]:
      if subq.kind != kind:
        raise ValueError('Subqueries must be for a common kind (%s != %s)' %
                         (subq.kind, kind))
      elif subq.orders != orders:
        raise ValueError('Subqueries must have the same order(s) (%s != %s)' %
                         (subq.orders, orders))
    # TODO: Ensure that app and namespace match, when we support them.
    self.__subqueries = subqueries
    self.__orders = orders
    self.ancestor = None  # Hack for map_query().

  def _make_options(self, q_options):
    # NOTE: q_options is ignored; multi-queries don't support query options.
    return self.__subqueries[0].default_options

  @property
  def orders(self):
    """Accessor for the shared orders (a datastore_query.Order or None)."""
    return self.__orders

  @property
  def default_options(self):
    """Accessor for the first subquery's default_options."""
    return self.__subqueries[0].default_options

  @tasklets.tasklet
  def run_to_queue(self, queue, conn, options=None):
    """Run this query, putting entities into the given queue."""
    if options is None:
      # Default options.
      offset = None
      limit = None
      keys_only = None
    else:
      # Capture options we need to simulate.
      offset = options.offset
      limit = options.limit
      keys_only = options.keys_only

      # Cursors are supported for certain orders only.
      if (options.start_cursor or options.end_cursor or
          options.produce_cursors):
        names = set()
        if self.__orders is not None:
          names = self.__orders._get_prop_names()
        if '__key__' not in names:
          raise datastore_errors.BadArgumentError(
              '_MultiQuery with cursors requires __key__ order')

      # Decide if we need to modify the options passed to subqueries.
      # NOTE: It would seem we can sometimes let the datastore handle
      # the offset natively, but this would thwart the duplicate key
      # detection, so we always have to emulate the offset here.
      # We can set the limit we pass along to offset + limit though,
      # since that is the maximum number of results from a single
      # subquery we will ever have to consider.
      modifiers = {}
      if offset:
        modifiers['offset'] = None
      if limit is not None:
        modifiers['limit'] = min(_MAX_LIMIT, offset + limit)
      if keys_only and self.__orders is not None:
        # Need full entities for sorting; keys are extracted afterwards.
        modifiers['keys_only'] = None
      if modifiers:
        options = QueryOptions(config=options, **modifiers)

    if offset is None:
      offset = 0

    if limit is None:
      limit = _MAX_LIMIT

    if self.__orders is None:
      # Run the subqueries sequentially; there is no order to keep.
      keys_seen = set()
      for subq in self.__subqueries:
        if limit <= 0:
          break
        subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[ser]')
        subq.run_to_queue(subit, conn, options=options)
        while limit > 0:
          try:
            batch, index, result = yield subit.getq()
          except EOFError:
            break
          if keys_only:
            key = result
          else:
            key = result._key
          if key not in keys_seen:
            keys_seen.add(key)
            if offset > 0:
              offset -= 1
            else:
              limit -= 1
              queue.putq((None, None, result))
      queue.complete()
      return

    # This with-statement causes the adapter to set _orig_pb on all
    # entities it converts from protobuf.
    # TODO: Does this interact properly with the cache?
    with conn.adapter:
      # Start running all the sub-queries.
      todo = []  # List of (subit, dsquery) tuples.
      for subq in self.__subqueries:
        dsquery = subq._get_query(conn)
        subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[par]')
        subq.run_to_queue(subit, conn, options=options, dsquery=dsquery)
        todo.append((subit, dsquery))

      # Create a list of (first-entity, subquery-iterator) tuples.
      state = []  # List of _SubQueryIteratorState instances.
      for subit, dsquery in todo:
        try:
          thing = yield subit.getq()
        except EOFError:
          continue
        else:
          state.append(_SubQueryIteratorState(thing, subit, dsquery,
                                              self.__orders))

      # Now turn it into a sorted heap.  The heapq module claims that
      # calling heapify() is more efficient than calling heappush() for
      # each item.
      heapq.heapify(state)

      # Repeatedly yield the lowest entity from the state vector,
      # filtering duplicates.  This is essentially a multi-way merge
      # sort.  One would think it should be possible to filter
      # duplicates simply by dropping other entities already in the
      # state vector that are equal to the lowest entity, but because of
      # the weird sorting of repeated properties, we have to explicitly
      # keep a set of all keys, so we can remove later occurrences.
      # Note that entities will still be sorted correctly, within the
      # constraints given by the sort order.
      keys_seen = set()
      while state and limit > 0:
        item = heapq.heappop(state)
        batch = item.batch
        index = item.index
        entity = item.entity
        key = entity._key
        if key not in keys_seen:
          keys_seen.add(key)
          if offset > 0:
            offset -= 1
          else:
            limit -= 1
            if keys_only:
              queue.putq((batch, index, key))
            else:
              queue.putq((batch, index, entity))
        subit = item.iterator
        try:
          batch, index, entity = yield subit.getq()
        except EOFError:
          # This subquery is exhausted; don't push it back on the heap.
          pass
        else:
          item.batch = batch
          item.index = index
          item.entity = entity
          heapq.heappush(state, item)
    queue.complete()

  # Datastore API using the default context.

  def iter(self, **q_options):
    """Construct a QueryIterator over this multi-query."""
    return QueryIterator(self, **q_options)

  __iter__ = iter

  # TODO: Add fetch() etc.?
# Helper functions to convert between orders and orderings. An order
# is a datastore_query.Order instance. An ordering is a
# (property_name, direction) tuple.
def _order_to_ordering(order):
pb = order._to_pb()
return pb.property(), pb.direction() # TODO: What about UTF-8?
def _orders_to_orderings(orders):
if orders is None:
return []
if isinstance(orders, datastore_query.PropertyOrder):
return [_order_to_ordering(orders)]
if isinstance(orders, datastore_query.CompositeOrder):
# TODO: What about UTF-8?
return [(pb.property(), pb.direction()) for pb in orders._to_pbs()]
raise ValueError('Bad order: %r' % (orders,))
def _ordering_to_order(ordering, modelclass):
  """Build a datastore_query.PropertyOrder from an ordering tuple.

  Args:
    ordering: A (property_name, direction) tuple.
    modelclass: The Model subclass owning the named property.

  Returns:
    A datastore_query.PropertyOrder instance.

  Raises:
    RuntimeError: If the resolved property's name disagrees with the
      requested name (would indicate an internal inconsistency).
  """
  name, direction = ordering
  prop = _get_prop_from_modelclass(modelclass, name)
  if prop._name == name:
    return datastore_query.PropertyOrder(name, direction)
  raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) '
                     'returned a property whose name is %r?!' %
                     (modelclass.__name__, name, prop._name))
def _orderings_to_orders(orderings, modelclass):
orders = [_ordering_to_order(o, modelclass) for o in orderings]
if not orders:
return None
if len(orders) == 1:
return orders[0]
return datastore_query.CompositeOrder(orders)
| Python |
"""Tests for tasklets.py."""
import os
import random
import re
import sys
import time
import unittest
from . import context
from . import eventloop
from . import model
from . import test_utils
from . import tasklets
from . import utils
class TaskletTests(test_utils.NDBTest):
  """Unit tests for tasklets.py: Futures, queue futures and tasklets.

  NOTE(review): many of these tests depend on the exact interleaving of
  event-loop callbacks (self.ev.run() / eventloop.run() calls between
  putq/set_result calls), so statement order is significant throughout.
  """

  def setUp(self):
    super(TaskletTests, self).setUp()
    # Drop any event loop / context left over from a previous test; the
    # modules stash them under these keys in os.environ.
    if eventloop._EVENT_LOOP_KEY in os.environ:
      del os.environ[eventloop._EVENT_LOOP_KEY]
    if tasklets._CONTEXT_KEY in os.environ:
      del os.environ[tasklets._CONTEXT_KEY]
    self.ev = eventloop.get_event_loop()
    self.log = []

  the_module = tasklets

  def universal_callback(self, *args):
    # Generic callback: records its positional args for later inspection.
    self.log.append(args)

  def testAddFlowException(self):
    try:
      # Only exception *classes* are accepted.
      self.assertRaises(TypeError, tasklets.add_flow_exception, 'abc')
      self.assertRaises(TypeError, tasklets.add_flow_exception, str)
      tasklets.add_flow_exception(ZeroDivisionError)
      self.assertTrue(ZeroDivisionError in tasklets._flow_exceptions)
      @tasklets.tasklet
      def foo():
        1/0
        yield
      self.assertRaises(ZeroDivisionError, foo().get_result)
    finally:
      # Restore the default set of flow exceptions for other tests.
      tasklets._init_flow_exceptions()

  def testFuture_Constructor(self):
    f = tasklets.Future()
    self.assertEqual(f._result, None)
    self.assertEqual(f._exception, None)
    self.assertEqual(f._callbacks, [])

  def testFuture_Repr(self):
    # repr() shows creator, and pending/result/exception state.
    f = tasklets.Future()
    prefix = (r'<Future [\da-f]+ created by'
              r'( testFuture_Repr\(tasklets_test.py:\d+\)|\?); ')
    self.assertTrue(re.match(prefix + r'pending>$', repr(f)), repr(f))
    f.set_result('abc')
    self.assertTrue(re.match(prefix + r'result \'abc\'>$', repr(f)), repr(f))
    f = tasklets.Future()
    f.set_exception(RuntimeError('abc'))
    self.assertTrue(re.match(prefix + r'exception RuntimeError: abc>$',
                             repr(f)),
                    repr(f))

  def testFuture_Repr_TaskletWrapper(self):
    # A Future created inside a tasklet names the tasklet in its repr.
    prefix = r'<Future [\da-f]+ created by '
    @tasklets.tasklet
    @utils.positional(1)
    def foo():
      f1 = tasklets.Future()
      self.assertTrue(re.match(prefix +
                               r'foo\(tasklets_test.py:\d+\); pending>$',
                               repr(f1)),
                      repr(f1))
      f1.set_result(None)
      yield f1
    f2 = foo()
    self.assertTrue(
        re.match(prefix +
                 r'testFuture_Repr_TaskletWrapper\(tasklets_test.py:\d+\) '
                 r'for tasklet foo\(tasklets_test.py:\d+\).*; pending>$',
                 repr(f2)),
        repr(f2))
    f2.check_success()

  def testFuture_Done_State(self):
    f = tasklets.Future()
    self.assertFalse(f.done())
    self.assertEqual(f.state, f.RUNNING)
    f.set_result(42)
    self.assertTrue(f.done())
    self.assertEqual(f.state, f.FINISHING)

  def testFuture_SetResult(self):
    f = tasklets.Future()
    f.set_result(42)
    self.assertEqual(f._result, 42)
    self.assertEqual(f._exception, None)
    self.assertEqual(f.get_result(), 42)

  def testFuture_SetException(self):
    f = tasklets.Future()
    err = RuntimeError(42)
    f.set_exception(err)
    self.assertEqual(f.done(), True)
    self.assertEqual(f._exception, err)
    self.assertEqual(f._result, None)
    self.assertEqual(f.get_exception(), err)
    self.assertRaises(RuntimeError, f.get_result)

  def testFuture_AddDoneCallback_SetResult(self):
    f = tasklets.Future()
    f.add_callback(self.universal_callback, f)
    self.assertEqual(self.log, [])  # Nothing happened yet.
    f.set_result(42)
    eventloop.run()
    self.assertEqual(self.log, [(f,)])

  def testFuture_SetResult_AddDoneCallback(self):
    # Adding a callback after completion still fires it.
    f = tasklets.Future()
    f.set_result(42)
    self.assertEqual(f.get_result(), 42)
    f.add_callback(self.universal_callback, f)
    eventloop.run()
    self.assertEqual(self.log, [(f,)])

  def testFuture_AddDoneCallback_SetException(self):
    # Callbacks fire on exception completion as well.
    f = tasklets.Future()
    f.add_callback(self.universal_callback, f)
    f.set_exception(RuntimeError(42))
    eventloop.run()
    self.assertEqual(self.log, [(f,)])
    self.assertEqual(f.done(), True)

  def create_futures(self):
    # Helper: five futures, each scheduled to complete at a staggered
    # time (i * 0.01s), each logging via universal_callback.
    self.futs = []
    for i in range(5):
      f = tasklets.Future()
      f.add_callback(self.universal_callback, f)
      def wake(fut, result):
        fut.set_result(result)
      self.ev.queue_call(i * 0.01, wake, f, i)
      self.futs.append(f)
    return set(self.futs)

  def testFuture_WaitAny(self):
    self.assertEqual(tasklets.Future.wait_any([]), None)
    todo = self.create_futures()
    while todo:
      f = tasklets.Future.wait_any(todo)
      todo.remove(f)
    eventloop.run()
    self.assertEqual(self.log, [(f,) for f in self.futs])

  def testFuture_WaitAll(self):
    todo = self.create_futures()
    tasklets.Future.wait_all(todo)
    self.assertEqual(self.log, [(f,) for f in self.futs])

  def testSleep(self):
    # Ensure that tasklets sleep for the specified amount of time.
    # NOTE: May sleep too long if processor usage is high.
    log = []
    @tasklets.tasklet
    def foo():
      log.append(time.time())
      yield tasklets.sleep(0.1)
      log.append(time.time())
    foo()
    eventloop.run()
    t0, t1 = log
    dt = t1 - t0
    self.assertTrue(0.08 <= dt <= 0.12,
                    'slept too long or too short: dt=%.03f' % dt)

  def testMultiFuture(self):
    @tasklets.tasklet
    def foo(dt):
      yield tasklets.sleep(dt)
      raise tasklets.Return('foo-%s' % dt)
    @tasklets.tasklet
    def bar(n):
      for _ in range(n):
        yield tasklets.sleep(0.01)
      raise tasklets.Return('bar-%d' % n)
    bar5 = bar(5)
    # bar5 appears twice; the result set below has it only once.
    futs = [foo(0.05), foo(0.01), foo(0.03), bar(3), bar5, bar5]
    mfut = tasklets.MultiFuture()
    for fut in futs:
      mfut.add_dependent(fut)
    mfut.complete()
    results = mfut.get_result()
    self.assertEqual(set(results),
                     set(['foo-0.01', 'foo-0.03', 'foo-0.05',
                          'bar-3', 'bar-5']))

  def testMultiFuture_PreCompleted(self):
    # A dependent that is already done when added is handled.
    @tasklets.tasklet
    def foo():
      yield tasklets.sleep(0.01)
      raise tasklets.Return(42)
    mfut = tasklets.MultiFuture()
    dep = foo()
    dep.wait()
    mfut.add_dependent(dep)
    mfut.complete()
    eventloop.run()
    self.assertTrue(mfut.done())
    self.assertEqual(mfut.get_result(), [42])

  def testMultiFuture_SetException(self):
    # An exception set on the MultiFuture itself wins over item results.
    mf = tasklets.MultiFuture()
    f1 = tasklets.Future()
    f2 = tasklets.Future()
    f3 = tasklets.Future()
    f2.set_result(2)
    mf.putq(f1)
    f1.set_result(1)
    mf.putq(f2)
    mf.putq(f3)
    mf.putq(4)
    self.ev.run()
    mf.set_exception(ZeroDivisionError())
    f3.set_result(3)
    self.ev.run()
    self.assertRaises(ZeroDivisionError, mf.get_result)

  def testMultiFuture_ItemException(self):
    # An exception on any dependent propagates to the MultiFuture.
    mf = tasklets.MultiFuture()
    f1 = tasklets.Future()
    f2 = tasklets.Future()
    f3 = tasklets.Future()
    f2.set_result(2)
    mf.putq(f1)
    f1.set_exception(ZeroDivisionError())
    mf.putq(f2)
    mf.putq(f3)
    f3.set_result(3)
    self.ev.run()
    mf.complete()
    self.assertRaises(ZeroDivisionError, mf.get_result)

  def testMultiFuture_Repr(self):
    # Exercise repr() at every stage of a MultiFuture's life cycle.
    mf = tasklets.MultiFuture('info')
    r1 = repr(mf)
    mf.putq(1)
    r2 = repr(mf)
    f2 = tasklets.Future()
    f2.set_result(2)
    mf.putq(2)
    r3 = repr(mf)
    self.ev.run()
    r4 = repr(mf)
    f3 = tasklets.Future()
    mf.putq(f3)
    r5 = repr(mf)
    mf.complete()
    r6 = repr(mf)
    f3.set_result(3)
    self.ev.run()
    r7 = repr(mf)
    for r in r1, r2, r3, r4, r5, r6, r7:
      self.assertTrue(
          re.match(
              r'<MultiFuture [\da-f]+ created by '
              r'(testMultiFuture_Repr\(tasklets_test.py:\d+\)|\?) for info; ',
              r))
      if r is r7:
        self.assertTrue('result' in r)
      else:
        self.assertTrue('pending' in r)

  def testQueueFuture(self):
    # Items come off the queue in completion order (sleep i*0.01 => i).
    q = tasklets.QueueFuture()
    @tasklets.tasklet
    def produce_one(i):
      yield tasklets.sleep(i * 0.01)
      raise tasklets.Return(i)
    @tasklets.tasklet
    def producer():
      q.putq(0)
      for i in range(1, 10):
        q.add_dependent(produce_one(i))
      q.complete()
    @tasklets.tasklet
    def consumer():
      for i in range(10):
        val = yield q.getq()
        self.assertEqual(val, i)
      yield q
      self.assertRaises(EOFError, q.getq().get_result)
    @tasklets.tasklet
    def foo():
      yield producer(), consumer()
    foo().get_result()

  def testQueueFuture_Complete(self):
    # getq() issued before complete() resolves; after exhaustion the
    # remaining getq() futures fail with EOFError.
    qf = tasklets.QueueFuture()
    qf.putq(1)
    f2 = tasklets.Future()
    qf.putq(f2)
    self.ev.run()
    g1 = qf.getq()
    g2 = qf.getq()
    g3 = qf.getq()
    f2.set_result(2)
    self.ev.run()
    qf.complete()
    self.ev.run()
    self.assertEqual(g1.get_result(), 1)
    self.assertEqual(g2.get_result(), 2)
    self.assertRaises(EOFError, g3.get_result)
    self.assertRaises(EOFError, qf.getq().get_result)

  def testQueueFuture_SetException(self):
    qf = tasklets.QueueFuture()
    f1 = tasklets.Future()
    f1.set_result(1)
    qf.putq(f1)
    qf.putq(f1)
    self.ev.run()
    qf.putq(2)
    self.ev.run()
    f3 = tasklets.Future()
    f3.set_exception(ZeroDivisionError())
    qf.putq(f3)
    self.ev.run()
    f4 = tasklets.Future()
    qf.putq(f4)
    self.ev.run()
    qf.set_exception(KeyError())
    f4.set_result(4)
    self.ev.run()
    self.assertRaises(KeyError, qf.get_result)
    # Futures are returned in the order of completion, which should be
    # f1, f2, f3, f4.  These produce 1, 2, ZeroDivisionError, 4,
    # respectively.  After that KeyError (the exception set on qf
    # itself) is raised.
    self.assertEqual(qf.getq().get_result(), 1)
    self.assertEqual(qf.getq().get_result(), 2)
    self.assertRaises(ZeroDivisionError, qf.getq().get_result)
    self.assertEqual(qf.getq().get_result(), 4)
    self.assertRaises(KeyError, qf.getq().get_result)
    self.assertRaises(KeyError, qf.getq().get_result)

  def testQueueFuture_SetExceptionAlternative(self):
    # A pending getq() is failed by set_exception().
    qf = tasklets.QueueFuture()
    g1 = qf.getq()
    qf.set_exception(KeyError())
    self.ev.run()
    self.assertRaises(KeyError, g1.get_result)

  def testQueueFuture_ItemException(self):
    qf = tasklets.QueueFuture()
    qf.putq(1)
    f2 = tasklets.Future()
    qf.putq(f2)
    f3 = tasklets.Future()
    f3.set_result(3)
    self.ev.run()
    qf.putq(f3)
    self.ev.run()
    f4 = tasklets.Future()
    f4.set_exception(ZeroDivisionError())
    self.ev.run()
    qf.putq(f4)
    f5 = tasklets.Future()
    qf.putq(f5)
    self.ev.run()
    qf.complete()
    self.ev.run()
    f2.set_result(2)
    self.ev.run()
    f5.set_exception(KeyError())
    self.ev.run()
    # Futures are returned in the order of completion, which should be
    # f1, f3, f4, f2, f5.  These produce 1, 3, ZeroDivisionError, 2,
    # KeyError, respectively.  After that EOFError is raised.
    self.assertEqual(qf.getq().get_result(), 1)
    self.assertEqual(qf.getq().get_result(), 3)
    self.assertRaises(ZeroDivisionError, qf.getq().get_result)
    self.assertEqual(qf.getq().get_result(), 2)
    self.assertRaises(KeyError, qf.getq().get_result)
    self.assertRaises(EOFError, qf.getq().get_result)
    self.assertRaises(EOFError, qf.getq().get_result)

  def testSerialQueueFuture(self):
    # Unlike QueueFuture, SerialQueueFuture preserves insertion order
    # even though producers finish at random times.
    q = tasklets.SerialQueueFuture()
    @tasklets.tasklet
    def produce_one(i):
      yield tasklets.sleep(random.randrange(10) * 0.01)
      raise tasklets.Return(i)
    @tasklets.tasklet
    def producer():
      for i in range(10):
        q.add_dependent(produce_one(i))
      q.complete()
    @tasklets.tasklet
    def consumer():
      for i in range(10):
        val = yield q.getq()
        self.assertEqual(val, i)
      yield q
      self.assertRaises(EOFError, q.getq().get_result)
      yield q
    @tasklets.synctasklet
    def foo():
      yield producer(), consumer()
    foo()

  def testSerialQueueFuture_Complete(self):
    sqf = tasklets.SerialQueueFuture()
    g1 = sqf.getq()
    sqf.complete()
    self.assertRaises(EOFError, g1.get_result)

  def testSerialQueueFuture_SetException(self):
    sqf = tasklets.SerialQueueFuture()
    g1 = sqf.getq()
    sqf.set_exception(KeyError())
    self.assertRaises(KeyError, g1.get_result)

  def testSerialQueueFuture_ItemException(self):
    sqf = tasklets.SerialQueueFuture()
    g1 = sqf.getq()
    f1 = tasklets.Future()
    sqf.putq(f1)
    sqf.complete()
    f1.set_exception(ZeroDivisionError())
    self.assertRaises(ZeroDivisionError, g1.get_result)

  # The _PutQ_* tests cover the four putq()/getq() orderings: future vs
  # plain value, and getq() issued before vs after putq().

  def testSerialQueueFuture_PutQ_1(self):
    sqf = tasklets.SerialQueueFuture()
    f1 = tasklets.Future()
    sqf.putq(f1)
    sqf.complete()
    f1.set_result(1)
    self.assertEqual(sqf.getq().get_result(), 1)

  def testSerialQueueFuture_PutQ_2(self):
    sqf = tasklets.SerialQueueFuture()
    sqf.putq(1)
    sqf.complete()
    self.assertEqual(sqf.getq().get_result(), 1)

  def testSerialQueueFuture_PutQ_3(self):
    sqf = tasklets.SerialQueueFuture()
    g1 = sqf.getq()
    sqf.putq(1)
    sqf.complete()
    self.assertEqual(g1.get_result(), 1)

  def testSerialQueueFuture_PutQ_4(self):
    sqf = tasklets.SerialQueueFuture()
    g1 = sqf.getq()
    f1 = tasklets.Future()
    sqf.putq(f1)
    sqf.complete()
    f1.set_result(1)
    self.assertEqual(g1.get_result(), 1)

  def testSerialQueueFuture_GetQ(self):
    sqf = tasklets.SerialQueueFuture()
    sqf.set_exception(KeyError())
    self.assertRaises(KeyError, sqf.getq().get_result)

  def testReducingFuture(self):
    # Items (both plain values and futures) are reduced in batches.
    def reducer(arg):
      return sum(arg)
    rf = tasklets.ReducingFuture(reducer, batch_size=10)
    for i in range(10):
      rf.putq(i)
    for i in range(10, 20):
      f = tasklets.Future()
      rf.putq(f)
      f.set_result(i)
    rf.complete()
    self.assertEqual(rf.get_result(), sum(range(20)))

  def testReducingFuture_Empty(self):
    # With no items the reducer is never called; result is None.
    def reducer(_):
      self.fail()
    rf = tasklets.ReducingFuture(reducer)
    rf.complete()
    self.assertEqual(rf.get_result(), None)

  def testReducingFuture_OneItem(self):
    # A single item is passed through without calling the reducer.
    def reducer(_):
      self.fail()
    rf = tasklets.ReducingFuture(reducer)
    rf.putq(1)
    rf.complete()
    self.assertEqual(rf.get_result(), 1)

  def testReducingFuture_ItemException(self):
    def reducer(arg):
      return sum(arg)
    rf = tasklets.ReducingFuture(reducer)
    f1 = tasklets.Future()
    f1.set_exception(ZeroDivisionError())
    rf.putq(f1)
    rf.complete()
    self.assertRaises(ZeroDivisionError, rf.get_result)

  def testReducingFuture_ReducerException_1(self):
    def reducer(arg):
      raise ZeroDivisionError
    rf = tasklets.ReducingFuture(reducer)
    rf.putq(1)
    rf.putq(1)
    rf.complete()
    self.assertRaises(ZeroDivisionError, rf.get_result)

  def testReducingFuture_ReducerException_2(self):
    # Same, but the reducer fires on a full batch before complete().
    def reducer(arg):
      raise ZeroDivisionError
    rf = tasklets.ReducingFuture(reducer, batch_size=2)
    rf.putq(1)
    rf.putq(1)
    rf.putq(1)
    rf.complete()
    self.assertRaises(ZeroDivisionError, rf.get_result)

  def testReducingFuture_ReducerFuture_1(self):
    # The reducer may itself return a Future.
    def reducer(arg):
      f = tasklets.Future()
      f.set_result(sum(arg))
      return f
    rf = tasklets.ReducingFuture(reducer, batch_size=2)
    rf.putq(1)
    rf.putq(1)
    rf.complete()
    self.assertEqual(rf.get_result(), 2)

  def testReducingFuture_ReducerFuture_2(self):
    # Weird hack to reach _internal_add_dependent() call in _mark_finished().
    def reducer(arg):
      res = sum(arg)
      if len(arg) < 3:
        f = tasklets.Future()
        f.set_result(res)
        res = f
      return res
    rf = tasklets.ReducingFuture(reducer, batch_size=3)
    rf.putq(1)
    rf.putq(1)
    rf.putq(1)
    rf.putq(1)
    rf.complete()
    self.assertEqual(rf.get_result(), 4)

  def testGetReturnValue(self):
    # Return with 0/1/2 args and with a single tuple arg.
    r0 = tasklets.Return()
    r1 = tasklets.Return(42)
    r2 = tasklets.Return(42, 'hello')
    r3 = tasklets.Return((1, 2, 3))
    self.assertEqual(tasklets.get_return_value(r0), None)
    self.assertEqual(tasklets.get_return_value(r1), 42)
    self.assertEqual(tasklets.get_return_value(r2), (42, 'hello'))
    self.assertEqual(tasklets.get_return_value(r3), (1, 2, 3))

  def testTasklets_Basic(self):
    @tasklets.tasklet
    def t1():
      a = yield t2(3)
      b = yield t3(2)
      raise tasklets.Return(a + b)
    @tasklets.tasklet
    def t2(n):
      raise tasklets.Return(n)
    @tasklets.tasklet
    def t3(n):
      # Not a generator: a plain return is also accepted by the wrapper.
      return n
    x = t1()
    self.assertTrue(isinstance(x, tasklets.Future))
    y = x.get_result()
    self.assertEqual(y, 5)

  def testTasklets_Raising(self):
    # An exception raised in a yielded tasklet surfaces at the yield.
    self.ExpectWarnings()
    @tasklets.tasklet
    def t1():
      f = t2(True)
      try:
        yield f
      except RuntimeError, err:
        self.assertEqual(f.get_exception(), err)
        raise tasklets.Return(str(err))
    @tasklets.tasklet
    def t2(error):
      if error:
        raise RuntimeError('hello')
      else:
        yield tasklets.Future()
    x = t1()
    y = x.get_result()
    self.assertEqual(y, 'hello')

  def testTasklets_YieldRpcs(self):
    # Tasklets can yield raw datastore RPCs, not just Futures.
    @tasklets.tasklet
    def main_tasklet():
      rpc1 = self.conn.async_get(None, [])
      rpc2 = self.conn.async_put(None, [])
      res1 = yield rpc1
      res2 = yield rpc2
      raise tasklets.Return(res1, res2)
    f = main_tasklet()
    result = f.get_result()
    self.assertEqual(result, ([], []))

  def testTasklet_YieldTuple(self):
    # Yielding a tuple of tasklets waits for all of them in parallel.
    @tasklets.tasklet
    def fib(n):
      if n <= 1:
        raise tasklets.Return(n)
      a, b = yield fib(n - 1), fib(n - 2)
      # print 'fib(%r) = %r + %r = %r' % (n, a, b, a + b)
      self.assertTrue(a >= b, (a, b))
      raise tasklets.Return(a + b)
    fut = fib(10)
    val = fut.get_result()
    self.assertEqual(val, 55)

  def testTasklet_YieldTupleError(self):
    # An error in any member of a yielded tuple propagates.
    @tasklets.tasklet
    def good():
      yield tasklets.sleep(0)
    @tasklets.tasklet
    def bad():
      raise ZeroDivisionError
    @tasklets.tasklet
    def foo():
      try:
        yield good(), bad(), good()
        self.assertFalse('Should have raised ZeroDivisionError')
      except ZeroDivisionError:
        pass
    foo().check_success()

  def testTasklet_YieldTupleTypeError(self):
    # A non-future member of a yielded tuple is a TypeError.
    self.ExpectWarnings()
    @tasklets.tasklet
    def good():
      yield tasklets.sleep(0)
    @tasklets.tasklet
    def bad():
      raise ZeroDivisionError
      yield tasklets.sleep(0)
    @tasklets.tasklet
    def foo():
      try:
        yield good(), bad(), 42
      except TypeError:
        pass
      else:
        self.assertFalse('Should have raised TypeError')
    foo().check_success()

  def testMultiSingleCombinationYield(self):
    # Yield a (single future, list of futures) pair in one go.
    @tasklets.tasklet
    def foo():
      class Test(model.Model):
        k = model.KeyProperty()
        ks = model.KeyProperty(repeated=True)
      t = Test()
      t.put()
      t1 = Test(k=t.key, ks=[t.key, t.key])
      t1.put()
      t1 = t1.key.get()
      obj, objs = yield t1.k.get_async(), model.get_multi_async(t1.ks)
      self.assertEqual(obj.key, t1.k)
      self.assertEqual([obj.key for obj in objs], t1.ks)
    foo().get_result()

  def testAddContextDecorator(self):
    # @toplevel gives each call a fresh Context.
    class Demo(object):
      @tasklets.toplevel
      def method(self, arg):
        return tasklets.get_context(), arg

      @tasklets.toplevel
      def method2(self, **kwds):
        return tasklets.get_context(), kwds
    a = Demo()
    old_ctx = tasklets.get_context()
    ctx, arg = a.method(42)
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(arg, 42)
    self.assertTrue(ctx is not old_ctx)

    old_ctx = tasklets.get_context()
    ctx, kwds = a.method2(foo='bar', baz='ding')
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(kwds, dict(foo='bar', baz='ding'))
    self.assertTrue(ctx is not old_ctx)
class TracebackTests(test_utils.NDBTest):
  """Checks that errors result in reasonable tracebacks."""

  def testBasicError(self):
    # An exception raised four tasklet levels deep should produce a
    # traceback containing exactly the frames captured below (plus,
    # possibly, internal _help_tasklet_along frames, which we filter).
    self.ExpectWarnings()
    frames = [sys._getframe()]
    @tasklets.tasklet
    def level3():
      frames.append(sys._getframe())
      raise RuntimeError('hello')
      yield  # Unreachable; makes this function a generator.
    @tasklets.tasklet
    def level2():
      frames.append(sys._getframe())
      yield level3()
    @tasklets.tasklet
    def level1():
      frames.append(sys._getframe())
      yield level2()
    @tasklets.tasklet
    def level0():
      frames.append(sys._getframe())
      yield level1()
    fut = level0()
    try:
      fut.check_success()
    except RuntimeError, err:
      _, _, tb = sys.exc_info()
      self.assertEqual(str(err), 'hello')
      tbframes = []
      while tb is not None:
        # It's okay if some _help_tasklet_along frames are present.
        if tb.tb_frame.f_code.co_name != '_help_tasklet_along':
          tbframes.append(tb.tb_frame)
        tb = tb.tb_next
      self.assertEqual(frames, tbframes)
    else:
      self.fail('Expected RuntimeError not raised')
def main():
  """Run this module's test suite."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""Tests for prospective_search.py."""
import base64
import os
import unittest
from .google_imports import apiproxy_stub_map
from .google_test_imports import prospective_search_stub
from . import prospective_search
from . import model
from . import test_utils
class ProspectiveSearchTests(test_utils.NDBTest):
  """Tests for the NDB wrapper around the prospective search API."""

  def setUp(self):
    super(ProspectiveSearchTests, self).setUp()
    # The matcher stub delivers match results through the taskqueue stub,
    # so wire the two together; the stub's file argument is a dummy path.
    tq_stub = self.testbed.get_stub('taskqueue')
    dummy_fn = os.path.devnull
    ps_stub = prospective_search_stub.ProspectiveSearchStub(dummy_fn, tq_stub)
    self.testbed._register_stub('matcher', ps_stub)

  the_module = prospective_search

  def testSubscribe(self):
    class Foo(model.Model):
      name = model.TextProperty()
      rank = model.IntegerProperty()
      tags = model.StringProperty(repeated=True)
      flag = model.BooleanProperty()
      rand = model.FloatProperty()
      nope = model.KeyProperty()
    prospective_search.subscribe(Foo, 'query', 'sub_id')
    # Note: 'nope' (the KeyProperty) is absent from the expected schema.
    self.assertEqual(prospective_search._model_to_entity_schema(Foo),
                     {str: ['name', 'tags'],
                      int: ['rank'],
                      bool: ['flag'],
                      float: ['rand'],
                      })

  def testUnsubscribe(self):
    class Foo(model.Model):
      pass
    prospective_search.unsubscribe(Foo, 'sub_id')

  def testGetSubscription(self):
    class Foo(model.Model):
      pass
    # Unknown id raises; after subscribing, the 5-tuple is returned.
    self.assertRaises(prospective_search.SubscriptionDoesNotExist,
                      prospective_search.get_subscription, Foo, 'sub_id')
    prospective_search.subscribe(Foo, 'query', 'sub_id')
    sub = prospective_search.get_subscription(Foo, 'sub_id')
    sub_id, query, expiration, state, error_message = sub
    self.assertEqual(sub_id, 'sub_id')
    self.assertEqual(query, 'query')
    self.assertTrue(isinstance(expiration, (int, long, float)))
    self.assertEqual(state, 0)
    self.assertEqual(error_message, '')

  def testListSubscriptions(self):
    class Foo(model.Model):
      pass
    # Subscriptions are scoped by topic: the default topic sees nothing.
    prospective_search.subscribe(Foo, 'query', 'sub_id', topic='bar')
    subs = prospective_search.list_subscriptions(Foo)
    self.assertEqual(subs, [])
    subs = prospective_search.list_subscriptions(Foo, topic='bar')
    self.assertEqual(len(subs), 1)
    sub = subs[0]
    sub_id, query, expiration, state, error_message = sub
    self.assertEqual(sub_id, 'sub_id')
    self.assertEqual(query, 'query')
    self.assertTrue(isinstance(expiration, (int, long, float)))
    self.assertEqual(state, 0)
    self.assertEqual(error_message, '')

  def testListTopics(self):
    class Foo(model.Model):
      pass
    # The default topic is the model's kind name.
    prospective_search.subscribe(Foo, 'query', 'sub_id')
    topics = prospective_search.list_topics()
    self.assertEqual(topics, ['Foo'])

  def testMatch(self):
    class Foo(model.Model):
      name = model.StringProperty()
    prospective_search.subscribe(Foo, 'query', 'sub_id')
    ent = Foo(name='fred')
    prospective_search.match(ent)

  def testGetDocument(self):
    # get_document() round-trips an entity encoded the way the matcher
    # service delivers it (base64 urlsafe-encoded entity pb).
    class Foo(model.Model):
      name = model.StringProperty()
    ent = Foo(name='fred')
    request = {'python_document_class': prospective_search._doc_class.ENTITY,
               'document': base64.urlsafe_b64encode(ent._to_pb().Encode())}
    ent2 = prospective_search.get_document(request)
    self.assertEqual(ent2, ent)
def main():
  """Run this module's test suite."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""Tests for model.py."""
import datetime
import difflib
import os
import pickle
import re
import unittest
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import db
from .google_imports import memcache
from .google_imports import namespace_manager
from .google_imports import users
from .google_test_imports import datastore_stub_util
from . import eventloop
from . import key
from . import model
from . import query
from . import tasklets
from . import test_utils
TESTUSER = users.User('test@example.com', 'example.com', '123')
AMSTERDAM = model.GeoPt(52.35, 4.9166667)
GOLDEN_PB = """\
key <
app: "_"
path <
Element {
type: "Model"
id: 42
}
>
>
entity_group <
Element {
type: "Model"
id: 42
}
>
property <
name: "b"
value <
booleanValue: true
>
multiple: false
>
property <
name: "d"
value <
doubleValue: 2.5
>
multiple: false
>
property <
name: "k"
value <
ReferenceValue {
app: "_"
PathElement {
type: "Model"
id: 42
}
}
>
multiple: false
>
property <
name: "p"
value <
int64Value: 42
>
multiple: false
>
property <
name: "q"
value <
stringValue: "hello"
>
multiple: false
>
property <
name: "u"
value <
UserValue {
email: "test@example.com"
auth_domain: "example.com"
gaiaid: 0
obfuscated_gaiaid: "123"
}
>
multiple: false
>
property <
name: "xy"
value <
PointValue {
x: 52.35
y: 4.9166667
}
>
multiple: false
>
"""
INDEXED_PB = re.sub('Model', 'MyModel', GOLDEN_PB)
UNINDEXED_PB = """\
key <
app: "_"
path <
Element {
type: "MyModel"
id: 0
}
>
>
entity_group <
>
raw_property <
meaning: 14
name: "b"
value <
stringValue: "\\000\\377"
>
multiple: false
>
raw_property <
meaning: 15
name: "t"
value <
stringValue: "Hello world\\341\\210\\264"
>
multiple: false
>
"""
PERSON_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.city"
value <
stringValue: "Mountain View"
>
multiple: false
>
property <
name: "address.street"
value <
stringValue: "1600 Amphitheatre"
>
multiple: false
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
NESTED_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.home.city"
value <
stringValue: "Mountain View"
>
multiple: false
>
property <
name: "address.home.street"
value <
stringValue: "1600 Amphitheatre"
>
multiple: false
>
property <
name: "address.work.city"
value <
stringValue: "San Francisco"
>
multiple: false
>
property <
name: "address.work.street"
value <
stringValue: "345 Spear"
>
multiple: false
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
RECURSIVE_PB = """\
key <
app: "_"
path <
Element {
type: "Tree"
id: 0
}
>
>
entity_group <
>
property <
name: "root.left.left.left"
value <
>
multiple: false
>
property <
name: "root.left.left.name"
value <
stringValue: "a1a"
>
multiple: false
>
property <
name: "root.left.left.rite"
value <
>
multiple: false
>
property <
name: "root.left.name"
value <
stringValue: "a1"
>
multiple: false
>
property <
name: "root.left.rite.left"
value <
>
multiple: false
>
property <
name: "root.left.rite.name"
value <
stringValue: "a1b"
>
multiple: false
>
property <
name: "root.left.rite.rite"
value <
>
multiple: false
>
property <
name: "root.name"
value <
stringValue: "a"
>
multiple: false
>
property <
name: "root.rite.left"
value <
>
multiple: false
>
property <
name: "root.rite.name"
value <
stringValue: "a2"
>
multiple: false
>
property <
name: "root.rite.rite.left"
value <
>
multiple: false
>
property <
name: "root.rite.rite.name"
value <
stringValue: "a2b"
>
multiple: false
>
property <
name: "root.rite.rite.rite"
value <
>
multiple: false
>
"""
MULTI_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address"
value <
stringValue: "345 Spear"
>
multiple: true
>
property <
name: "address"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
MULTIINSTRUCT_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.label"
value <
stringValue: "work"
>
multiple: false
>
property <
name: "address.line"
value <
stringValue: "345 Spear"
>
multiple: true
>
property <
name: "address.line"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
MULTISTRUCT_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.label"
value <
stringValue: "work"
>
multiple: true
>
property <
name: "address.text"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "address.label"
value <
stringValue: "home"
>
multiple: true
>
property <
name: "address.text"
value <
stringValue: "Mountain View"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
class ModelTests(test_utils.NDBTest):
  def tearDown(self):
    # No test may leave properties registered on the base classes.
    self.assertTrue(model.Model._properties == {})
    self.assertTrue(model.Expando._properties == {})
    super(ModelTests, self).tearDown()

  the_module = model
  def testKey(self):
    # The key attribute can be set, read back, and deleted.
    m = model.Model()
    self.assertEqual(m.key, None)
    k = model.Key(flat=['ParentModel', 42, 'Model', 'foobar'])
    m.key = k
    self.assertEqual(m.key, k)
    del m.key
    self.assertEqual(m.key, None)
    # incomplete key
    k2 = model.Key(flat=['ParentModel', 42, 'Model', None])
    m.key = k2
    self.assertEqual(m.key, k2)
  def testIncompleteKey(self):
    # An entity with an incomplete key round-trips through its pb.
    m = model.Model()
    k = model.Key(flat=['Model', None])
    m.key = k
    pb = m._to_pb()
    m2 = model.Model._from_pb(pb)
    self.assertEqual(m2, m)
  def testIdAndParent(self):
    # Valid and invalid combinations of the key/id/parent constructor args.
    p = model.Key('ParentModel', 'foo')

    # key name
    m = model.Model(id='bar')
    m2 = model.Model._from_pb(m._to_pb())
    self.assertEqual(m2.key, model.Key('Model', 'bar'))

    # key name + parent
    m = model.Model(id='bar', parent=p)
    m2 = model.Model._from_pb(m._to_pb())
    self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', 'bar'))

    # key id
    m = model.Model(id=42)
    m2 = model.Model._from_pb(m._to_pb())
    self.assertEqual(m2.key, model.Key('Model', 42))

    # key id + parent
    m = model.Model(id=42, parent=p)
    m2 = model.Model._from_pb(m._to_pb())
    self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', 42))

    # parent
    m = model.Model(parent=p)
    m2 = model.Model._from_pb(m._to_pb())
    self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', None))

    # not key -- invalid
    self.assertRaises(datastore_errors.BadValueError, model.Model, key='foo')

    # wrong key kind -- invalid
    k = model.Key('OtherModel', 'bar')
    class MyModel(model.Model):
      pass
    self.assertRaises(model.KindError, MyModel, key=k)

    # incomplete parent -- invalid
    p2 = model.Key('ParentModel', None)
    self.assertRaises(datastore_errors.BadArgumentError, model.Model,
                      parent=p2)
    self.assertRaises(datastore_errors.BadArgumentError, model.Model,
                      id='bar', parent=p2)

    # key + id -- invalid
    k = model.Key('Model', 'bar')
    self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
                      id='bar')

    # key + parent -- invalid
    k = model.Key('Model', 'bar', parent=p)
    self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
                      parent=p)

    # key + id + parent -- invalid
    self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
                      id='bar', parent=p)
  def testNamespaceAndApp(self):
    # namespace= and app= constructor args flow into the generated key.
    m = model.Model(namespace='')
    self.assertEqual(m.key.namespace(), '')
    m = model.Model(namespace='x')
    self.assertEqual(m.key.namespace(), 'x')
    m = model.Model(app='y')
    self.assertEqual(m.key.app(), 'y')
  def testNamespaceAndAppErrors(self):
    # namespace=/app= cannot be combined with an explicit key.
    self.assertRaises(datastore_errors.BadArgumentError,
                      model.Model, key=model.Key('X', 1), namespace='')
    self.assertRaises(datastore_errors.BadArgumentError,
                      model.Model, key=model.Key('X', 1), namespace='x')
    self.assertRaises(datastore_errors.BadArgumentError,
                      model.Model, key=model.Key('X', 1), app='y')
  def testPropsOverrideConstructorArgs(self):
    # When a model defines properties named key/id/app/namespace/parent,
    # the plain constructor args feed those properties, while the
    # underscore-prefixed variants (_key, _id, ...) still address the key.
    class MyModel(model.Model):
      key = model.StringProperty()
      id = model.StringProperty()
      app = model.StringProperty()
      namespace = model.StringProperty()
      parent = model.StringProperty()
    root = model.Key('Root', 1, app='app', namespace='ns')
    key = model.Key(MyModel, 42, parent=root)

    # _key sets the entity key; the 'key' property stays unset.
    a = MyModel(_key=key)
    self.assertEqual(a._key, key)
    self.assertEqual(a.key, None)

    # _id/_app/_namespace/_parent combine into the same entity key.
    b = MyModel(_id=42, _app='app', _namespace='ns', _parent=root)
    self.assertEqual(b._key, key)
    self.assertEqual(b.key, None)
    self.assertEqual(b.id, None)
    self.assertEqual(b.app, None)
    self.assertEqual(b.namespace, None)
    self.assertEqual(b.parent, None)

    # Plain names populate the like-named properties, not the key.
    c = MyModel(key='key', id='id', app='app', namespace='ns', parent='root')
    self.assertEqual(c._key, None)
    self.assertEqual(c.key, 'key')
    self.assertEqual(c.id, 'id')
    self.assertEqual(c.app, 'app')
    self.assertEqual(c.namespace, 'ns')
    self.assertEqual(c.parent, 'root')

    # Both forms may be used together without interference.
    d = MyModel(_id=42, _app='app', _namespace='ns', _parent=root,
                key='key', id='id', app='app', namespace='ns', parent='root')
    self.assertEqual(d._key, key)
    self.assertEqual(d.key, 'key')
    self.assertEqual(d.id, 'id')
    self.assertEqual(d.app, 'app')
    self.assertEqual(d.namespace, 'ns')
    self.assertEqual(d.parent, 'root')
  def testAdapter(self):
    # ModelAdapter round-trips entities and keys to/from protobufs;
    # without a default kind it cannot decode a pb lacking a kind.
    class Foo(model.Model):
      name = model.StringProperty()
    ad = model.ModelAdapter()
    foo1 = Foo(name='abc')
    pb1 = ad.entity_to_pb(foo1)
    foo2 = ad.pb_to_entity(pb1)
    self.assertEqual(foo1, foo2)
    self.assertTrue(foo2.key is None)
    pb2 = foo2._to_pb(set_key=False)
    self.assertRaises(model.KindError, ad.pb_to_entity, pb2)
    # With a default model class, the keyless pb decodes fine.
    ad = model.ModelAdapter(Foo)
    foo3 = ad.pb_to_entity(pb2)
    self.assertEqual(foo3, foo2)
    key1 = model.Key(Foo, 1)
    pbk1 = ad.key_to_pb(key1)
    key2 = ad.pb_to_key(pbk1)
    self.assertEqual(key1, key2)
  def testPropertyVerboseNameAttribute(self):
    # verbose_name passed to a Property lands in _verbose_name.
    class Foo(model.Model):
      name = model.StringProperty(verbose_name='Full name')
    np = Foo._properties['name']
    self.assertEqual('Full name', np._verbose_name)
  def testQuery(self):
    # Model.query() builds a Query with the model's kind; an ancestor
    # key may be passed but must be complete.
    class MyModel(model.Model):
      p = model.IntegerProperty()

    q = MyModel.query()
    self.assertTrue(isinstance(q, query.Query))
    self.assertEqual(q.kind, 'MyModel')
    self.assertEqual(q.ancestor, None)

    k = model.Key(flat=['Model', 1])
    q = MyModel.query(ancestor=k)
    self.assertEqual(q.kind, 'MyModel')
    self.assertEqual(q.ancestor, k)

    k0 = model.Key(flat=['Model', None])
    self.assertRaises(Exception, MyModel.query, ancestor=k0)
def testQueryWithFilter(self):
  """A filter node passed to query() equals one added via .filter()."""
  class MyModel(model.Model):
    p = model.IntegerProperty()
  direct = MyModel.query(MyModel.p >= 0)
  self.assertTrue(isinstance(direct, query.Query))
  self.assertEqual(direct.kind, 'MyModel')
  self.assertEqual(direct.ancestor, None)
  self.assertTrue(direct.filters is not None)
  chained = MyModel.query().filter(MyModel.p >= 0)
  self.assertEqual(direct.filters, chained.filters)
def testQueryForNone(self):
  """Querying `prop == None` / `prop != None` behaves across property types.

  NOTE: the `== None` comparisons below are intentional -- on NDB property
  objects they build filter nodes; they are not identity tests.
  """
  class MyModel(model.Model):
    b = model.BooleanProperty()
    bb = model.BlobProperty(indexed=True)
    d = model.DateProperty()
    f = model.FloatProperty()
    i = model.IntegerProperty()
    k = model.KeyProperty()
    s = model.StringProperty()
    t = model.TimeProperty()
    u = model.UserProperty()
    xy = model.GeoPtProperty()
  # m1 has every property unset (None); m2 has every property set.
  m1 = MyModel()
  m1.put()
  m2 = MyModel(
      b=True,
      bb='z',
      d=datetime.date.today(),
      f=3.14,
      i=1,
      k=m1.key,
      s='a',
      t=datetime.time(),
      u=TESTUSER,
      xy=AMSTERDAM,
      )
  m2.put()
  # Conjunction of == None filters matches only the all-unset entity.
  q = MyModel.query(
      MyModel.b == None,
      MyModel.bb == None,
      MyModel.d == None,
      MyModel.f == None,
      MyModel.i == None,
      MyModel.k == None,
      MyModel.s == None,
      MyModel.t == None,
      MyModel.u == None,
      MyModel.xy == None,
      )
  r = q.fetch()
  self.assertEqual(r, [m1])
  # Each individual != None filter matches only the all-set entity.
  qq = [
      MyModel.query(MyModel.b != None),
      MyModel.query(MyModel.bb != None),
      MyModel.query(MyModel.d != None),
      MyModel.query(MyModel.f != None),
      MyModel.query(MyModel.i != None),
      MyModel.query(MyModel.k != None),
      MyModel.query(MyModel.s != None),
      MyModel.query(MyModel.t != None),
      MyModel.query(MyModel.u != None),
      MyModel.query(MyModel.xy != None),
      ]
  for q in qq:
    r = q.fetch()
    self.assertEqual(r, [m2], str(q))
def testBottom(self):
  """_BaseValue supports repr() and value-based ==/!= comparisons.

  The assertTrue(a == b) forms are deliberate: they exercise the __eq__ and
  __ne__ operators directly, which assertEqual would not distinguish.
  """
  a = model._BaseValue(42)
  b = model._BaseValue(42)
  c = model._BaseValue('hello')
  self.assertEqual("_BaseValue(42)", repr(a))
  self.assertEqual("_BaseValue('hello')", repr(c))
  self.assertTrue(a == b)
  self.assertFalse(a != b)
  self.assertTrue(b != c)
  self.assertFalse(b == c)
  # Comparison against a non-_BaseValue is always unequal.
  self.assertFalse(a == 42)
  self.assertTrue(a != 42)
def testCompressedValue(self):
  """_CompressedValue supports repr() and value-based ==/!= comparisons.

  As in testBottom, assertTrue/assertFalse on == and != deliberately
  exercise the comparison operators themselves.
  """
  a = model._CompressedValue('xyz')
  b = model._CompressedValue('xyz')
  c = model._CompressedValue('abc')
  self.assertEqual("_CompressedValue('abc')", repr(c))
  self.assertTrue(a == b)
  self.assertFalse(a != b)
  self.assertTrue(b != c)
  self.assertFalse(b == c)
  # Comparison against the raw string is always unequal.
  self.assertFalse(a == 'xyz')
  self.assertTrue(a != 'xyz')
def testProperty(self):
  """Basic property _set_value/_get_value and PB serialization round-trip.

  The serialized form is pinned against the INDEXED_PB golden text.
  """
  class MyModel(model.Model):
    b = model.BooleanProperty()
    p = model.IntegerProperty()
    q = model.StringProperty()
    d = model.FloatProperty()
    k = model.KeyProperty()
    u = model.UserProperty()
    xy = model.GeoPtProperty()
  ent = MyModel()
  k = model.Key(flat=['MyModel', 42])
  ent.key = k
  MyModel.b._set_value(ent, True)
  MyModel.p._set_value(ent, 42)
  MyModel.q._set_value(ent, 'hello')
  MyModel.d._set_value(ent, 2.5)
  MyModel.k._set_value(ent, k)
  MyModel.u._set_value(ent, TESTUSER)
  MyModel.xy._set_value(ent, AMSTERDAM)
  self.assertEqual(MyModel.b._get_value(ent), True)
  self.assertEqual(MyModel.p._get_value(ent), 42)
  self.assertEqual(MyModel.q._get_value(ent), 'hello')
  self.assertEqual(MyModel.d._get_value(ent), 2.5)
  self.assertEqual(MyModel.k._get_value(ent), k)
  self.assertEqual(MyModel.u._get_value(ent), TESTUSER)
  self.assertEqual(MyModel.xy._get_value(ent), AMSTERDAM)
  # Serialize via the connection's adapter and compare to the golden PB.
  pb = self.conn.adapter.entity_to_pb(ent)
  self.assertEqual(str(pb), INDEXED_PB)
  # Deserialize and confirm the values survived the round trip.
  ent = MyModel._from_pb(pb)
  self.assertEqual(ent._get_kind(), 'MyModel')
  k = model.Key(flat=['MyModel', 42])
  self.assertEqual(ent.key, k)
  self.assertEqual(MyModel.p._get_value(ent), 42)
  self.assertEqual(MyModel.q._get_value(ent), 'hello')
  self.assertEqual(MyModel.d._get_value(ent), 2.5)
  self.assertEqual(MyModel.k._get_value(ent), k)
def testDeletingPropertyValue(self):
  """del on a property restores the 'unset' state; reads still return None."""
  class MyModel(model.Model):
    a = model.StringProperty()
  m = MyModel()
  # Initially it isn't there (but the value defaults to None).
  self.assertEqual(m.a, None)
  self.assertFalse(MyModel.a._has_value(m))
  # Explicit None assignment makes it present.
  m.a = None
  self.assertEqual(m.a, None)
  self.assertTrue(MyModel.a._has_value(m))
  # Deletion restores the initial state.
  del m.a
  self.assertEqual(m.a, None)
  self.assertFalse(MyModel.a._has_value(m))
  # Redundant deletions are okay.
  del m.a
  self.assertEqual(m.a, None)
  self.assertFalse(MyModel.a._has_value(m))
  # Deleted/missing values are serialized and considered present
  # when deserialized.
  pb = m._to_pb()
  m = MyModel._from_pb(pb)
  self.assertEqual(m.a, None)
  self.assertTrue(MyModel.a._has_value(m))
def testDefaultPropertyValue(self):
  """Defaults apply while unset, are overridden by assignment, and become
  explicit after a PB round trip."""
  class MyModel(model.Model):
    a = model.StringProperty(default='a')
    b = model.StringProperty(default='')
  m = MyModel()
  # Initial values equal the defaults.
  self.assertEqual(m.a, 'a')
  self.assertEqual(m.b, '')
  self.assertFalse(MyModel.a._has_value(m))
  self.assertFalse(MyModel.b._has_value(m))
  # Setting values erases the defaults.
  m.a = ''
  m.b = 'b'
  self.assertEqual(m.a, '')
  self.assertEqual(m.b, 'b')
  self.assertTrue(MyModel.a._has_value(m))
  self.assertTrue(MyModel.b._has_value(m))
  # Deleting values restores the defaults.
  del m.a
  del m.b
  self.assertEqual(m.a, 'a')
  self.assertEqual(m.b, '')
  self.assertFalse(MyModel.a._has_value(m))
  self.assertFalse(MyModel.b._has_value(m))
  # Serialization makes the default values explicit.
  pb = m._to_pb()
  m = MyModel._from_pb(pb)
  self.assertEqual(m.a, 'a')
  self.assertEqual(m.b, '')
  self.assertTrue(MyModel.a._has_value(m))
  self.assertTrue(MyModel.b._has_value(m))
def testComparingExplicitAndImplicitValue(self):
  """Explicitly assigning None or the default compares equal to unset."""
  class MyModel(model.Model):
    a = model.StringProperty(default='a')
    b = model.StringProperty()
  explicit = MyModel(b=None)
  implicit = MyModel()
  self.assertEqual(explicit, implicit)
  explicit.a = 'a'
  self.assertEqual(explicit, implicit)
def testRequiredProperty(self):
  """required=True blocks serialization while the value is unset or None."""
  class MyModel(model.Model):
    a = model.StringProperty(required=True)
    b = model.StringProperty()  # Never counts as uninitialized
  self.assertEqual(repr(MyModel.a), "StringProperty('a', required=True)")
  m = MyModel()
  # Never-assigned values are considered uninitialized.
  self.assertEqual(m._find_uninitialized(), set(['a']))
  self.assertRaises(datastore_errors.BadValueError, m._check_initialized)
  self.assertRaises(datastore_errors.BadValueError, m._to_pb)
  # Empty string is fine.
  m.a = ''
  self.assertFalse(m._find_uninitialized())
  m._check_initialized()
  m._to_pb()
  # Non-empty string is fine (of course).
  m.a = 'foo'
  self.assertFalse(m._find_uninitialized())
  m._check_initialized()
  m._to_pb()
  # Deleted value is not fine.
  del m.a
  self.assertEqual(m._find_uninitialized(), set(['a']))
  self.assertRaises(datastore_errors.BadValueError, m._check_initialized)
  self.assertRaises(datastore_errors.BadValueError, m._to_pb)
  # Explicitly assigned None is *not* fine.
  m.a = None
  self.assertEqual(m._find_uninitialized(), set(['a']))
  self.assertRaises(datastore_errors.BadValueError, m._check_initialized)
  self.assertRaises(datastore_errors.BadValueError, m._to_pb)
  # Check that b is still unset.
  self.assertFalse(MyModel.b._has_value(m))
def testRepeatedRequiredDefaultConflict(self):
  # Allow at most one of repeated=True, required=True, default=<non-None>.
  # NOTE: the assertRaises calls are intentionally placed *inside* the class
  # body so the would-be properties never get attached to MyModel; the model
  # ends up with no properties, as the final repr() assertion confirms.
  class MyModel(model.Model):
    self.assertRaises(Exception,
                      model.StringProperty, repeated=True, default='')
    self.assertRaises(Exception,
                      model.StringProperty, repeated=True, required=True)
    self.assertRaises(Exception,
                      model.StringProperty, required=True, default='')
    self.assertRaises(Exception,
                      model.StringProperty,
                      repeated=True, required=True, default='')
  self.assertEqual('MyModel()', repr(MyModel()))
def testKeyProperty(self):
  """KeyProperty accepts kind=None/class/string and validates assignments."""
  class RefModel(model.Model):
    pass
  class FancyModel(model.Model):
    @classmethod
    def _get_kind(cls):
      return 'Fancy'
  class FancierModel(model.Model):
    @classmethod
    def _get_kind(cls):
      return u'Fancier'
  class FanciestModel(model.Model):
    @classmethod
    def _get_kind(cls):
      return '\xff'
  class MyModel(model.Model):
    basic = model.KeyProperty(kind=None)
    ref = model.KeyProperty(kind=RefModel)
    refs = model.KeyProperty(kind=RefModel, repeated=True)
    fancy = model.KeyProperty(kind=FancyModel)
    fancee = model.KeyProperty(kind='Fancy')
    fancier = model.KeyProperty(kind=FancierModel)
    fanciest = model.KeyProperty(kind=FanciestModel)
    faanceest = model.KeyProperty(kind=u'\xff')
  a = MyModel(basic=model.Key('Foo', 1),
              ref=model.Key(RefModel, 1),
              refs=[model.Key(RefModel, 2), model.Key(RefModel, 3)],
              fancy=model.Key(FancyModel, 1),
              fancee=model.Key(FancyModel, 2),
              fancier=model.Key('Fancier', 1),
              fanciest=model.Key(FanciestModel, 1))
  a.put()
  b = a.key.get()
  self.assertEqual(a, b)
  # Try some assignments.
  b.basic = model.Key('Bar', 1)
  b.ref = model.Key(RefModel, 2)
  b.refs = [model.Key(RefModel, 4)]
  # Try the repr().
  self.assertEqual(repr(MyModel.basic), "KeyProperty('basic')")
  self.assertEqual(repr(MyModel.ref), "KeyProperty('ref', kind='RefModel')")
  # Try some errors declaring properties.
  self.assertRaises(TypeError, model.KeyProperty, kind=42)  # Non-class.
  self.assertRaises(TypeError, model.KeyProperty, kind=int)  # Non-Model.
  # Try some errors assigning property values.
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, a, 'ref', model.Key('Bar', 1))
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, a, 'refs', [model.Key('Bar', 1)])
def testKeyPropertyPositionalKind(self):
  """KeyProperty's two positional args (name, kind) may come in any order.

  A Model subclass positional arg is taken as the kind; a string is taken
  as the property name; None is accepted as a placeholder for either.
  Ambiguous or duplicate combinations raise TypeError.
  """
  class RefModel(model.Model):
    pass
  class MyModel(model.Model):
    ref0 = model.KeyProperty('REF0')
    ref1 = model.KeyProperty(RefModel)
    ref2 = model.KeyProperty(RefModel, 'REF2')
    ref3 = model.KeyProperty('REF3', RefModel)
    ref4 = model.KeyProperty(None)
    ref5 = model.KeyProperty(None, None)
    ref6 = model.KeyProperty(RefModel, None)
    ref7 = model.KeyProperty(None, RefModel)
    ref8 = model.KeyProperty('REF8', None)
    ref9 = model.KeyProperty(None, 'REF9')
  self.assertEqual(MyModel.ref0._kind, None)
  self.assertEqual(MyModel.ref1._kind, 'RefModel')
  self.assertEqual(MyModel.ref2._kind, 'RefModel')
  self.assertEqual(MyModel.ref3._kind, 'RefModel')
  self.assertEqual(MyModel.ref4._kind, None)
  self.assertEqual(MyModel.ref5._kind, None)
  self.assertEqual(MyModel.ref6._kind, 'RefModel')
  self.assertEqual(MyModel.ref7._kind, 'RefModel')
  self.assertEqual(MyModel.ref8._kind, None)
  self.assertEqual(MyModel.ref9._kind, None)
  self.assertEqual(MyModel.ref0._name, 'REF0')
  self.assertEqual(MyModel.ref1._name, 'ref1')
  self.assertEqual(MyModel.ref2._name, 'REF2')
  self.assertEqual(MyModel.ref3._name, 'REF3')
  self.assertEqual(MyModel.ref4._name, 'ref4')
  self.assertEqual(MyModel.ref5._name, 'ref5')
  self.assertEqual(MyModel.ref6._name, 'ref6')
  self.assertEqual(MyModel.ref7._name, 'ref7')
  self.assertEqual(MyModel.ref8._name, 'REF8')
  self.assertEqual(MyModel.ref9._name, 'REF9')
  # Invalid positional combinations all raise TypeError.
  for args in [(1,), (int,), (1, int), (int, 1),
               ('x', 'y'), (RefModel, RefModel),
               (None, int), (int, None), (None, 1), (1, None)]:
    self.assertRaises(TypeError, model.KeyProperty, *args)
  # A positional kind plus kind= keyword is also rejected.
  self.assertRaises(TypeError, model.KeyProperty, RefModel, kind='K')
  self.assertRaises(TypeError, model.KeyProperty, None, RefModel, kind='k')
  self.assertRaises(TypeError, model.KeyProperty, 'n', RefModel, kind='k')
def testBlobKeyProperty(self):
  """BlobKeyProperty stores and returns a datastore_types.BlobKey."""
  class MyModel(model.Model):
    image = model.BlobKeyProperty()
  test_blobkey = datastore_types.BlobKey('testkey123')
  m = MyModel()
  m.image = test_blobkey
  m.put()
  m = m.key.get()
  # The value survives a put/get round trip with its type intact.
  self.assertTrue(isinstance(m.image, datastore_types.BlobKey))
  self.assertEqual(str(m.image), str(test_blobkey))
def testChoicesProperty(self):
  """choices= restricts assignable values; None and [] are always allowed."""
  class MyModel(model.Model):
    a = model.StringProperty(choices=['a', 'b', 'c'])
    b = model.IntegerProperty(choices=[1, 2, 3], repeated=True)
  m = MyModel(a='a', b=[1, 2])
  m.a = 'b'
  m.a = None
  m.b = [1, 1, 3]
  m.b = []
  # Values outside the choices list are rejected.
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, m, 'a', 'A')
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, m, 'b', [42])
def testValidatorProperty(self):
  """A validator may transform the value or raise BadValueError.

  The validator lowercases its input and rejects values not starting
  with 'a'; it applies per-item to repeated properties.
  """
  def my_validator(prop, value):
    value = value.lower()
    if not value.startswith('a'):
      raise datastore_errors.BadValueError('%s does not start with "a"' %
                                           prop._name)
    return value
  class MyModel(model.Model):
    a = model.StringProperty(validator=my_validator)
    foos = model.StringProperty(validator=my_validator, repeated=True)
  m = MyModel()
  m.a = 'ABC'
  self.assertEqual(m.a, 'abc')
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, m, 'a', 'def')
  m.foos = ['ABC', 'ABC', 'ABC']
  self.assertEqual(m.foos, ['abc', 'abc', 'abc'])
  self.assertRaises(datastore_errors.BadValueError,
                    setattr, m, 'foos', ['def'])
def testUnindexedProperty(self):
  """TextProperty/BlobProperty round-trip through the UNINDEXED_PB golden."""
  class MyModel(model.Model):
    t = model.TextProperty()
    b = model.BlobProperty()
  ent = MyModel()
  MyModel.t._set_value(ent, u'Hello world\u1234')
  MyModel.b._set_value(ent, '\x00\xff')
  self.assertEqual(MyModel.t._get_value(ent), u'Hello world\u1234')
  self.assertEqual(MyModel.b._get_value(ent), '\x00\xff')
  # Serialization matches the unindexed golden PB text.
  pb = ent._to_pb()
  self.assertEqual(str(pb), UNINDEXED_PB)
  ent = MyModel._from_pb(pb)
  self.assertEqual(ent._get_kind(), 'MyModel')
  k = model.Key(flat=['MyModel', None])
  self.assertEqual(ent.key, k)
  self.assertEqual(MyModel.t._get_value(ent), u'Hello world\u1234')
  self.assertEqual(MyModel.b._get_value(ent), '\x00\xff')
def testUserPropertyAutoFlags(self):
  """auto_current_user_add sets the user once; auto_current_user on every put."""
  # Can't combine auto_current_user* with repeated.
  self.assertRaises(ValueError, model.UserProperty,
                    repeated=True, auto_current_user_add=True)
  self.assertRaises(ValueError, model.UserProperty,
                    repeated=True, auto_current_user=True)
  # Define a model with user properties.
  class MyModel(model.Model):
    u0 = model.UserProperty(auto_current_user_add=True)
    u1 = model.UserProperty(auto_current_user=True)
  # Without a current user, these remain None.
  x = MyModel()
  k = x.put()
  y = k.get()
  self.assertTrue(y.u0 is None)
  self.assertTrue(y.u1 is None)
  try:
    # When there is a current user, it sets both.
    os.environ['USER_EMAIL'] = 'test@example.com'
    x = MyModel()
    k = x.put()
    y = k.get()
    self.assertFalse(y.u0 is None)
    self.assertFalse(y.u1 is None)
    self.assertEqual(y.u0, users.User(email='test@example.com'))
    self.assertEqual(y.u1, users.User(email='test@example.com'))
    # When the current user changes, only u1 is changed.
    os.environ['USER_EMAIL'] = 'test2@example.com'
    x.put()
    y = k.get()
    self.assertEqual(y.u0, users.User(email='test@example.com'))
    self.assertEqual(y.u1, users.User(email='test2@example.com'))
    # When we delete the property values, both are reset.
    del x.u0
    del x.u1
    x.put()
    y = k.get()
    self.assertEqual(y.u0, users.User(email='test2@example.com'))
    self.assertEqual(y.u1, users.User(email='test2@example.com'))
    # When we set them to None, u0 stays None, u1 is reset.
    x.u0 = None
    x.u1 = None
    x.put()
    y = k.get()
    self.assertEqual(y.u0, None)
    self.assertEqual(y.u1, users.User(email='test2@example.com'))
  finally:
    # Reset environment.
    del os.environ['USER_EMAIL']
def testPickleProperty(self):
  """PickleProperty round-trips an arbitrary picklable value via put()/get()."""
  class MyModel(model.Model):
    pkl = model.PickleProperty()
  sample = {'one': 1, 2: [1, 2, '3'], 3.: model.Model}
  ent = MyModel(pkl=sample)
  ent.put()
  ent2 = ent.key.get()
  # assertEqual (not assertTrue(x == y)) so a failure reports the diff.
  self.assertEqual(ent2.pkl, sample)
def testJsonProperty(self):
  """JsonProperty round-trips a JSON-serializable value via put()/get()."""
  class MyModel(model.Model):
    pkl = model.JsonProperty()
  sample = [1, 2, {'a': 'one', 'b': [1, 2]}, 'xyzzy', [1, 2, 3]]
  ent = MyModel(pkl=sample)
  ent.put()
  ent2 = ent.key.get()
  # assertEqual (not assertTrue(x == y)) so a failure reports the diff.
  self.assertEqual(ent2.pkl, sample)
def DateAndOrTimePropertyTest(self, propclass, t1, t2):
  """Shared driver for date/time property tests.

  Args:
    propclass: the property class under test (DateTimeProperty,
      DateProperty or TimeProperty).
    t1, t2: two sample values of the matching type.

  Verifies auto_now_add/auto_now at top level and inside (local)
  structured properties, and that values survive a PB round trip.
  """
  class ClockInOut(model.Model):
    ctime = propclass(auto_now_add=True)
    mtime = propclass(auto_now=True)
  class Person(model.Model):
    name = model.StringProperty()
    ctime = propclass(auto_now_add=True)
    mtime = propclass(auto_now=True)
    atime = propclass()
    times = propclass(repeated=True)
    struct = model.StructuredProperty(ClockInOut)
    repstruct = model.StructuredProperty(ClockInOut, repeated=True)
    localstruct = model.LocalStructuredProperty(ClockInOut)
    replocalstruct = model.LocalStructuredProperty(ClockInOut, repeated=True)
  p = Person(id=1, struct=ClockInOut(), repstruct=[ClockInOut()],
             localstruct=ClockInOut(), replocalstruct=[ClockInOut()])
  p.atime = t1
  p.times = [t1, t2]
  # Before put(), no auto timestamps are filled in anywhere.
  self.assertEqual(p.ctime, None)
  self.assertEqual(p.mtime, None)
  self.assertEqual(p.struct.ctime, None)
  self.assertEqual(p.struct.mtime, None)
  self.assertEqual(p.repstruct[0].ctime, None)
  self.assertEqual(p.repstruct[0].mtime, None)
  self.assertEqual(p.localstruct.ctime, None)
  self.assertEqual(p.localstruct.mtime, None)
  self.assertEqual(p.replocalstruct[0].ctime, None)
  self.assertEqual(p.replocalstruct[0].mtime, None)
  p.put()
  # After put(), every auto timestamp (including nested ones) is set.
  self.assertNotEqual(p.ctime, None)
  self.assertNotEqual(p.mtime, None)
  self.assertNotEqual(p.struct.ctime, None)
  self.assertNotEqual(p.struct.mtime, None)
  self.assertNotEqual(p.repstruct[0].ctime, None)
  self.assertNotEqual(p.repstruct[0].mtime, None)
  self.assertNotEqual(p.localstruct.ctime, None)
  self.assertNotEqual(p.localstruct.mtime, None)
  self.assertNotEqual(p.replocalstruct[0].ctime, None)
  self.assertNotEqual(p.replocalstruct[0].mtime, None)
  # Values round-trip through serialization unchanged.
  pb = p._to_pb()
  q = Person._from_pb(pb)
  self.assertEqual(q.ctime, p.ctime)
  self.assertEqual(q.mtime, p.mtime)
  self.assertEqual(q.struct.ctime, p.struct.ctime)
  self.assertEqual(q.struct.mtime, p.struct.mtime)
  self.assertEqual(q.repstruct[0].ctime, p.repstruct[0].ctime)
  self.assertEqual(q.repstruct[0].mtime, p.repstruct[0].mtime)
  self.assertEqual(q.localstruct.ctime, p.localstruct.ctime)
  self.assertEqual(q.localstruct.mtime, p.localstruct.mtime)
  self.assertEqual(q.replocalstruct[0].ctime, p.replocalstruct[0].ctime)
  self.assertEqual(q.replocalstruct[0].mtime, p.replocalstruct[0].mtime)
  self.assertEqual(q.atime, t1)
  self.assertEqual(q.times, [t1, t2])
def PrepareForPutTests(self, propclass):
  """Serializing via _to_pb() alone must not trigger auto_now* timestamps."""
  class AuditedRecord(model.Model):
    created = propclass(auto_now_add=True)
    modified = propclass(auto_now=True)
  record = AuditedRecord(id=1)
  record._to_pb()
  self.assertEqual(record.created, None,
                   'auto_now_add set before entity was put')
  self.assertEqual(record.modified, None,
                   'auto_now set before entity was put')
def MultiDateAndOrTimePropertyTest(self, *args):
  """Run the date/time property tests under each caching policy in turn."""
  ctx = tasklets.get_context()
  # Run tests against datastore
  self.DateAndOrTimePropertyTest(*args)
  self.PrepareForPutTests(args[0])
  ctx.set_datastore_policy(False)
  # Run tests against memcache
  ctx.set_memcache_policy(True)
  self.DateAndOrTimePropertyTest(*args)
  self.PrepareForPutTests(args[0])
  ctx.set_memcache_policy(False)
  # Run tests against process cache
  ctx.set_cache_policy(True)
  self.DateAndOrTimePropertyTest(*args)
  self.PrepareForPutTests(args[0])
def testDateTimeProperty(self):
  """Exercise DateTimeProperty under all cache policies."""
  sample1 = datetime.datetime(1982, 12, 1, 9, 0, 0)
  sample2 = datetime.datetime(1995, 4, 15, 5, 0, 0)
  self.MultiDateAndOrTimePropertyTest(model.DateTimeProperty, sample1, sample2)
def testDateProperty(self):
  """Exercise DateProperty under all cache policies."""
  sample1 = datetime.date(1982, 12, 1)
  sample2 = datetime.date(1995, 4, 15)
  self.MultiDateAndOrTimePropertyTest(model.DateProperty, sample1, sample2)
def testTimeProperty(self):
  """Exercise TimeProperty under all cache policies."""
  sample1 = datetime.time(9, 0, 0)
  sample2 = datetime.time(5, 0, 0, 500)
  self.MultiDateAndOrTimePropertyTest(model.TimeProperty, sample1, sample2)
def testStructuredProperty(self):
  """StructuredProperty values serialize per the PERSON_PB golden text."""
  class Address(model.Model):
    street = model.StringProperty()
    city = model.StringProperty()
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StructuredProperty(Address)
  p = Person()
  p.name = 'Google'
  a = Address(street='1600 Amphitheatre')
  p.address = a
  # Mutating through the parent reaches the same sub-entity object.
  p.address.city = 'Mountain View'
  self.assertEqual(Person.name._get_value(p), 'Google')
  self.assertEqual(p.name, 'Google')
  self.assertEqual(Person.address._get_value(p), a)
  self.assertEqual(Address.street._get_value(a), '1600 Amphitheatre')
  self.assertEqual(Address.city._get_value(a), 'Mountain View')
  pb = p._to_pb()
  self.assertEqual(str(pb), PERSON_PB)
  # The structured value survives deserialization.
  p = Person._from_pb(pb)
  self.assertEqual(p.name, 'Google')
  self.assertEqual(p.address.street, '1600 Amphitheatre')
  self.assertEqual(p.address.city, 'Mountain View')
  self.assertEqual(p.address, a)
def testNestedStructuredProperty(self):
  """StructuredProperty nests two levels; pinned against NESTED_PB."""
  class Address(model.Model):
    street = model.StringProperty()
    city = model.StringProperty()
  class AddressPair(model.Model):
    home = model.StructuredProperty(Address)
    work = model.StructuredProperty(Address)
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StructuredProperty(AddressPair)
  p = Person()
  p.name = 'Google'
  p.address = AddressPair(home=Address(), work=Address())
  p.address.home.city = 'Mountain View'
  p.address.home.street = '1600 Amphitheatre'
  p.address.work.city = 'San Francisco'
  p.address.work.street = '345 Spear'
  pb = p._to_pb()
  self.assertEqual(str(pb), NESTED_PB)
  # Both nested sub-entities survive deserialization.
  p = Person._from_pb(pb)
  self.assertEqual(p.name, 'Google')
  self.assertEqual(p.address.home.street, '1600 Amphitheatre')
  self.assertEqual(p.address.home.city, 'Mountain View')
  self.assertEqual(p.address.work.street, '345 Spear')
  self.assertEqual(p.address.work.city, 'San Francisco')
def testRepeatedNestedStructuredProperty(self):
  """A repeated StructuredProperty containing another structured value
  round-trips through a protocol buffer."""
  class Person(model.Model):
    first_name = model.StringProperty()
    last_name = model.StringProperty()
  class PersonPhone(model.Model):
    person = model.StructuredProperty(Person)
    phone = model.StringProperty()
  class Phonebook(model.Model):
    numbers = model.StructuredProperty(PersonPhone, repeated=True)
  book = Phonebook.get_or_insert('test')
  person = Person(first_name="John", last_name='Smith')
  phone = PersonPhone(person=person, phone='1-212-555-1212')
  book.numbers.append(phone)
  pb = book._to_pb()
  ent = Phonebook._from_pb(pb)
  self.assertEqual(ent.numbers[0].person.first_name, 'John')
  self.assertEqual(len(ent.numbers), 1)
  self.assertEqual(ent.numbers[0].person.last_name, 'Smith')
  self.assertEqual(ent.numbers[0].phone, '1-212-555-1212')
def testRecursiveStructuredProperty(self):
  """A model may reference itself via StructuredProperty if the properties
  are attached after the class exists and _fix_up_properties() is rerun."""
  class Node(model.Model):
    name = model.StringProperty()
  # Self-referential properties must be added post-definition.
  Node.left = model.StructuredProperty(Node)
  Node.right = model.StructuredProperty(Node, 'rite')
  Node._fix_up_properties()
  class Tree(model.Model):
    root = model.StructuredProperty(Node)
  k = model.Key(flat=['Tree', None])
  tree = Tree()
  tree.key = k
  tree.root = Node(name='a',
                   left=Node(name='a1',
                             left=Node(name='a1a'),
                             right=Node(name='a1b')),
                   right=Node(name='a2',
                              right=Node(name='a2b')))
  pb = tree._to_pb()
  self.assertEqual(str(pb), RECURSIVE_PB)
  tree2 = Tree._from_pb(pb)
  self.assertEqual(tree2, tree)
  # Also test querying nodes.
  tree.put()
  tree3 = Tree.query(Tree.root.left.right.name == 'a1b').get()
  self.assertEqual(tree3, tree)
def testRenamedProperty(self):
  """Renamed properties serialize under their datastore names: the PB must
  be identical to testProperty's INDEXED_PB despite different python names."""
  class MyModel(model.Model):
    bb = model.BooleanProperty('b')
    pp = model.IntegerProperty('p')
    qq = model.StringProperty('q')
    dd = model.FloatProperty('d')
    kk = model.KeyProperty('k')
    uu = model.UserProperty('u')
    xxyy = model.GeoPtProperty('xy')
  ent = MyModel()
  k = model.Key(flat=['MyModel', 42])
  ent.key = k
  MyModel.bb._set_value(ent, True)
  MyModel.pp._set_value(ent, 42)
  MyModel.qq._set_value(ent, 'hello')
  MyModel.dd._set_value(ent, 2.5)
  MyModel.kk._set_value(ent, k)
  MyModel.uu._set_value(ent, TESTUSER)
  MyModel.xxyy._set_value(ent, AMSTERDAM)
  self.assertEqual(MyModel.pp._get_value(ent), 42)
  self.assertEqual(MyModel.qq._get_value(ent), 'hello')
  self.assertEqual(MyModel.dd._get_value(ent), 2.5)
  self.assertEqual(MyModel.kk._get_value(ent), k)
  self.assertEqual(MyModel.uu._get_value(ent), TESTUSER)
  self.assertEqual(MyModel.xxyy._get_value(ent), AMSTERDAM)
  # Same golden PB as the unrenamed model in testProperty.
  pb = self.conn.adapter.entity_to_pb(ent)
  self.assertEqual(str(pb), INDEXED_PB)
  ent = MyModel._from_pb(pb)
  self.assertEqual(ent._get_kind(), 'MyModel')
  k = model.Key(flat=['MyModel', 42])
  self.assertEqual(ent.key, k)
  self.assertEqual(MyModel.pp._get_value(ent), 42)
  self.assertEqual(MyModel.qq._get_value(ent), 'hello')
  self.assertEqual(MyModel.dd._get_value(ent), 2.5)
  self.assertEqual(MyModel.kk._get_value(ent), k)
def testUnicodeRenamedProperty(self):
  """A non-ASCII unicode datastore name for a property round-trips."""
  class UModel(model.Model):
    val = model.StringProperty(u'\u00fc')
    @classmethod
    def _get_kind(cls):
      return u'UModel'  # Pure ASCII Unicode kind string is fine.
  u = UModel(val='abc')
  u.put()
  v = u.key.get()
  self.assertFalse(u is v)
  self.assertEqual(u.val, v.val)
def testUnicodeKind(self):
  """A non-ASCII kind string raises KindError at class-definition time."""
  def helper():
    class UModel(model.Model):
      val = model.StringProperty()
      @classmethod
      def _get_kind(cls):
        return u'\u00fcModel'
  self.assertRaises(model.KindError, helper)
def testRenamedStructuredProperty(self):
  """Renamed (including unicode-named) structured properties serialize under
  their datastore names; pinned against NESTED_PB with 'home' substituted."""
  uhome = u'hom\u00e9'
  uhome_enc_repr = r'hom\303\251'  # UTF-8 octal escapes as they appear in PB text.
  class Address(model.Model):
    st = model.StringProperty('street')
    ci = model.StringProperty('city')
  class AddressPair(model.Model):
    ho = model.StructuredProperty(Address, uhome)
    wo = model.StructuredProperty(Address, 'work')
  class Person(model.Model):
    na = model.StringProperty('name')
    ad = model.StructuredProperty(AddressPair, 'address')
  p = Person()
  p.na = 'Google'
  p.ad = AddressPair(ho=Address(), wo=Address())
  p.ad.ho.ci = 'Mountain View'
  p.ad.ho.st = '1600 Amphitheatre'
  p.ad.wo.ci = 'San Francisco'
  p.ad.wo.st = '345 Spear'
  pb = p._to_pb()
  expected = NESTED_PB.replace('home', uhome_enc_repr)
  self.assertEqual(str(pb), expected)
  p = Person._from_pb(pb)
  self.assertEqual(p.na, 'Google')
  self.assertEqual(p.ad.ho.st, '1600 Amphitheatre')
  self.assertEqual(p.ad.ho.ci, 'Mountain View')
  self.assertEqual(p.ad.wo.st, '345 Spear')
  self.assertEqual(p.ad.wo.ci, 'San Francisco')
def testKindMap(self):
  """Defining a Model subclass registers its kind in Model._kind_map."""
  model.Model._reset_kind_map()
  class A1(model.Model):
    pass
  def get_kind_map():
    # Return the kind map with __* removed.
    d = model.Model._kind_map
    return dict(kv for kv in d.iteritems() if not kv[0].startswith('__'))
  self.assertEqual(get_kind_map(), {'A1': A1})
  class A2(model.Model):
    pass
  self.assertEqual(get_kind_map(), {'A1': A1, 'A2': A2})
def testMultipleProperty(self):
  """A repeated property serializes per the MULTI_PB golden text."""
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StringProperty(repeated=True)
  m = Person(name='Google', address=['345 Spear', 'San Francisco'])
  m.key = model.Key(flat=['Person', None])
  self.assertEqual(m.address, ['345 Spear', 'San Francisco'])
  pb = m._to_pb()
  self.assertEqual(str(pb), MULTI_PB)
  m2 = Person._from_pb(pb)
  self.assertEqual(m2, m)
def testMultipleInStructuredProperty(self):
  """A repeated property inside a structured property serializes per the
  MULTIINSTRUCT_PB golden text."""
  class Address(model.Model):
    label = model.StringProperty()
    line = model.StringProperty(repeated=True)
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StructuredProperty(Address)
  m = Person(name='Google',
             address=Address(label='work',
                             line=['345 Spear', 'San Francisco']))
  m.key = model.Key(flat=['Person', None])
  self.assertEqual(m.address.line, ['345 Spear', 'San Francisco'])
  pb = m._to_pb()
  self.assertEqual(str(pb), MULTIINSTRUCT_PB)
  m2 = Person._from_pb(pb)
  self.assertEqual(m2, m)
def testMultipleStructuredPropertyProtocolBuffers(self):
  """A repeated structured property serializes per MULTISTRUCT_PB."""
  class Address(model.Model):
    label = model.StringProperty()
    text = model.StringProperty()
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StructuredProperty(Address, repeated=True)
  m = Person(name='Google',
             address=[Address(label='work', text='San Francisco'),
                      Address(label='home', text='Mountain View')])
  m.key = model.Key(flat=['Person', None])
  self.assertEqual(m.address[0].label, 'work')
  self.assertEqual(m.address[0].text, 'San Francisco')
  self.assertEqual(m.address[1].label, 'home')
  self.assertEqual(m.address[1].text, 'Mountain View')
  pb = m._to_pb()
  self.assertEqual(str(pb), MULTISTRUCT_PB)
  m2 = Person._from_pb(pb)
  self.assertEqual(m2, m)
def testCannotMultipleInMultiple(self):
  """A repeated StructuredProperty may not wrap a model that itself has a
  repeated property."""
  class Inner(model.Model):
    innerval = model.StringProperty(repeated=True)
  self.assertRaises(
      TypeError, model.StructuredProperty, Inner, repeated=True)
def testNullProperties(self):
  """Unset properties of every flavor read as None and survive a PB
  round trip as None."""
  class Address(model.Model):
    street = model.StringProperty()
    city = model.StringProperty()
    zipcode = model.IntegerProperty()
  class Person(model.Model):
    address = model.StructuredProperty(Address)
    age = model.IntegerProperty()
    name = model.StringProperty()
    k = model.KeyProperty()
  k = model.Key(flat=['Person', 42])
  p = Person()
  p.key = k
  self.assertEqual(p.address, None)
  self.assertEqual(p.age, None)
  self.assertEqual(p.name, None)
  self.assertEqual(p.k, None)
  pb = p._to_pb()
  q = Person._from_pb(pb)
  self.assertEqual(q.address, None)
  self.assertEqual(q.age, None)
  self.assertEqual(q.name, None)
  self.assertEqual(q.k, None)
  self.assertEqual(q, p)
def testOrphanProperties(self):
  """Deserializing into the base Model class (no schema) and re-serializing
  reproduces the original protocol buffer exactly."""
  class Tag(model.Model):
    names = model.StringProperty(repeated=True)
    ratings = model.IntegerProperty(repeated=True)
  class Address(model.Model):
    line = model.StringProperty(repeated=True)
    city = model.StringProperty()
    zipcode = model.IntegerProperty()
    tags = model.StructuredProperty(Tag)
  class Person(model.Model):
    address = model.StructuredProperty(Address)
    age = model.IntegerProperty(repeated=True)
    name = model.StringProperty()
    k = model.KeyProperty()
  k = model.Key(flat=['Person', 42])
  p = Person(name='White House', k=k, age=[210, 211],
             address=Address(line=['1600 Pennsylvania', 'Washington, DC'],
                             tags=Tag(names=['a', 'b'], ratings=[1, 2]),
                             zipcode=20500))
  p.key = k
  pb = p._to_pb()
  # Decode with the schemaless base class; properties become "orphans".
  q = model.Model._from_pb(pb)
  qb = q._to_pb()
  # On mismatch, show a unified diff of the two PB texts.
  linesp = str(pb).splitlines(True)
  linesq = str(qb).splitlines(True)
  lines = difflib.unified_diff(linesp, linesq, 'Expected', 'Actual')
  self.assertEqual(pb, qb, ''.join(lines))
def testMetaModelRepr(self):
  """repr() of a model *class* lists its properties sorted by python name."""
  class MyModel(model.Model):
    name = model.StringProperty()
    tags = model.StringProperty(repeated=True)
    age = model.IntegerProperty(name='a')
    other = model.KeyProperty()
  self.assertEqual(repr(MyModel),
                   "MyModel<"
                   "age=IntegerProperty('a'), "
                   "name=StringProperty('name'), "
                   "other=KeyProperty('other'), "
                   "tags=StringProperty('tags', repeated=True)"
                   ">")
def testModelToDict(self):
  """_to_dict()/to_dict() use python names, honor include/exclude, and
  reject non-container arguments."""
  class MyModel(model.Model):
    foo = model.StringProperty(name='f')
    bar = model.StringProperty(default='bar')
    baz = model.StringProperty(repeated=True)
  ent = MyModel()
  self.assertEqual({'foo': None, 'bar': 'bar', 'baz': []},
                   ent._to_dict())
  self.assertEqual({'foo': None}, ent._to_dict(include=['foo']))
  self.assertEqual({'bar': 'bar', 'baz': []},
                   ent._to_dict(exclude=frozenset(['foo'])))
  # exclude wins over include for the same name.
  self.assertEqual({}, ent.to_dict(include=['foo'], exclude=['foo']))
  # A bare string is not accepted as include/exclude.
  self.assertRaises(TypeError, ent._to_dict, include='foo')
  self.assertRaises(TypeError, ent._to_dict, exclude='foo')
  ent.foo = 'x'
  ent.bar = 'y'
  ent.baz = ['a']
  self.assertEqual({'foo': 'x', 'bar': 'y', 'baz': ['a']},
                   ent.to_dict())
def testModelToDictStructures(self):
  """to_dict() recurses into (local) structured properties, producing
  nested dicts; unset sub-entities become None and repeated ones []."""
  class MySubmodel(model.Model):
    foo = model.StringProperty()
    bar = model.IntegerProperty()
  class MyModel(model.Model):
    a = model.StructuredProperty(MySubmodel)
    b = model.LocalStructuredProperty(MySubmodel, repeated=True)
    c = model.StructuredProperty(MySubmodel)
    d = model.LocalStructuredProperty(MySubmodel)
    e = model.StructuredProperty(MySubmodel, repeated=True)
  x = MyModel(a=MySubmodel(foo='foo', bar=42),
              b=[MySubmodel(foo='f'), MySubmodel(bar=4)])
  self.assertEqual({'a': {'foo': 'foo', 'bar': 42},
                    'b': [{'foo': 'f', 'bar': None,},
                          {'foo': None, 'bar': 4}],
                    'c': None,
                    'd': None,
                    'e': [],
                    },
                   x.to_dict())
def testModelPickling(self):
  """Model instances pickle and unpickle under all pickle protocols.

  MyModel is made global so pickle can locate the class by module path.
  """
  global MyModel
  class MyModel(model.Model):
    name = model.StringProperty()
    tags = model.StringProperty(repeated=True)
    age = model.IntegerProperty()
    other = model.KeyProperty()
  my = MyModel(name='joe', tags=['python', 'ruby'], age=42,
               other=model.Key(MyModel, 42))
  for proto in 0, 1, 2:
    s = pickle.dumps(my, proto)
    mycopy = pickle.loads(s)
    self.assertEqual(mycopy, my)
def testRejectOldPickles(self):
  """Pickles of old-style db.Model entities must not unpickle as NDB models.

  A db.Model named MyModel is pickled in several states, then the global
  name is deliberately rebound to an NDB model of the same name so each
  stored pickle fails to load.
  """
  global MyModel
  class MyModel(db.Model):
    name = db.StringProperty()
  dumped = []
  for proto in 0, 1, 2:
    x = MyModel()
    s = pickle.dumps(x, proto)
    dumped.append(s)
    x.name = 'joe'
    s = pickle.dumps(x, proto)
    dumped.append(s)
    db.put(x)
    s = pickle.dumps(x, proto)
    dumped.append(s)
  # Shadow the db.Model class with an NDB model of the same name.
  class MyModel(model.Model):
    name = model.StringProperty()
  for s in dumped:
    self.assertRaises(Exception, pickle.loads, s)
def testModelRepr(self):
  """repr() of an instance shows sorted properties, and the key when set."""
  class Address(model.Model):
    street = model.StringProperty()
    city = model.StringProperty()
  class Person(model.Model):
    name = model.StringProperty()
    address = model.StructuredProperty(Address)
  p = Person(name='Google', address=Address(street='345 Spear', city='SF'))
  self.assertEqual(
      repr(p),
      "Person(address=Address(city='SF', street='345 Spear'), name='Google')")
  # With a key assigned, it appears first in the repr.
  p.key = model.Key(pairs=[('Person', 42)])
  self.assertEqual(
      repr(p),
      "Person(key=Key('Person', 42), "
      "address=Address(city='SF', street='345 Spear'), name='Google')")
def testModelReprNoSideEffects(self):
  """repr() must not unwrap the _BaseValue wrappers that put() installs."""
  class Address(model.Model):
    street = model.StringProperty()
    city = model.StringProperty()
  a = Address(street='345 Spear', city='SF')
  # White box test: values are 'top values'.
  self.assertEqual(a._values, {'street': '345 Spear', 'city': 'SF'})
  a.put()
  # White box test: put() has turned wrapped values in _BaseValue().
  self.assertEqual(a._values, {'street': model._BaseValue('345 Spear'),
                               'city': model._BaseValue('SF')})
  self.assertEqual(repr(a),
                   "Address(key=Key('Address', 1), "
                   # (Note: Unicode literals.)
                   "city=u'SF', street=u'345 Spear')")
  # White box test: _values is unchanged.
  self.assertEqual(a._values, {'street': model._BaseValue('345 Spear'),
                               'city': model._BaseValue('SF')})
def testModelRepr_RenamedProperty(self):
class Address(model.Model):
street = model.StringProperty('Street')
city = model.StringProperty('City')
a = Address(street='345 Spear', city='SF')
self.assertEqual(repr(a), "Address(city='SF', street='345 Spear')")
def testModel_RenameAlias(self):
class Person(model.Model):
name = model.StringProperty('Name')
p = Person(name='Fred')
self.assertRaises(AttributeError, getattr, p, 'Name')
self.assertRaises(AttributeError, Person, Name='Fred')
# Unfortunately, p.Name = 'boo' just sets p.__dict__['Name'] = 'boo'.
self.assertRaises(AttributeError, getattr, p, 'foo')
  def testExpando_RenameAlias(self):
    """On an Expando a renamed property answers to both its Python name
    and its datastore name, via kwargs and via setattr."""
    class Person(model.Expando):
      name = model.StringProperty('Name')
    p = Person(name='Fred')
    self.assertEqual(p.name, 'Fred')
    self.assertEqual(p.Name, 'Fred')
    self.assertEqual(p._values, {'Name': 'Fred'})
    # NOTE(review): assertTrue(x, y) treats y as a failure message, so the
    # three checks below only assert p._properties is truthy — assertEqual
    # against Person._properties was probably intended.  TODO confirm.
    self.assertTrue(p._properties, Person._properties)
    p = Person(Name='Fred')
    self.assertEqual(p.name, 'Fred')
    self.assertEqual(p.Name, 'Fred')
    self.assertEqual(p._values, {'Name': 'Fred'})
    self.assertTrue(p._properties, Person._properties)
    p = Person()
    p.Name = 'Fred'
    self.assertEqual(p.name, 'Fred')
    self.assertEqual(p.Name, 'Fred')
    self.assertEqual(p._values, {'Name': 'Fred'})
    self.assertTrue(p._properties, Person._properties)
    self.assertRaises(AttributeError, getattr, p, 'foo')
def testModel_RenameSwap(self):
class Person(model.Model):
foo = model.StringProperty('bar')
bar = model.StringProperty('foo')
p = Person(foo='foo', bar='bar')
self.assertEqual(p._values,
{'foo': 'bar', 'bar': 'foo'})
def testExpando_RenameSwap(self):
class Person(model.Expando):
foo = model.StringProperty('bar')
bar = model.StringProperty('foo')
p = Person(foo='foo', bar='bar', baz='baz')
self.assertEqual(p._values,
{'foo': 'bar', 'bar': 'foo', 'baz': 'baz'})
p = Person()
p.foo = 'foo'
p.bar = 'bar'
p.baz = 'baz'
self.assertEqual(p._values,
{'foo': 'bar', 'bar': 'foo', 'baz': 'baz'})
def testExpando_Repr(self):
class E(model.Expando):
pass
ent = E(a=1, b=[2], c=E(x=3, y=[4]))
self.assertEqual(repr(ent),
"E(a=1, b=[2], c=E(x=3, y=[4]))")
pb = ent._to_pb(set_key=False)
ent2 = E._from_pb(pb)
# NOTE: The 'E' kind name for the inner instance is not persisted,
# so it comes out as Expando.
self.assertEqual(repr(ent2),
"E(a=1, b=[2], c=Expando(x=3, y=[4]))")
  def testPropertyRepr(self):
    """Every Property subclass repr starts with its class name and shows
    only non-default constructor arguments."""
    p = model.Property()
    self.assertEqual(repr(p), 'Property()')
    p = model.IntegerProperty('foo', indexed=False, repeated=True)
    self.assertEqual(repr(p),
                     "IntegerProperty('foo', indexed=False, repeated=True)")
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    # Structured variants include the wrapped model class first.
    p = model.StructuredProperty(Address, 'foo')
    self.assertEqual(repr(p), "StructuredProperty(Address, 'foo')")
    q = model.LocalStructuredProperty(Address, 'bar')
    self.assertEqual(repr(q), "LocalStructuredProperty(Address, 'bar')")
    # One of each property type, checked generically below.
    class MyModel(model.Model):
      boolp = model.BooleanProperty()
      intp = model.IntegerProperty()
      floatp = model.FloatProperty()
      strp = model.StringProperty()
      txtp = model.TextProperty()
      blobp = model.BlobProperty()
      geoptp = model.GeoPtProperty()
      userp = model.UserProperty()
      keyp = model.KeyProperty()
      blobkeyp = model.BlobKeyProperty()
      datetimep = model.DateTimeProperty()
      datep = model.DateProperty()
      timep = model.TimeProperty()
      structp = model.StructuredProperty(Address)
      localstructp = model.LocalStructuredProperty(Address)
      genp = model.GenericProperty()
      compp = model.ComputedProperty(lambda _: 'x')
    self.assertEqual(repr(MyModel.key), "ModelKey('__key__')")
    for prop in MyModel._properties.itervalues():
      s = repr(prop)
      self.assertTrue(s.startswith(prop.__class__.__name__ + '('), s)
def testLengthRestriction(self):
# Check the following rules for size validation of blobs and texts:
# - Unindexed blob and text properties can be unlimited in size.
# - Indexed blob properties are limited to 500 bytes.
# - Indexed text properties are limited to 500 characters.
class MyModel(model.Model):
ublob = model.BlobProperty() # Defaults to indexed=False.
iblob = model.BlobProperty(indexed=True)
utext = model.TextProperty() # Defaults to indexed=False.
itext = model.TextProperty(indexed=True)
ustr = model.StringProperty(indexed=False)
istr = model.StringProperty() # Defaults to indexed=True.
ugen = model.GenericProperty(indexed=False)
igen = model.GenericProperty(indexed=True)
largeblob = 'x'*500
toolargeblob = 'x'*501
hugeblob = 'x'*10000
largetext = u'\u1234'*500
toolargetext = u'\u1234'*500 + 'x'
hugetext = u'\u1234'*10000
ent = MyModel()
# These should all fail:
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'iblob', toolargeblob)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'itext', toolargetext)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'itext', toolargeblob)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'istr', toolargetext)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'istr', toolargeblob)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'igen', toolargetext)
self.assertRaises(datastore_errors.BadValueError,
setattr, ent, 'igen', toolargeblob)
# These should all work:
ent.ublob = hugeblob
ent.iblob = largeblob
ent.utext = hugetext
ent.itext = largetext
ent.ustr = hugetext
ent.istr = largetext
ent.ugen = hugetext
ent.igen = largetext
# Writing the entity should work:
key = ent.put()
# Reading it back should work:
ent2 = key.get()
self.assertEqual(ent2, ent)
self.assertTrue(ent2 is not ent)
def testValidation(self):
class All(model.Model):
s = model.StringProperty()
i = model.IntegerProperty()
f = model.FloatProperty()
t = model.TextProperty()
b = model.BlobProperty()
k = model.KeyProperty()
BVE = datastore_errors.BadValueError
a = All()
a.s = None
a.s = 'abc'
a.s = u'def'
a.s = u'\xff'
a.s = u'\u1234'
a.s = u'\U00012345'
self.assertRaises(BVE, setattr, a, 's', 0)
self.assertRaises(BVE, setattr, a, 's', '\xff')
a.i = None
a.i = 42
a.i = 123L
self.assertRaises(BVE, setattr, a, 'i', '')
a.f = None
a.f = 42
a.f = 3.14
self.assertRaises(BVE, setattr, a, 'f', '')
a.t = None
a.t = 'abc'
a.t = u'def'
a.t = u'\xff'
a.t = u'\u1234'
a.t = u'\U00012345'
self.assertRaises(BVE, setattr, a, 't', 0)
self.assertRaises(BVE, setattr, a, 't', '\xff')
a.b = None
a.b = 'abc'
a.b = '\xff'
self.assertRaises(BVE, setattr, a, 'b', u'')
self.assertRaises(BVE, setattr, a, 'b', u'\u1234')
a.k = None
a.k = model.Key('Foo', 42)
self.assertRaises(BVE, setattr, a, 'k', '')
  def testLocalStructuredProperty(self):
    """LocalStructuredProperty round-trips, and toggling compression on
    the class still reads data written the other way."""
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    class Person(model.Model):
      name = model.StringProperty()
      address = model.LocalStructuredProperty(Address)
    p = Person()
    p.name = 'Google'
    a = Address(street='1600 Amphitheatre')
    p.address = a
    p.address.city = 'Mountain View'
    # Local structures never carry their own key.
    self.assertEqual(p.address.key, None)
    self.assertEqual(Person.name._get_value(p), 'Google')
    self.assertEqual(p.name, 'Google')
    self.assertEqual(Person.address._get_value(p), a)
    self.assertEqual(Address.street._get_value(a), '1600 Amphitheatre')
    self.assertEqual(Address.city._get_value(a), 'Mountain View')
    pb = p._to_pb()
    # TODO: Validate pb
    # Check we can enable and disable compression and have old data still
    # be understood.
    Person.address._compressed = True
    p = Person._from_pb(pb)
    self.assertEqual(p.address.key, None)
    self.assertEqual(p.name, 'Google')
    self.assertEqual(p.address.street, '1600 Amphitheatre')
    self.assertEqual(p.address.city, 'Mountain View')
    self.assertEqual(p.address, a)
    # repr reflects the (mutated) compressed flag.
    self.assertEqual(repr(Person.address),
                     "LocalStructuredProperty(Address, 'address', "
                     "compressed=True)")
    pb = p._to_pb()
    Person.address._compressed = False
    p = Person._from_pb(pb)
    self.assertEqual(p.address.key, None)
    # Now try with an empty address
    p = Person()
    p.name = 'Google'
    self.assertTrue(p.address is None)
    pb = p._to_pb()
    p = Person._from_pb(pb)
    self.assertTrue(p.address is None)
    self.assertEqual(p.name, 'Google')
def testLocalStructuredPropertyCompressed(self):
class Address(model.Model):
street = model.StringProperty()
city = model.StringProperty()
class Person(model.Model):
name = model.StringProperty()
address = model.LocalStructuredProperty(Address, compressed=True)
k = model.Key('Person', 'google')
p = Person(key=k)
p.name = 'Google'
p.address = Address(street='1600 Amphitheatre', city='Mountain View')
p.put()
# To test compression and deserialization with untouched properties.
p = k.get()
p.put()
p = k.get()
self.assertEqual(p.name, 'Google')
self.assertEqual(p.address.street, '1600 Amphitheatre')
self.assertEqual(p.address.city, 'Mountain View')
# To test compression and deserialization after properties were accessed.
p.put()
def testLocalStructuredPropertyRepeated(self):
class Address(model.Model):
street = model.StringProperty()
city = model.StringProperty()
class Person(model.Model):
name = model.StringProperty()
address = model.LocalStructuredProperty(Address, repeated=True)
k = model.Key('Person', 'google')
p = Person(key=k)
p.name = 'Google'
p.address.append(Address(street='1600 Amphitheatre', city='Mountain View'))
p.address.append(Address(street='Webb crater', city='Moon'))
p.put()
# To test compression and deserialization with untouched properties.
p = k.get()
p.put()
p = k.get()
self.assertEqual(p.name, 'Google')
self.assertEqual(p.address[0].street, '1600 Amphitheatre')
self.assertEqual(p.address[0].city, 'Mountain View')
self.assertEqual(p.address[1].street, 'Webb crater')
self.assertEqual(p.address[1].city, 'Moon')
# To test compression and deserialization after properties were accessed.
p.put()
def testLocalStructuredPropertyRepeatedCompressed(self):
class Address(model.Model):
street = model.StringProperty()
city = model.StringProperty()
class Person(model.Model):
name = model.StringProperty()
address = model.LocalStructuredProperty(Address, repeated=True,
compressed=True)
k = model.Key('Person', 'google')
p = Person(key=k)
p.name = 'Google'
p.address.append(Address(street='1600 Amphitheatre', city='Mountain View'))
p.address.append(Address(street='Webb crater', city='Moon'))
p.put()
# To test compression and deserialization with untouched properties.
p = k.get()
p.put()
p = k.get()
self.assertEqual(p.name, 'Google')
self.assertEqual(p.address[0].street, '1600 Amphitheatre')
self.assertEqual(p.address[0].city, 'Mountain View')
self.assertEqual(p.address[1].street, 'Webb crater')
self.assertEqual(p.address[1].city, 'Moon')
# To test compression and deserialization after properties were accessed.
p.put()
def testLocalStructuredPropertyRepeatedRepeated(self):
class Inner(model.Model):
a = model.IntegerProperty(repeated=True)
self.assertTrue(Inner._has_repeated)
class Outer(model.Model):
b = model.LocalStructuredProperty(Inner, repeated=True)
self.assertTrue(Inner._has_repeated)
x = Outer(b=[Inner(a=[1, 2]), Inner(a=[3, 4, 5])])
k = x.put()
y = k.get()
self.assertTrue(x is not y)
self.assertEqual(x, y)
def testEmptyList(self):
class Person(model.Model):
name = model.StringProperty(repeated=True)
p = Person()
self.assertEqual(p.name, [])
pb = p._to_pb()
q = Person._from_pb(pb)
self.assertEqual(q.name, [], str(pb))
def testEmptyListSerialized(self):
class Person(model.Model):
name = model.StringProperty(repeated=True)
p = Person()
pb = p._to_pb()
q = Person._from_pb(pb)
self.assertEqual(q.name, [], str(pb))
def testDatetimeSerializing(self):
class Person(model.Model):
t = model.GenericProperty()
p = Person(t=datetime.datetime.utcnow())
pb = p._to_pb()
q = Person._from_pb(pb)
self.assertEqual(p.t, q.t)
def testExpandoKey(self):
class Ex(model.Expando):
pass
e = Ex()
self.assertEqual(e.key, None)
k = model.Key('Ex', 'abc')
e.key = k
self.assertEqual(e.key, k)
k2 = model.Key('Ex', 'def')
e2 = Ex(key=k2)
self.assertEqual(e2.key, k2)
e2.key = k
self.assertEqual(e2.key, k)
self.assertEqual(e, e2)
del e.key
self.assertEqual(e.key, None)
def testExpandoRead(self):
class Person(model.Model):
name = model.StringProperty()
city = model.StringProperty()
p = Person(name='Guido', city='SF')
pb = p._to_pb()
q = model.Expando._from_pb(pb)
self.assertEqual(q.name, 'Guido')
self.assertEqual(q.city, 'SF')
  def testExpandoWrite(self):
    """Dynamic properties of each basic type serialize to the golden PB.

    The attribute assignment order below is significant: it fixes the
    property order in the serialized protobuf compared to GOLDEN_PB.
    """
    k = model.Key(flat=['Model', 42])
    p = model.Expando(key=k)
    p.k = k
    p.p = 42
    p.q = 'hello'
    p.u = TESTUSER
    p.d = 2.5
    p.b = True
    p.xy = AMSTERDAM
    pb = p._to_pb()
    self.assertEqual(str(pb), GOLDEN_PB)
def testExpandoDelAttr(self):
class Ex(model.Expando):
static = model.StringProperty()
e = Ex()
self.assertEqual(e.static, None)
self.assertRaises(AttributeError, getattr, e, 'dynamic')
self.assertRaises(AttributeError, getattr, e, '_absent')
e.static = 'a'
e.dynamic = 'b'
self.assertEqual(e.static, 'a')
self.assertEqual(e.dynamic, 'b')
e = Ex(static='a', dynamic='b')
self.assertEqual(e.static, 'a')
self.assertEqual(e.dynamic, 'b')
del e.static
del e.dynamic
self.assertEqual(e.static, None)
self.assertRaises(AttributeError, getattr, e, 'dynamic')
def testExpandoRepr(self):
class Person(model.Expando):
name = model.StringProperty('Name')
city = model.StringProperty('City')
p = Person(name='Guido', zip='00000')
p.city = 'SF'
self.assertEqual(repr(p),
"Person(city='SF', name='Guido', zip='00000')")
# White box confirmation.
self.assertEqual(p._values,
{'City': 'SF', 'Name': 'Guido', 'zip': '00000'})
def testExpandoNested(self):
p = model.Expando()
nest = model.Expando()
nest.foo = 42
nest.bar = 'hello'
p.nest = nest
self.assertEqual(p.nest.foo, 42)
self.assertEqual(p.nest.bar, 'hello')
pb = p._to_pb()
q = model.Expando._from_pb(pb)
self.assertEqual(q.nest.foo, 42)
self.assertEqual(q.nest.bar, 'hello')
def testExpandoSubclass(self):
class Person(model.Expando):
name = model.StringProperty()
p = Person()
p.name = 'Joe'
p.age = 7
self.assertEqual(p.name, 'Joe')
self.assertEqual(p.age, 7)
def testExpandoConstructor(self):
p = model.Expando(foo=42, bar='hello')
self.assertEqual(p.foo, 42)
self.assertEqual(p.bar, 'hello')
pb = p._to_pb()
q = model.Expando._from_pb(pb)
self.assertEqual(q.foo, 42)
self.assertEqual(q.bar, 'hello')
def testExpandoNestedConstructor(self):
p = model.Expando(foo=42, bar=model.Expando(hello='hello'))
self.assertEqual(p.foo, 42)
self.assertEqual(p.bar.hello, 'hello')
pb = p._to_pb()
q = model.Expando._from_pb(pb)
self.assertEqual(q.foo, 42)
self.assertEqual(q.bar.hello, 'hello')
  def testExpandoRepeatedProperties(self):
    """Assigning a list makes a dynamic property repeated; reassigning a
    scalar makes it single-valued again; both survive serialization."""
    p = model.Expando(foo=1, bar=[1, 2])
    p.baz = [3]
    self.assertFalse(p._properties['foo']._repeated)
    self.assertTrue(p._properties['bar']._repeated)
    self.assertTrue(p._properties['baz']._repeated)
    # Scalar reassignment flips 'bar' back to single-valued.
    p.bar = 'abc'
    self.assertFalse(p._properties['bar']._repeated)
    pb = p._to_pb()
    q = model.Expando._from_pb(pb)
    q.key = None
    self.assertFalse(p._properties['foo']._repeated)
    self.assertFalse(p._properties['bar']._repeated)
    self.assertTrue(p._properties['baz']._repeated)
    self.assertEqual(q, model.Expando(foo=1, bar='abc', baz=[3]))
  def testExpandoUnindexedProperties(self):
    """_default_indexed controls whether new dynamic properties are
    indexed; it can be set per instance or on the Expando subclass."""
    class Mine(model.Expando):
      pass
    a = Mine(foo=1, bar=['a', 'b'])
    self.assertTrue(a._properties['foo']._indexed)
    self.assertTrue(a._properties['bar']._indexed)
    # Per-instance override affects only later assignments on 'a'.
    a._default_indexed = False
    a.baz = 'baz'
    self.assertFalse(a._properties['baz']._indexed)
    # Class-level override affects all new properties on new instances.
    Mine._default_indexed = False
    b = Mine(foo=1)
    b.bar = ['a', 'b']
    self.assertFalse(b._properties['foo']._indexed)
    self.assertFalse(b._properties['bar']._indexed)
def testGenericPropertyCompressedRefusesIndexed(self):
self.assertRaises(NotImplementedError,
model.GenericProperty, compressed=True, indexed=True)
  def testGenericPropertyCompressed(self):
    """A compressed GenericProperty compresses string values (only) and
    round-trips all values intact."""
    class Goo(model.Model):
      comp = model.GenericProperty(compressed=True)
      comps = model.GenericProperty(compressed=True, repeated=True)
    # compressed implies unindexed.
    self.assertFalse(Goo.comp._indexed)
    self.assertFalse(Goo.comps._indexed)
    a = Goo(comp='fizzy', comps=['x'*1000, 'y'*1000])
    a.put()
    # White box: string values are wrapped in _CompressedValue after put().
    self.assertTrue(isinstance(a._values['comp'].b_val,
                               model._CompressedValue))
    self.assertTrue(isinstance(a._values['comps'][0].b_val,
                               model._CompressedValue))
    self.assertTrue(isinstance(a._values['comps'][1].b_val,
                               model._CompressedValue))
    b = a.key.get()
    self.assertEqual(a, b)
    self.assertTrue(a is not b)
    # Extra-double-check.
    self.assertEqual(b.comp, 'fizzy')
    self.assertEqual(b.comps, ['x'*1000, 'y'*1000])
    # Now try some non-string values; these are not compressed.
    x = Goo(comp=42, comps=[u'\u1234'*1000, datetime.datetime(2012, 2, 23)])
    x.put()
    self.assertFalse(isinstance(x._values['comp'].b_val,
                                model._CompressedValue))
    self.assertFalse(isinstance(x._values['comps'][0].b_val,
                                model._CompressedValue))
    self.assertFalse(isinstance(x._values['comps'][1].b_val,
                                model._CompressedValue))
    y = x.key.get()
    self.assertEqual(x, y)
  def testExpandoReadsCompressed(self):
    """An Expando reading back a compressed blob keeps the property
    marked compressed and returns the original value."""
    class Goo(model.Model):
      comp = model.BlobProperty(compressed=True)
    x = Goo(comp='foo')
    x.put()
    # Shadow the kind with an Expando so the read goes through dynamic
    # property creation.
    class Goo(model.Expando):
      pass
    y = x.key.get()
    self.assertTrue(y._properties['comp']._compressed)
    self.assertEqual(y.comp, 'foo')
def testComputedProperty(self):
class ComputedTest(model.Model):
name = model.StringProperty()
name_lower = model.ComputedProperty(lambda self: self.name.lower())
@model.ComputedProperty
def length(self):
return len(self.name)
def _compute_hash(self):
return hash(self.name)
computed_hash = model.ComputedProperty(_compute_hash, name='hashcode')
m = ComputedTest(name='Foobar')
m._prepare_for_put()
pb = m._to_pb()
for p in pb.property_list():
if p.name() == 'name_lower':
self.assertEqual(p.value().stringvalue(), 'foobar')
break
else:
self.assert_(False, "name_lower not found in PB")
m = ComputedTest._from_pb(pb)
self.assertEqual(m.name, 'Foobar')
self.assertEqual(m.name_lower, 'foobar')
self.assertEqual(m.length, 6)
self.assertEqual(m.computed_hash, hash('Foobar'))
func = lambda unused_ent: None
self.assertRaises(TypeError, model.ComputedProperty, func,
choices=('foo', 'bar'))
self.assertRaises(TypeError, model.ComputedProperty, func, default='foo')
self.assertRaises(TypeError, model.ComputedProperty, func, required=True)
self.assertRaises(TypeError, model.ComputedProperty, func, validator=func)
def testComputedPropertyRepeated(self):
class StopWatch(model.Model):
start = model.IntegerProperty()
end = model.IntegerProperty()
cp = model.ComputedProperty(lambda self: range(self.start, self.end),
repeated=True)
e = StopWatch(start=1, end=10)
self.assertEqual(e.cp, [1, 2, 3, 4, 5, 6, 7, 8, 9])
k = e.put()
self.assertEqual(k.get().cp, [1, 2, 3, 4, 5, 6, 7, 8, 9])
# Check that the computed property works when retrieved without cache
ctx = tasklets.get_context()
ctx.set_cache_policy(False)
ctx.set_memcache_policy(False)
self.assertEqual(k.get().cp, [1, 2, 3, 4, 5, 6, 7, 8, 9])
def testComputedPropertyInRepeatedStructuredProperty(self):
class Inner(model.Model):
arg = model.IntegerProperty()
comp1 = model.ComputedProperty(lambda ent: 1)
comp2 = model.ComputedProperty(lambda ent: 2)
class Outer(model.Model):
wrap = model.StructuredProperty(Inner, repeated=True)
orig = Outer(wrap=[Inner(arg=1), Inner(arg=2)])
key = orig.put()
copy = Outer.query().get()
self.assertEqual(copy, orig)
def testLargeValues(self):
class Demo(model.Model):
bytes = model.BlobProperty()
text = model.TextProperty()
x = Demo(bytes='x'*1000, text=u'a'*1000)
key = x.put()
y = key.get()
self.assertEqual(x, y)
self.assertTrue(isinstance(y.bytes, str))
self.assertTrue(isinstance(y.text, unicode))
def testMultipleStructuredPropertyDatastore(self):
class Address(model.Model):
label = model.StringProperty()
text = model.StringProperty()
class Person(model.Model):
name = model.StringProperty()
address = model.StructuredProperty(Address, repeated=True)
m = Person(name='Google',
address=[Address(label='work', text='San Francisco'),
Address(label='home', text='Mountain View')])
m.key = model.Key(flat=['Person', None])
self.assertEqual(m.address[0].label, 'work')
self.assertEqual(m.address[0].text, 'San Francisco')
self.assertEqual(m.address[1].label, 'home')
self.assertEqual(m.address[1].text, 'Mountain View')
[k] = self.conn.put([m])
m.key = k # Connection.put() doesn't do this!
[m2] = self.conn.get([k])
self.assertEqual(m2, m)
def testIdAndParentPut(self):
# id
m = model.Model(id='bar')
self.assertEqual(m.put(), model.Key('Model', 'bar'))
# id + parent
p = model.Key('ParentModel', 'foo')
m = model.Model(id='bar', parent=p)
self.assertEqual(m.put(), model.Key('ParentModel', 'foo', 'Model', 'bar'))
# parent without id
p = model.Key('ParentModel', 'foo')
m = model.Model(parent=p)
m.put()
self.assertTrue(m.key.id())
def testAllocateIds(self):
class MyModel(model.Model):
pass
res = MyModel.allocate_ids(size=100)
self.assertEqual(res, (1, 100))
# with parent
key = model.Key(flat=(MyModel._get_kind(), 1))
res = MyModel.allocate_ids(size=200, parent=key)
self.assertEqual(res, (101, 300))
def testGetOrInsert(self):
class MyModel(model.Model):
text = model.StringProperty()
key = model.Key(flat=(MyModel._get_kind(), 'baz'))
self.assertEqual(key.get(), None)
MyModel.get_or_insert('baz', text='baz')
self.assertNotEqual(key.get(), None)
self.assertEqual(key.get().text, 'baz')
def testGetOrInsertAsync(self):
class Mod(model.Model):
data = model.StringProperty()
@tasklets.tasklet
def foo():
ent = yield Mod.get_or_insert_async('a', data='hello')
self.assertTrue(isinstance(ent, Mod))
ent2 = yield Mod.get_or_insert_async('a', data='hello')
self.assertEqual(ent2, ent)
foo().check_success()
def testGetOrInsertAsyncWithParent(self):
class Mod(model.Model):
data = model.StringProperty()
@tasklets.tasklet
def foo():
parent = model.Key(flat=('Foo', 1))
ent = yield Mod.get_or_insert_async('a', _parent=parent, data='hello')
self.assertTrue(isinstance(ent, Mod))
ent2 = yield Mod.get_or_insert_async('a', parent=parent, data='hello')
self.assertEqual(ent2, ent)
foo().check_success()
  def testGetOrInsertAsyncInTransaction(self):
    """Inside a transaction, a second get_or_insert() for the same id
    returns the very same (cached) instance, and a rollback discards the
    insert.  Exercised with caching both off and on."""
    class Mod(model.Model):
      data = model.StringProperty()
    def txn():
      ent = Mod.get_or_insert('a', data='hola')
      self.assertTrue(isinstance(ent, Mod))
      ent2 = Mod.get_or_insert('a', data='hola2')
      self.assertEqual(ent2, ent)
      # Identity, not just equality: the transactional cache returns the
      # same object.
      self.assertTrue(ent2 is ent)
      raise model.Rollback()
    # First with caching turned off. (This works because the
    # transactional context always starts out with caching turned on.)
    model.transaction(txn)
    self.assertEqual(Mod.query().get(), None)
    # And again with caching turned on.
    ctx = tasklets.get_context()
    ctx.set_cache_policy(None)  # Restore default cache policy.
    model.transaction(txn)
    self.assertEqual(Mod.query().get(), None)
  def testGetOrInsertAsyncInTransactionUncacheableModel(self):
    """With _use_cache=False the second get_or_insert() in a transaction
    creates a fresh entity (no instance reuse), and rollback discards
    everything."""
    class Mod(model.Model):
      _use_cache = False  # Disable the in-process cache for this kind.
      data = model.StringProperty()
    def txn():
      ent = Mod.get_or_insert('a', data='hola')
      self.assertTrue(isinstance(ent, Mod))
      # Without the cache, the second call re-inserts with the new data.
      ent2 = Mod.get_or_insert('a', data='hola2')
      self.assertEqual(ent2.data, 'hola2')
      raise model.Rollback()
    # First with caching turned off.
    model.transaction(txn)
    self.assertEqual(Mod.query().get(), None)
    # And again with caching turned on.
    ctx = tasklets.get_context()
    ctx.set_cache_policy(None)  # Restore default cache policy.
    model.transaction(txn)
    self.assertEqual(Mod.query().get(), None)
def testGetById(self):
class MyModel(model.Model):
pass
kind = MyModel._get_kind()
# key id
ent1 = MyModel(key=model.Key(pairs=[(kind, 1)]))
ent1.put()
res = MyModel.get_by_id(1)
self.assertEqual(res, ent1)
# key name
ent2 = MyModel(key=model.Key(pairs=[(kind, 'foo')]))
ent2.put()
res = MyModel.get_by_id('foo')
self.assertEqual(res, ent2)
# key id + parent
ent3 = MyModel(key=model.Key(pairs=[(kind, 1), (kind, 2)]))
ent3.put()
res = MyModel.get_by_id(2, parent=model.Key(pairs=[(kind, 1)]))
self.assertEqual(res, ent3)
# key name + parent
ent4 = MyModel(key=model.Key(pairs=[(kind, 1), (kind, 'bar')]))
ent4.put()
res = MyModel.get_by_id('bar', parent=ent1.key)
self.assertEqual(res, ent4)
# None
res = MyModel.get_by_id('idontexist')
self.assertEqual(res, None)
# Invalid parent
self.assertRaises(datastore_errors.BadValueError, MyModel.get_by_id,
'bar', parent=1)
def testDelete(self):
class MyModel(model.Model):
pass
ent1 = MyModel()
key1 = ent1.put()
ent2 = key1.get()
self.assertEqual(ent1, ent2)
key1.delete()
ent3 = key1.get()
self.assertEqual(ent3, None)
def testPopulate(self):
class MyModel(model.Model):
name = model.StringProperty()
m = MyModel()
m.populate(name='abc')
self.assertEqual(m.name, 'abc')
m.populate(name='def')
self.assertEqual(m.name, 'def')
self.assertRaises(AttributeError, m.populate, foo=42)
def testPopulate_Expando(self):
class Ex(model.Expando):
name = model.StringProperty()
m = Ex()
m.populate(name='abc')
self.assertEqual(m.name, 'abc')
m.populate(foo=42)
self.assertEqual(m.foo, 42)
  def testTransaction(self):
    """model.transaction() runs the callback transactionally.

    Note the callback closes over the *variable* 'key'; rebinding 'key'
    below deliberately reuses the same callback for a second entity
    (late-binding closure).
    """
    class MyModel(model.Model):
      text = model.StringProperty()
    key = model.Key(MyModel, 'babaz')
    self.assertEqual(key.get(), None)
    def callback():
      # Emulate get_or_insert()
      a = key.get()
      if a is None:
        a = MyModel(text='baz', key=key)
        a.put()
      return a
    b = model.transaction(callback)
    self.assertNotEqual(b, None)
    self.assertEqual(b.text, 'baz')
    self.assertEqual(key.get(), b)
    # Rebind 'key'; the callback picks up the new value via its closure.
    key = model.Key(MyModel, 'bababaz')
    self.assertEqual(key.get(), None)
    c = model.transaction(callback, retries=0)
    self.assertNotEqual(c, None)
    self.assertEqual(c.text, 'baz')
    self.assertEqual(key.get(), c)
def testNoNestedTransactions(self):
self.ExpectWarnings()
class MyModel(model.Model):
text = model.StringProperty()
key = model.Key(MyModel, 'schtroumpf')
self.assertEqual(key.get(), None)
def inner():
self.fail('Should not get here')
def outer():
model.transaction(inner)
self.assertRaises(datastore_errors.BadRequestError,
model.transaction, outer)
def testGetMultiAsync(self):
model.Model._kind_map['Model'] = model.Model
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
key1 = ent1.put()
key2 = ent2.put()
key3 = ent3.put()
@tasklets.tasklet
def foo():
ents = yield model.get_multi_async([key1, key2, key3])
raise tasklets.Return(ents)
res = foo().get_result()
self.assertEqual(res, [ent1, ent2, ent3])
def testGetMulti(self):
model.Model._kind_map['Model'] = model.Model
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
key1 = ent1.put()
key2 = ent2.put()
key3 = ent3.put()
res = model.get_multi((key1, key2, key3))
self.assertEqual(res, [ent1, ent2, ent3])
def testPutMultiAsync(self):
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
@tasklets.tasklet
def foo():
ents = yield model.put_multi_async([ent1, ent2, ent3])
raise tasklets.Return(ents)
res = foo().get_result()
self.assertEqual(res, [ent1.key, ent2.key, ent3.key])
def testPutMulti(self):
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
res = model.put_multi((ent1, ent2, ent3))
self.assertEqual(res, [ent1.key, ent2.key, ent3.key])
def testDeleteMultiAsync(self):
model.Model._kind_map['Model'] = model.Model
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
key1 = ent1.put()
key2 = ent2.put()
key3 = ent3.put()
self.assertEqual(key1.get(), ent1)
self.assertEqual(key2.get(), ent2)
self.assertEqual(key3.get(), ent3)
@tasklets.tasklet
def foo():
ents = yield model.delete_multi_async([key1, key2, key3])
raise tasklets.Return(ents)
foo().get_result()
self.assertEqual(key1.get(), None)
self.assertEqual(key2.get(), None)
self.assertEqual(key3.get(), None)
def testDeleteMulti(self):
model.Model._kind_map['Model'] = model.Model
ent1 = model.Model(key=model.Key('Model', 1))
ent2 = model.Model(key=model.Key('Model', 2))
ent3 = model.Model(key=model.Key('Model', 3))
key1 = ent1.put()
key2 = ent2.put()
key3 = ent3.put()
self.assertEqual(key1.get(), ent1)
self.assertEqual(key2.get(), ent2)
self.assertEqual(key3.get(), ent3)
model.delete_multi((key1, key2, key3))
self.assertEqual(key1.get(), None)
self.assertEqual(key2.get(), None)
self.assertEqual(key3.get(), None)
  def testContextOptions(self):
    """use_cache / use_memcache options steer each get/put/delete at the
    in-process cache, memcache, and datastore levels independently."""
    ctx = tasklets.get_context()
    ctx.set_cache_policy(True)
    ctx.set_memcache_policy(True)
    ctx.set_memcache_timeout_policy(0)
    # Create an entity and put it in the caches.
    class MyModel(model.Model):
      name = model.StringProperty()
    key = model.Key(MyModel, 'yo')
    ent = MyModel(key=key, name='yo')
    ent.put(use_memcache=False)  # Don't lock memcache.
    key.get(use_cache=False)  # Write to memcache.
    eventloop.run()  # Wait for async memcache request to complete.
    # Verify that it is in both caches.
    self.assertTrue(ctx._cache[key] is ent)
    self.assertEqual(memcache.get(ctx._memcache_prefix + key.urlsafe()),
                     ent._to_pb(set_key=False).SerializePartialToString())
    # Get it bypassing the in-process cache.
    ent_copy = key.get(use_cache=False)
    self.assertEqual(ent_copy, ent)
    self.assertFalse(ent_copy is ent)
    # Put it bypassing both caches.
    ent_copy.name = 'yoyo'
    ent_copy.put(use_cache=False, use_memcache=False)
    # Get it from the in-process cache.
    ent2 = key.get()
    self.assertTrue(ent2 is ent)
    self.assertEqual(ent2.name, 'yo')
    self.assertEqual(ent_copy.name, 'yoyo')  # Should not have changed.
    # Get it from memcache.
    ent3 = key.get(use_cache=False)
    self.assertFalse(ent3 is ent)
    self.assertFalse(ent3 is ent2)
    self.assertEqual(ent3.name, 'yo')
    self.assertEqual(ent_copy.name, 'yoyo')  # Should not have changed.
    # Get it from the datastore.
    ent4 = key.get(use_cache=False, use_memcache=False)
    self.assertFalse(ent4 is ent)
    self.assertFalse(ent4 is ent2)
    self.assertFalse(ent4 is ent3)
    self.assertFalse(ent4 is ent_copy)
    self.assertEqual(ent4.name, 'yoyo')
    # Delete it from the datastore but leave it in the caches.
    key.delete(use_cache=False, use_memcache=False)
    # Assure it is gone from the datastore.
    [ent5] = model.get_multi([key],
                             use_cache=False, use_memcache=False)
    self.assertEqual(ent5, None)
    # Assure it is still in memcache.
    ent6 = key.get(use_cache=False)
    self.assertEqual(ent6.name, 'yo')
    self.assertEqual(memcache.get(ctx._memcache_prefix + key.urlsafe()),
                     ent._to_pb(set_key=False).SerializePartialToString())
    # Assure it is still in the in-memory cache.
    ent7 = key.get()
    self.assertEqual(ent7.name, 'yo')
    self.assertTrue(ctx._cache[key] is ent7)
    # Delete it from memcache.
    model.delete_multi([key], use_cache=False)
    # Assure it is gone from memcache.
    ent8 = key.get(use_cache=False)
    self.assertEqual(ent8, None)
    # Assure it is still in the in-memory cache.
    ent9 = key.get()
    self.assertEqual(ent9.name, 'yo')
    self.assertTrue(ctx._cache[key] is ent9)
    # Delete it from the in-memory cache.
    key.delete()
    # Assure it is gone.
    ent10 = key.get()
    self.assertEqual(ent10, None)
  def testContextOptions_Timeouts(self):
    """memcache_timeout and deadline options propagate from the public
    multi-APIs down to memcache and the datastore connection.

    Verified by monkey-patching the context's memcache and connection
    entry points and inspecting the recorded call arguments.
    """
    # Tweak the context.
    ctx = tasklets.get_context()
    ctx.set_cache_policy(True)
    ctx.set_memcache_policy(True)
    ctx.set_memcache_timeout_policy(0)
    # Mock memcache.cas_multi_async().
    save_memcache_cas_multi_async = ctx._memcache.cas_multi_async
    memcache_args_log = []
    def mock_memcache_cas_multi_async(*args, **kwds):
      memcache_args_log.append((args, kwds))
      return save_memcache_cas_multi_async(*args, **kwds)
    # Mock conn.async_put().
    save_conn_async_put = ctx._conn.async_put
    conn_args_log = []
    def mock_conn_async_put(*args, **kwds):
      conn_args_log.append((args, kwds))
      return save_conn_async_put(*args, **kwds)
    # Create some entities.
    class MyModel(model.Model):
      name = model.StringProperty()
    e1 = MyModel(name='1')
    e2 = MyModel(name='2')
    e3 = MyModel(name='3')
    e4 = MyModel(name='4')
    e5 = MyModel(name='5')
    # Test that the timeouts make it through to memcache and the datastore.
    try:
      ctx._memcache.cas_multi_async = mock_memcache_cas_multi_async
      ctx._conn.async_put = mock_conn_async_put
      [f1, f3] = model.put_multi_async([e1, e3],
                                       memcache_timeout=7,
                                       deadline=3)
      [f4] = model.put_multi_async([e4],
                                   deadline=2)
      [x2, x5] = model.put_multi([e2, e5],
                                 memcache_timeout=5)
      x4 = f4.get_result()
      x1 = f1.get_result()
      x3 = f3.get_result()
      # Write to memcache.
      model.get_multi([x1, x3], use_cache=False, memcache_timeout=7)
      model.get_multi([x4], use_cache=False)
      model.get_multi([x2, x5], use_cache=False, memcache_timeout=5)
      eventloop.run()  # Wait for async memcache request to complete.
      # (And there are straggler events too, but they don't matter here.)
    finally:
      # Always restore the patched entry points.
      ctx._memcache.cas_multi_async = save_memcache_cas_multi_async
      ctx._conn.async_put = save_conn_async_put
    self.assertEqual([e1.key, e2.key, e3.key, e4.key, e5.key],
                     [x1, x2, x3, x4, x5])
    # Three distinct memcache timeouts: default 0, plus 5 and 7.
    self.assertEqual(len(memcache_args_log), 3, memcache_args_log)
    timeouts = set(kwds['time'] for _, kwds in memcache_args_log)
    self.assertEqual(timeouts, set([0, 5, 7]))
    # Three distinct datastore deadlines: default None, plus 2 and 3.
    self.assertEqual(len(conn_args_log), 3)
    deadlines = set(args[0]._values.get('deadline')
                    for (args, kwds) in conn_args_log)
    self.assertEqual(deadlines, set([None, 2, 3]))
  def testContextOptions_ThreeLevels(self):
    """Each storage level (context cache, memcache, datastore) honors its flags.

    Writes a different entity to each level under the same key, then checks
    that reads and deletes touch only the selected level.
    """
    # Reset policies to default.
    ctx = tasklets.get_context()
    ctx.set_cache_policy(None)
    ctx.set_memcache_policy(None)
    ctx.set_memcache_timeout_policy(None)
    class M(model.Model):
      s = model.StringProperty()
    k = model.Key(M, '1')
    a = M(s='a', key=k)
    b = M(s='b', key=k)
    c = M(s='c', key=k)
    # One entity per storage level, all under the same key.
    a.put(use_cache=True, use_memcache=False, use_datastore=False)
    b.put(use_cache=False, use_memcache=True, use_datastore=False)
    c.put(use_cache=False, use_memcache=False, use_datastore=True)
    self.assertEqual(ctx._cache[k], a)
    self.assertEqual(memcache.get(ctx._memcache_prefix + k.urlsafe()),
                     b._to_pb(set_key=False).SerializePartialToString())
    self.assertEqual(ctx._conn.get([k]), [c])
    # Reads fall through level by level as each one is disabled.
    self.assertEqual(k.get(), a)
    self.assertEqual(k.get(use_cache=False), b)
    self.assertEqual(k.get(use_cache=False, use_memcache=False), c)
    # Deletes likewise hit only the selected level.
    k.delete(use_cache=True, use_memcache=False, use_datastore=False)
    # Note: it is now in the Context cache marked as deleted.
    self.assertEqual(k.get(use_cache=False), b)
    k.delete(use_cache=False, use_memcache=True, use_datastore=False)
    self.assertEqual(k.get(use_cache=False), c)
    k.delete(use_cache=False, use_memcache=False, use_datastore=True)
    self.assertEqual(k.get(use_cache=False), None)
  def testContextOptions_PerClass(self):
    """Per-model policy hooks (_use_cache etc.) override context defaults."""
    # Reset policies to default.
    ctx = tasklets.get_context()
    ctx.set_cache_policy(None)
    ctx.set_memcache_policy(None)
    ctx.set_memcache_timeout_policy(None)
    class M(model.Model):
      s = model.StringProperty()
      _use_cache = False
      @classmethod
      def _use_memcache(cls, key):
        # Memcache only for keys that have a string id.
        return bool(key.string_id())
      @classmethod
      def _use_datastore(cls, key):
        # Datastore only for keys without a string id.
        return not bool(key.string_id())
    a = M(s='a', key=model.Key(M, 'a'))  # Uses memcache only
    b = M(s='b', key=model.Key(M, None))  # Uses datastore only
    a.put()
    b.put()
    # _use_cache=False keeps both out of the context cache.
    self.assertFalse(a.key in ctx._cache)
    self.assertFalse(b.key in ctx._cache)
    # Only 'a' landed in memcache; only 'b' landed in the datastore.
    self.assertEqual(memcache.get(ctx._memcache_prefix + a.key.urlsafe()),
                     a._to_pb(set_key=False).SerializePartialToString())
    self.assertEqual(memcache.get(ctx._memcache_prefix + b.key.urlsafe()), None)
    self.assertEqual(ctx._conn.get([a.key]), [None])
    self.assertEqual(ctx._conn.get([b.key]), [b])
  def testNamespaces(self):
    """Keys capture the namespace at creation time and survive serialization."""
    save_namespace = namespace_manager.get_namespace()
    try:
      namespace_manager.set_namespace('ns1')
      k1 = model.Key('A', 1)
      self.assertEqual(k1.namespace(), 'ns1')
      k2 = model.Key('B', 2, namespace='ns2')
      self.assertEqual(k2.namespace(), 'ns2')
      namespace_manager.set_namespace('ns3')
      # Changing the ambient namespace does not affect existing keys.
      self.assertEqual(k1.namespace(), 'ns1')
      # A child key inherits its parent's namespace, not the ambient one.
      k3 = model.Key('C', 3, parent=k1)
      self.assertEqual(k3.namespace(), 'ns1')
      # Test that namespaces survive serialization
      namespace_manager.set_namespace('ns2')
      km = model.Key('M', 1, namespace='ns4')
      class M(model.Model):
        keys = model.KeyProperty(repeated=True)
      m1 = M(keys=[k1, k2, k3], key=km)
      pb = m1._to_pb()
      namespace_manager.set_namespace('ns3')
      m2 = M._from_pb(pb)
      self.assertEqual(m1, m2)
      self.assertEqual(m2.keys[0].namespace(), 'ns1')
      self.assertEqual(m2.keys[1].namespace(), 'ns2')
      self.assertEqual(m2.keys[2].namespace(), 'ns1')
      # Now test the same thing for Expando
      namespace_manager.set_namespace('ns2')
      ke = model.Key('E', 1)
      class E(model.Expando):
        pass
      e1 = E(keys=[k1, k2, k3], key=ke)
      pb = e1._to_pb()
      namespace_manager.set_namespace('ns3')
      e2 = E._from_pb(pb)
      self.assertEqual(e1, e2)
      # Test that an absent namespace always means the empty namespace
      namespace_manager.set_namespace('')
      k3 = model.Key('E', 2)
      e3 = E(key=k3, k=k3)
      pb = e3._to_pb()
      namespace_manager.set_namespace('ns4')
      e4 = E._from_pb(pb)
      self.assertEqual(e4.key.namespace(), '')
      self.assertEqual(e4.k.namespace(), '')
    finally:
      # Always restore the ambient namespace for later tests.
      namespace_manager.set_namespace(save_namespace)
def testOverrideModelKey(self):
class MyModel(model.Model):
# key, overridden
key = model.StringProperty()
# aha, here it is!
real_key = model.ModelKey()
class MyExpando(model.Expando):
# key, overridden
key = model.StringProperty()
# aha, here it is!
real_key = model.ModelKey()
m = MyModel()
k = model.Key('MyModel', 'foo')
m.key = 'bar'
m.real_key = k
m.put()
res = k.get()
self.assertEqual(res, m)
self.assertEqual(res.key, 'bar')
self.assertEqual(res.real_key, k)
q = MyModel.query(MyModel.real_key == k)
res = q.get()
self.assertEqual(res, m)
self.assertEqual(res.key, 'bar')
self.assertEqual(res.real_key, k)
m = MyExpando()
k = model.Key('MyExpando', 'foo')
m.key = 'bar'
m.real_key = k
m.put()
res = k.get()
self.assertEqual(res, m)
self.assertEqual(res.key, 'bar')
self.assertEqual(res.real_key, k)
q = MyExpando.query(MyModel.real_key == k)
res = q.get()
self.assertEqual(res, m)
self.assertEqual(res.key, 'bar')
self.assertEqual(res.real_key, k)
  def testTransactionalDecorator(self):
    """@model.transactional nests: the inner call reuses the outer transaction."""
    # This tests @model.transactional and model.in_transaction(), and
    # indirectly context.Context.in_transaction().
    logs = []
    @model.transactional
    def foo(a, b):
      self.assertTrue(model.in_transaction())
      logs.append(tasklets.get_context()._conn)  # White box
      return a + b
    @model.transactional
    def bar(a):
      self.assertTrue(model.in_transaction())
      logs.append(tasklets.get_context()._conn)  # White box
      return foo(a, 42)
    before = tasklets.get_context()._conn
    self.assertFalse(model.in_transaction())
    x = bar(100)
    self.assertFalse(model.in_transaction())
    after = tasklets.get_context()._conn
    # The ambient connection is restored once the transaction finishes.
    self.assertEqual(before, after)
    self.assertEqual(x, 142)
    # Both decorated calls saw the same transactional connection,
    # which is distinct from the ambient one.
    self.assertEqual(len(logs), 2)
    self.assertEqual(logs[0], logs[1])
    self.assertNotEqual(before, logs[0])
def testTransactionalDecoratorExtensions(self):
# Test that @transactional(flag=value, ...) works too.
@model.transactional()
def callback1(log):
self.assertTrue(model.in_transaction())
ctx = tasklets.get_context()
orig_async_commit = ctx._conn.async_commit
def wrap_async_commit(options):
log.append(options)
return orig_async_commit(options)
ctx._conn.async_commit = wrap_async_commit
log = []
callback1(log)
self.assertEqual(log, [None])
@model.transactional(retries=42)
def callback2(log):
self.assertTrue(model.in_transaction())
ctx = tasklets.get_context()
orig_async_commit = ctx._conn.async_commit
def wrap_async_commit(options):
log.append(options)
return orig_async_commit(options)
ctx._conn.async_commit = wrap_async_commit
log = []
callback2(log)
self.assertEqual(len(log), 1)
self.assertEqual(log[0].retries, 42)
def testPropertyFilters(self):
class M(model.Model):
dt = model.DateTimeProperty()
d = model.DateProperty()
t = model.TimeProperty()
f = model.FloatProperty()
s = model.StringProperty()
k = model.KeyProperty()
b = model.BooleanProperty()
i = model.IntegerProperty()
g = model.GeoPtProperty()
@model.ComputedProperty
def c(self):
return self.i + 1
u = model.UserProperty()
values = {
'dt': datetime.datetime.now(),
'd': datetime.date.today(),
't': datetime.datetime.now().time(),
'f': 4.2,
's': 'foo',
'k': model.Key('Foo', 'bar'),
'b': False,
'i': 42,
'g': AMSTERDAM,
'u': TESTUSER,
}
m = M(**values)
m.put()
q = M.query(M.dt == values['dt'])
self.assertEqual(q.get(), m)
q = M.query(M.d == values['d'])
self.assertEqual(q.get(), m)
q = M.query(M.t == values['t'])
self.assertEqual(q.get(), m)
q = M.query(M.f == values['f'])
self.assertEqual(q.get(), m)
q = M.query(M.s == values['s'])
self.assertEqual(q.get(), m)
q = M.query(M.k == values['k'])
self.assertEqual(q.get(), m)
q = M.query(M.b == values['b'])
self.assertEqual(q.get(), m)
q = M.query(M.i == values['i'])
self.assertEqual(q.get(), m)
q = M.query(M.g == values['g'])
self.assertEqual(q.get(), m)
q = M.query(M.c == values['i'] + 1)
self.assertEqual(q.get(), m)
q = M.query(M.u == values['u'])
self.assertEqual(q.get(), m)
  def testNonRepeatedListValue(self):
    """A custom property may store a list in a non-repeated slot via repr()."""
    class ReprProperty(model.BlobProperty):
      def _validate(self, value):
        # dummy
        return value
      def _to_base_type(self, value):
        # Serialize non-str values through their repr().
        if not isinstance(value, str):
          value = value.__repr__()
        return value
      def _from_base_type(self, value):
        # NOTE(review): eval() of stored data -- acceptable here only
        # because this is test-local round-tripping of its own repr().
        if isinstance(value, str):
          value = eval(value)
        return value
    class M(model.Model):
      p1 = ReprProperty()
      p2 = ReprProperty(compressed=True)
      p3 = ReprProperty(repeated=True)
      p4 = ReprProperty(compressed=True, repeated=True)
    key1 = model.Key(M, 'test')
    value = [{'foo': 'bar'}, {'baz': 'ding'}]
    m1 = M(key=key1, p1=value, p2=value, p3=[value, value], p4=[value, value])
    m1.put()
    # To test compression and deserialization with untouched properties.
    m2 = key1.get()
    m2.put()
    m2 = key1.get()
    self.assertEqual(m2.p1, value)
    self.assertEqual(m2.p2, value)
    self.assertEqual(m2.p3, [value, value])
    self.assertEqual(m2.p4, [value, value])
    # To test compression and deserialization after properties were accessed.
    m2.put()
def testCompressedProperty(self):
class M(model.Model):
t1 = model.TextProperty()
t2 = model.TextProperty(compressed=True)
t3 = model.TextProperty(repeated=True)
t4 = model.TextProperty(compressed=True, repeated=True)
t5 = model.TextProperty()
t6 = model.TextProperty(compressed=True)
t7 = model.TextProperty(repeated=True)
t8 = model.TextProperty(compressed=True, repeated=True)
b1 = model.BlobProperty()
b2 = model.BlobProperty(compressed=True)
b3 = model.BlobProperty(repeated=True)
b4 = model.BlobProperty(compressed=True, repeated=True)
key1 = model.Key(M, 'test')
value1 = 'foo bar baz ding'
value2 = u'f\xd6\xd6 b\xe4r b\xe4z d\xefng' # Umlauts on the vowels.
m1 = M(key=key1,
t1=value1, t2=value1, t3=[value1], t4=[value1],
t5=value2, t6=value2, t7=[value2], t8=[value2],
b1=value1, b2=value1, b3=[value1], b4=[value1])
m1.put()
# To test compression and deserialization with untouched properties.
m2 = key1.get()
m2.put()
m2 = key1.get()
self.assertEqual(m2.t1, value1)
self.assertEqual(m2.t2, value1)
self.assertEqual(m2.t3, [value1])
self.assertEqual(m2.t4, [value1])
self.assertEqual(m2.t5, value2)
self.assertEqual(m2.t6, value2)
self.assertEqual(m2.t7, [value2])
self.assertEqual(m2.t8, [value2])
self.assertEqual(m2.b1, value1)
self.assertEqual(m2.b2, value1)
self.assertEqual(m2.b3, [value1])
self.assertEqual(m2.b4, [value1])
# To test compression and deserialization after properties were accessed.
m2.put()
def testCompressedProperty_Repr(self):
class Foo(model.Model):
name = model.StringProperty()
class M(model.Model):
b = model.BlobProperty(compressed=True)
t = model.TextProperty(compressed=True)
l = model.LocalStructuredProperty(Foo, compressed=True)
x = M(b='b' * 100, t=u't' * 100, l=Foo(name='joe'))
x.put()
y = x.key.get()
self.assertFalse(x is y)
self.assertEqual(
repr(y),
'M(key=Key(\'M\', 1), ' +
'b=%r, ' % ('b' * 100) +
'l=%r, ' % Foo(name=u'joe') +
't=%r)' % (u't' * 100))
def testCorruption(self):
# Thanks to Ricardo Banffy
class Evil(model.Model):
x = model.IntegerProperty()
def __init__(self, *a, **k):
super(Evil, self).__init__(*a, **k)
self.x = 42
e = Evil()
e.x = 50
pb = e._to_pb()
y = Evil._from_pb(pb)
self.assertEqual(y.x, 50)
  def testAllocateIdsHooksCalled(self):
    """Pre/post allocate-ids hooks fire at the right points around the RPC."""
    self.pre_counter = 0
    self.post_counter = 0
    self.size = 25
    self.max = None
    self.parent = key.Key('Foo', 'Bar')
    class HatStand(model.Model):
      @classmethod
      def _pre_allocate_ids_hook(cls, size, max, parent):
        self.pre_counter += 1
        # The hook receives the exact arguments of the originating call.
        self.assertEqual(size, self.size)
        self.assertEqual(max, self.max)
        self.assertEqual(parent, self.parent)
      @classmethod
      def _post_allocate_ids_hook(cls, size, max, parent, future):
        self.post_counter += 1
        self.assertEqual(size, self.size)
        self.assertEqual(max, self.max)
        self.assertEqual(parent, self.parent)
        low, high = future.get_result()
        self.assertEqual(high - low + 1, self.size)
    self.assertEqual(self.pre_counter, 0, 'Pre allocate ids hook called early')
    future = HatStand.allocate_ids_async(size=self.size, max=self.max,
                                         parent=self.parent)
    # Pre hook fires synchronously on call; post hook only on completion.
    self.assertEqual(self.pre_counter, 1, 'Pre allocate ids hook not called')
    self.assertEqual(self.post_counter, 0,
                     'Post allocate ids hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post allocate ids hook not called')
def testNoDefaultAllocateIdsCallback(self):
# See issue 58. http://goo.gl/hPN6j
ctx = tasklets.get_context()
ctx.set_cache_policy(False)
class EmptyModel(model.Model):
pass
fut = EmptyModel.allocate_ids_async(1)
self.assertFalse(fut._immediate_callbacks,
'Allocate ids hook queued default no-op.')
  def testPutHooksCalled(self):
    """Pre/post put hooks fire once per entity, for put_async and put_multi."""
    test = self  # Closure for inside hooks
    self.pre_counter = 0
    self.post_counter = 0
    class HatStand(model.Model):
      def _pre_put_hook(self):
        test.pre_counter += 1
      def _post_put_hook(self, future):
        test.post_counter += 1
        # The post hook's future resolves to the written entity's key.
        test.assertEqual(future.get_result(), test.entity.key)
    furniture = HatStand()
    self.entity = furniture
    self.assertEqual(self.pre_counter, 0, 'Pre put hook called early')
    future = furniture.put_async()
    self.assertEqual(self.pre_counter, 1, 'Pre put hook not called')
    self.assertEqual(self.post_counter, 0, 'Post put hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post put hook not called')
    # All counters now read 1, calling put_multi for 10 entities makes this 11
    new_furniture = [HatStand() for _ in range(10)]
    multi_future = model.put_multi_async(new_furniture)
    self.assertEqual(self.pre_counter, 11,
                     'Pre put hooks not called on put_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post put hooks called early on put_multi')
    for fut, ent in zip(multi_future, new_furniture):
      self.entity = ent
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post put hooks not called on put_multi')
  def testGetByIdHooksCalled(self):
    """get_by_id_async() triggers the pre/post get hooks like key.get()."""
    # See issue 95.  http://goo.gl/QSRQH
    # Adapted from testGetHooksCalled in key_test.py.
    test = self  # Closure for inside hook
    self.pre_counter = 0
    self.post_counter = 0
    class HatStand(model.Model):
      @classmethod
      def _pre_get_hook(cls, key):
        test.pre_counter += 1
        if test.pre_counter == 1:  # Cannot test for key in get_multi
          self.assertEqual(key, self.key)
      @classmethod
      def _post_get_hook(cls, key, future):
        test.post_counter += 1
        self.assertEqual(key, self.key)
        self.assertEqual(future.get_result(), self.entity)
    furniture = HatStand()
    self.entity = furniture
    key = furniture.put()
    self.key = key
    self.assertEqual(self.pre_counter, 0, 'Pre get hook called early')
    future = HatStand.get_by_id_async(key.id())
    # Pre hook fires on call; post hook fires when the future completes.
    self.assertEqual(self.pre_counter, 1, 'Pre get hook not called')
    self.assertEqual(self.post_counter, 0, 'Post get hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post get hook not called')
    # All counters now read 1, calling get for 10 keys should make this 11
    new_furniture = [HatStand() for _ in range(10)]
    keys = [furniture.put() for furniture in new_furniture]  # Sequential keys
    multi_future = [HatStand.get_by_id_async(key.id()) for key in keys]
    self.assertEqual(self.pre_counter, 11,
                     'Pre get hooks not called on get_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post get hooks called early on get_multi')
    for fut, key, entity in zip(multi_future, keys, new_furniture):
      self.key = key
      self.entity = entity
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post get hooks not called on get_multi')
  def testGetOrInsertHooksCalled(self):
    """get_or_insert() fires get hooks twice and put hooks once on creation."""
    # See issue 98.  http://goo.gl/7ak2i
    test = self  # Closure for inside hooks
    class HatStand(model.Model):
      @classmethod
      def _pre_get_hook(cls, key):
        test.pre_get_counter += 1
      @classmethod
      def _post_get_hook(cls, key, future):
        test.post_get_counter += 1
      def _pre_put_hook(self):
        test.pre_put_counter += 1
      def _post_put_hook(self, future):
        test.post_put_counter += 1
    # First call creates it.  This calls get() twice (once outside the
    # transaction and once inside it) and put() once (from inside the
    # transaction).
    self.pre_get_counter = 0
    self.post_get_counter = 0
    self.pre_put_counter = 0
    self.post_put_counter = 0
    HatStand.get_or_insert('classic')
    self.assertEqual(self.pre_get_counter, 2)
    self.assertEqual(self.post_get_counter, 2)
    self.assertEqual(self.pre_put_counter, 1)
    self.assertEqual(self.post_put_counter, 1)
    # Second call gets it without needing a transaction.
    self.pre_get_counter = 0
    self.post_get_counter = 0
    self.pre_put_counter = 0
    self.post_put_counter = 0
    HatStand.get_or_insert_async('classic').get_result()
    self.assertEqual(self.pre_get_counter, 1)
    self.assertEqual(self.post_get_counter, 1)
    self.assertEqual(self.pre_put_counter, 0)
    self.assertEqual(self.post_put_counter, 0)
  def testMonkeyPatchHooks(self):
    """Hooks patched directly onto model.Model are still invoked."""
    test = self  # Closure for inside put hooks
    hook_attr_names = ('_pre_allocate_ids_hook', '_post_allocate_ids_hook',
                       '_pre_put_hook', '_post_put_hook')
    original_hooks = {}
    # Backup the original hooks
    for name in hook_attr_names:
      original_hooks[name] = getattr(model.Model, name)
    self.pre_allocate_ids_flag = False
    self.post_allocate_ids_flag = False
    self.pre_put_flag = False
    self.post_put_flag = False
    # TODO: Should the unused arguments to Monkey Patched tests be tested?
    class HatStand(model.Model):
      @classmethod
      def _pre_allocate_ids_hook(cls, unused_size, unused_max, unused_parent):
        self.pre_allocate_ids_flag = True
      @classmethod
      def _post_allocate_ids_hook(cls, unused_size, unused_max, unused_parent,
                                  unused_future):
        self.post_allocate_ids_flag = True
      def _pre_put_hook(self):
        test.pre_put_flag = True
      def _post_put_hook(self, unused_future):
        test.post_put_flag = True
    # Monkey patch the hooks
    for name in hook_attr_names:
      hook = getattr(HatStand, name)
      setattr(model.Model, name, hook)
    try:
      HatStand.allocate_ids(1)
      self.assertTrue(self.pre_allocate_ids_flag,
                      'Pre allocate ids hook not called when model is monkey patched')
      self.assertTrue(self.post_allocate_ids_flag,
                      'Post allocate ids hook not called when model is monkey patched')
      furniture = HatStand()
      furniture.put()
      self.assertTrue(self.pre_put_flag,
                      'Pre put hook not called when model is monkey patched')
      self.assertTrue(self.post_put_flag,
                      'Post put hook not called when model is monkey patched')
    finally:
      # Restore the original hooks so other tests see pristine Model hooks.
      for name in hook_attr_names:
        setattr(model.Model, name, original_hooks[name])
def testPreHooksCannotCancelRPC(self):
class HatStand(model.Model):
@classmethod
def _pre_allocate_ids_hook(cls, unused_size, unused_max, unused_parent):
raise tasklets.Return()
def _pre_put_hook(self):
raise tasklets.Return()
self.assertRaises(tasklets.Return, HatStand.allocate_ids)
entity = HatStand()
self.assertRaises(tasklets.Return, entity.put)
def testNoDefaultPutCallback(self):
# See issue 58. http://goo.gl/hPN6j
ctx = tasklets.get_context()
ctx.set_cache_policy(False)
class EmptyModel(model.Model):
pass
entity = EmptyModel()
fut = entity.put_async()
self.assertFalse(fut._immediate_callbacks, 'Put hook queued default no-op.')
  def testKeyValidation(self):
    """A user-defined _validate_key() runs on every way a key can be set."""
    # See issue 75.  http://goo.gl/k0Gfv
    class Foo(model.Model):
      # Override the default Model method with our own.
      def _validate_key(self, key):
        # Require a parent of kind Foo and an id not starting with 'a'.
        if key.parent() is None:
          raise TypeError
        elif key.parent().kind() != 'Foo':
          raise TypeError
        elif key.id().startswith('a'):
          raise ValueError
        return key
    # Using no arguments
    self.assertRaises(TypeError, Foo().put)
    # Using id/parent arguments
    rogue_parent = model.Key('Bar', 1)
    self.assertRaises(TypeError, Foo, parent=rogue_parent, id='b')
    parent = model.Key(Foo, 1)
    self.assertRaises(ValueError, Foo, parent=parent, id='a')
    # Using key argument
    rogue_key = model.Key(Foo, 1, Foo, 'a')
    self.assertRaises(ValueError, Foo, key=rogue_key)
    # Using Key assignment
    entity = Foo()
    self.assertRaises(ValueError, setattr, entity, 'key', rogue_key)
    # None assignment (including delete) should work correctly
    entity.key = None
    self.assertTrue(entity.key is None)
    del entity.key
    self.assertTrue(entity.key is None)
    # Sanity check a valid key
    key = Foo(parent=parent, id='b').put()
    self.assertEqual(key.parent(), parent)
    self.assertEqual(key.id(), 'b')
    self.assertEqual(key.kind(), 'Foo')
def testExpandoBlobKey(self):
class Foo(model.Expando):
pass
bk = model.BlobKey('blah')
foo = Foo(bk=bk)
foo.put()
bar = foo.key.get(use_memcache=False, use_cache=False)
self.assertTrue(isinstance(bar.bk, model.BlobKey))
self.assertEqual(bar.bk, bk)
class IndexTests(test_utils.NDBTest):
  """Tests for retrieving composite index metadata via model.get_indexes()."""

  def create_index(self):
    # Register a composite index (Kind: -property1, +property2, no ancestor)
    # directly with the datastore stub, in the WRITE_ONLY ('building') state.
    ci = datastore_stub_util.datastore_pb.CompositeIndex()
    ci.set_app_id(os.environ['APPLICATION_ID'])
    ci.set_id(0)
    ci.set_state(ci.WRITE_ONLY)
    index = ci.mutable_definition()
    index.set_ancestor(0)
    index.set_entity_type('Kind')
    property = index.add_property()
    property.set_name('property1')
    property.set_direction(property.DESCENDING)
    property = index.add_property()
    property.set_name('property2')
    property.set_direction(property.ASCENDING)
    stub = self.testbed.get_stub('datastore_v3')
    stub.CreateIndex(ci)

  def testGetIndexes(self):
    # No indexes initially; after create_index() the stub's index is
    # reflected back as an IndexState with state 'building'.
    self.assertEqual([], model.get_indexes())
    self.create_index()
    self.assertEqual(
        [model.IndexState(
            definition=model.Index(kind='Kind',
                                   properties=[
                                       model.IndexProperty(name='property1',
                                                           direction='desc'),
                                       model.IndexProperty(name='property2',
                                                           direction='asc'),
                                   ],
                                   ancestor=False),
            state='building',
            id=1,
        ),
        ],
        model.get_indexes())

  def testGetIndexesAsync(self):
    # Async variant returns a Future resolving to the same IndexState list.
    fut = model.get_indexes_async()
    self.assertTrue(isinstance(fut, tasklets.Future))
    self.assertEqual([], fut.get_result())
    self.create_index()
    self.assertEqual(
        [model.IndexState(
            definition=model.Index(kind='Kind',
                                   properties=[
                                       model.IndexProperty(name='property1',
                                                           direction='desc'),
                                       model.IndexProperty(name='property2',
                                                           direction='asc'),
                                   ],
                                   ancestor=False),
            state='building',
            id=1,
        ),
        ],
        model.get_indexes_async().get_result())
class CacheTests(test_utils.NDBTest):
  """Regression tests for context-cache and memcache interactions."""

  def SetupContextCache(self):
    """Set up the context cache.

    We only need cache active when testing the cache, so the default
    behavior is to disable it to avoid misleading test results.  Override
    this when needed.
    """
    ctx = tasklets.make_default_context()
    tasklets.set_context(ctx)
    ctx.set_cache_policy(True)
    ctx.set_memcache_policy(True)

  def testCachedEntityKeyMatchesGetArg(self):
    # A get() must return the entity under the requested key even if the
    # cached instance's key attribute was mutated afterwards.
    # See issue 13.  http://goo.gl/jxjOP
    class Employee(model.Model):
      pass
    e = Employee(key=model.Key(Employee, 'joe'))
    e.put()
    e._key = model.Key(Employee, 'fred')
    f = model.Key(Employee, 'joe').get()
    # Now f is e;
    # With bug this is True.
    # self.assertEqual(f.key, model.Key(Employee, 'fred'))
    # Removing key from context cache when it is set to a different one
    # makes the test correct.
    self.assertEqual(f.key, model.Key(Employee, 'joe'))

  def testTransactionalDeleteClearsCache(self):
    # A delete inside a transaction must invalidate the context cache.
    # See issue 57.  http://goo.gl/bXkib
    class Employee(model.Model):
      pass
    ctx = tasklets.get_context()
    ctx.set_cache_policy(True)
    ctx.set_memcache_policy(False)
    e = Employee()
    key = e.put()
    key.get()  # Warm the cache
    def trans():
      key.delete()
    model.transaction(trans)
    e = key.get()
    self.assertEqual(e, None)

  def testTransactionalDeleteClearsMemcache(self):
    # A delete inside a transaction must invalidate memcache too.
    # See issue 57.  http://goo.gl/bXkib
    class Employee(model.Model):
      pass
    ctx = tasklets.get_context()
    ctx.set_cache_policy(False)
    ctx.set_memcache_policy(True)
    e = Employee()
    key = e.put()
    key.get()  # Warm the cache
    def trans():
      key.delete()
    model.transaction(trans)
    e = key.get()
    self.assertEqual(e, None)

  def testCustomStructuredPropertyInRepeatedStructuredProperty(self):
    # A user-defined StructuredProperty subclass with to/from base-type
    # conversions must round-trip inside a repeated StructuredProperty.
    class FuzzyDate(object):
      def __init__(self, first, last=None):
        assert isinstance(first, datetime.date)
        assert last is None or isinstance(last, datetime.date)
        self.first = first
        self.last = last or first
      def __eq__(self, other):
        if not isinstance(other, FuzzyDate):
          return NotImplemented
        return self.first == other.first and self.last == other.last
      def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is not NotImplemented:
          eq = not eq
        return eq
      def __repr__(self):
        return 'FuzzyDate(%r, %r)' % (self.first, self.last)
    class FuzzyDateModel(model.Model):
      first = model.DateProperty()
      last = model.DateProperty()
    class FuzzyDateProperty(model.StructuredProperty):
      def __init__(self, **kwds):
        super(FuzzyDateProperty, self).__init__(FuzzyDateModel, **kwds)
      def _validate(self, value):
        assert isinstance(value, FuzzyDate)
      def _to_base_type(self, value):
        return FuzzyDateModel(first=value.first, last=value.last)
      def _from_base_type(self, value):
        return FuzzyDate(value.first, value.last)
    class Inner(model.Model):
      date = FuzzyDateProperty()
    class Outer(model.Model):
      wrap = model.StructuredProperty(Inner, repeated=True)
    d = datetime.date(1900,1,1)
    fd = FuzzyDate(d)
    orig = Outer(wrap=[Inner(date=fd), Inner(date=fd)])
    key = orig.put()
    q = Outer.query()
    copy = q.get()
    self.assertEqual(copy, orig)

  def testSubStructureEqualToNone(self):
    # A missing nested structure must deserialize as None, not as an
    # empty sub-entity.
    class IntRangeModel(model.Model):
      first = model.IntegerProperty()
      last = model.IntegerProperty()
    class Inner(model.Model):
      range = model.StructuredProperty(IntRangeModel)
      other = model.IntegerProperty()
    class Outer(model.Model):
      wrap = model.StructuredProperty(Inner, repeated=True)
    orig = Outer(wrap=[Inner(other=2),
                       Inner(range=IntRangeModel(first=0, last=10), other=4)])
    orig.put()
    q = Outer.query()
    copy = q.get()
    self.assertEqual(copy.wrap[0].range, None)
    self.assertEqual(copy.wrap[1].range, IntRangeModel(first=0, last=10))
def main():
  # Delegate to unittest's CLI runner so this file can be run directly.
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""NDB interface for Blobstore.
This currently builds on google.appengine.ext.blobstore and provides a
similar API. The main API differences:
- BlobInfo is an actual Model subclass rather than a pseudo-model class.
To query, use BlobInfo.query() and its documented properties. Other
changes:
- The kind is '__BlobInfo__' (BLOB_INFO_KIND).
- key() is a method returning a BlobKey instance.
- put() and friends are disabled.
- Added class methods get() and friends.
- Added instance methods delete() and friends, and open().
- Instead of BlobReferenceProperty, there's BlobKeyProperty.
- There is no create_rpc() function. Instead, functions and methods
take keyword arguments to specify deadline, callback, and (in some
case) datastore options.
- APIs (get() and delete()) that in ext.blobstore take either a blob
key or a list of blob keys are split into two: one that takes a blob
key and one that takes a list of blob keys, the latter having a name
ending in _multi.
- The following APIs have a synchronous and an async version:
- BlobInfo.get()
- BlobInfo.delete()
- create_upload_url()
- get()
- get_multi()
- delete()
- delete_multi()
- fetch_data()
"""
# TODO: Should delete() and friends accept BlobInfos too?
# TODO: Don't have redundant function/method versions of APIs?
# TODO: Refactor ext.blobstore to reduce duplication of code.
import base64
import email
from .google_imports import api_blobstore as blobstore
from .google_imports import ext_blobstore
from . import model
from . import tasklets
# Public names exported by this module -- mostly pass-throughs from
# google.appengine.api.blobstore and google.appengine.ext.blobstore,
# plus the NDB-flavored BlobInfo/BlobKeyProperty and async helpers.
__all__ = ['BLOB_INFO_KIND',
           'BLOB_KEY_HEADER',
           'BLOB_MIGRATION_KIND',
           'BLOB_RANGE_HEADER',
           'BlobFetchSizeTooLargeError',
           'BlobInfo',
           'BlobInfoParseError',
           'BlobKey',
           'BlobNotFoundError',
           'BlobKeyProperty',
           'BlobReader',
           'DataIndexOutOfRangeError',
           'PermissionDeniedError',
           'Error',
           'InternalError',
           'MAX_BLOB_FETCH_SIZE',
           'UPLOAD_INFO_CREATION_HEADER',
           'create_upload_url',
           'create_upload_url_async',
           'delete',
           'delete_async',
           'delete_multi',
           'delete_multi_async',
           'fetch_data',
           'fetch_data_async',
           'get',
           'get_async',
           'get_multi',
           'get_multi_async',
           'parse_blob_info']
# Exceptions are all imported.
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
PermissionDeniedError = blobstore.PermissionDeniedError
BlobInfoParseError = ext_blobstore.BlobInfoParseError
# So is BlobKey.
BlobKey = blobstore.BlobKey
# And the constants.
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_MIGRATION_KIND = blobstore.BLOB_MIGRATION_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
# Re-export BlobKeyProperty from ndb.model for completeness.
BlobKeyProperty = model.BlobKeyProperty
class BlobInfo(model.Model):
"""Information about blobs in Blobstore.
This is a Model subclass that has been doctored to be unwritable.
Properties:
- content_type: Content type of blob.
- creation: Creation date of blob, when it was uploaded.
- filename: Filename user selected from their machine.
- size: Size of uncompressed blob.
- md5_hash: The md5 hash value of the uploaded blob (in hex).
Additional API:
Class methods:
- get(): retrieve a BlobInfo by key
- get_multi(): retrieve a list of BlobInfos by keys
- get_async(), get_multi_async(): async version of get() and get_multi()
Instance methods:
- delete(): delete this blob
- delete_async(): async version of delete()
- key(): return the BlobKey for this blob
- open(): return a BlobReader instance for this blob
Because BlobInfo instances are synchronized with Blobstore, the class
cache policies are off.
Do not subclass this class.
"""
_use_cache = False
_use_memcache = False
content_type = model.StringProperty()
creation = model.DateTimeProperty()
filename = model.StringProperty()
size = model.IntegerProperty()
md5_hash = model.StringProperty()
@classmethod
def _get_kind(cls):
"""Override this to match the datastore entities written by Blobstore."""
return BLOB_INFO_KIND # __BlobInfo__
@classmethod
def get(cls, blob_key, **ctx_options):
"""Retrieve a BlobInfo by key.
Args:
blob_key: A blob key. This may be a str, unicode or BlobKey instance.
**ctx_options: Context options for Model().get_by_id().
Returns:
A BlobInfo entity associated with the provided key, If there was
no such entity, returns None.
"""
fut = cls.get_async(blob_key, **ctx_options)
return fut.get_result()
@classmethod
def get_async(cls, blob_key, **ctx_options):
"""Async version of get()."""
if not isinstance(blob_key, (BlobKey, basestring)):
raise TypeError('Expected blob key, got %r' % (blob_key,))
if 'parent' in ctx_options:
raise TypeError('Parent is not supported')
return cls.get_by_id_async(str(blob_key), **ctx_options)
@classmethod
def get_multi(cls, blob_keys, **ctx_options):
"""Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None.
"""
futs = cls.get_multi_async(blob_keys, **ctx_options)
return [fut.get_result() for fut in futs]
@classmethod
def get_multi_async(cls, blob_keys, **ctx_options):
"""Async version of get_multi()."""
for blob_key in blob_keys:
if not isinstance(blob_key, (BlobKey, basestring)):
raise TypeError('Expected blob key, got %r' % (blob_key,))
if 'parent' in ctx_options:
raise TypeError('Parent is not supported')
blob_key_strs = map(str, blob_keys)
keys = [model.Key(BLOB_INFO_KIND, id) for id in blob_key_strs]
return model.get_multi_async(keys, **ctx_options)
def _put_async(self, **ctx_options):
"""Cheap way to make BlobInfo entities read-only."""
raise TypeError('BlobInfo is read-only')
put_async = _put_async
def key(self):
  """Return the BlobKey instance identifying this blob.

  The key is derived from this entity's datastore key id, which is the
  string form of the blob key.
  """
  # TODO: Cache this?
  return BlobKey(self._key.id())
def delete(self, **options):
  """Permanently delete this blob from Blobstore.

  Args:
    **options: Options for create_rpc().
  """
  # Delegates to the module-level async helper and blocks on the result.
  delete_async(self.key(), **options).get_result()
def delete_async(self, **options):
  """Async version of delete().

  Returns:
    A Future whose result is set once deletion completes.
  """
  # Inside the method body this name resolves to the module-level
  # delete_async() function, not to this method.
  return delete_async(self.key(), **options)
def open(self, *args, **kwds):
  """Return a read-only BlobReader over this blob's content.

  Args:
    *args, **kwds: Forwarded unchanged to the BlobReader constructor.

  Returns:
    A new BlobReader instance.
  """
  return BlobReader(self, *args, **kwds)
# Module-level conveniences mirroring the BlobInfo classmethods.
get = BlobInfo.get
get_async = BlobInfo.get_async
get_multi = BlobInfo.get_multi
get_multi_async = BlobInfo.get_multi_async
def delete(blob_key, **options):
  """Delete a blob from Blobstore, blocking until done.

  Args:
    blob_key: A blob key. This may be a str, unicode or BlobKey instance.
    **options: Options for create_rpc().
  """
  return delete_async(blob_key, **options).get_result()
@tasklets.tasklet
def delete_async(blob_key, **options):
  """Async version of delete()."""
  if not isinstance(blob_key, (basestring, BlobKey)):
    raise TypeError('Expected blob key, got %r' % (blob_key,))
  yield blobstore.delete_async(blob_key, rpc=blobstore.create_rpc(**options))
def delete_multi(blob_keys, **options):
  """Delete several blobs from Blobstore, blocking until done.

  Args:
    blob_keys: A list of blob keys.
    **options: Options for create_rpc().
  """
  delete_multi_async(blob_keys, **options).get_result()
@tasklets.tasklet
def delete_multi_async(blob_keys, **options):
  """Async version of delete_multi().

  Args:
    blob_keys: A list of blob keys.
    **options: Options for create_rpc().

  Raises:
    TypeError: If a single blob key is passed instead of a list.
  """
  if isinstance(blob_keys, (basestring, BlobKey)):
    # Bug fix: the message previously interpolated the undefined name
    # 'blob_key', so the intended TypeError surfaced as a NameError.
    raise TypeError('Expected a list, got %r' % (blob_keys,))
  rpc = blobstore.create_rpc(**options)
  yield blobstore.delete_async(blob_keys, rpc=rpc)
def create_upload_url(success_path,
                      max_bytes_per_blob=None,
                      max_bytes_total=None,
                      **options):
  """Create an upload URL for a POST form, blocking until available.

  Args:
    success_path: Path within the application to call when the POST is
      successful and the upload is complete.
    max_bytes_per_blob: Maximum size in bytes of any single blob in the
      upload, or None for no per-blob limit.
    max_bytes_total: Maximum aggregate size in bytes of all blobs in the
      upload, or None for no total limit.
    **options: Options for create_rpc().

  Returns:
    The upload URL.

  Raises:
    TypeError: If max_bytes_per_blob or max_bytes_total are not integral
      types.
    ValueError: If max_bytes_per_blob or max_bytes_total are not
      positive values.
  """
  return create_upload_url_async(success_path,
                                 max_bytes_per_blob=max_bytes_per_blob,
                                 max_bytes_total=max_bytes_total,
                                 **options).get_result()
@tasklets.tasklet
def create_upload_url_async(success_path,
                            max_bytes_per_blob=None,
                            max_bytes_total=None,
                            **options):
  """Async version of create_upload_url()."""
  upload_rpc = blobstore.create_upload_url_async(
      success_path,
      max_bytes_per_blob=max_bytes_per_blob,
      max_bytes_total=max_bytes_total,
      rpc=blobstore.create_rpc(**options))
  url = yield upload_rpc
  raise tasklets.Return(url)
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dct, name):
value = dct.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key_str = get_value(field_storage.type_options, 'blob-key')
blob_key = BlobKey(blob_key_str)
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
md5_hash_encoded = get_value(upload_content, 'content-md5')
md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(id=blob_key_str,
content_type=content_type,
creation=creation,
filename=filename,
size=size,
md5_hash=md5_hash,
)
def fetch_data(blob, start_index, end_index, **options):
  """Fetch a fragment of a blob, blocking until the data is available.

  At most MAX_BLOB_FETCH_SIZE bytes are returned. A fragment extending
  past the end of the blob yields only the bytes from start_index to the
  end of the blob (a shorter result than requested); a fragment entirely
  outside the blob yields the empty string. Negative indexes raise.

  Args:
    blob: BlobInfo, BlobKey, str or unicode representation of the
      BlobKey of the blob to fetch data from.
    start_index: Start index of blob data to fetch. May not be negative.
    end_index: End index (inclusive) of blob data to fetch. Must be
      >= start_index.
    **options: Options for create_rpc().

  Returns:
    str containing partial data of the blob. If the indexes are legal
    but outside the boundaries of the blob, returns the empty string.

  Raises:
    TypeError: If start_index or end_index are not indexes, or blob is
      not a string, BlobKey or BlobInfo.
    DataIndexOutOfRangeError: When start_index < 0 or
      end_index < start_index.
    BlobFetchSizeTooLargeError: When the requested fragment is larger
      than MAX_BLOB_FETCH_SIZE.
    BlobNotFoundError: When the blob does not exist.
  """
  return fetch_data_async(blob, start_index, end_index,
                          **options).get_result()
@tasklets.tasklet
def fetch_data_async(blob, start_index, end_index, **options):
  """Async version of fetch_data()."""
  # Accept a BlobInfo in place of a key by extracting its key first.
  target = blob.key() if isinstance(blob, BlobInfo) else blob
  rpc = blobstore.create_rpc(**options)
  data = yield blobstore.fetch_data_async(target, start_index, end_index,
                                          rpc=rpc)
  raise tasklets.Return(data)
class BlobReader(ext_blobstore.BlobReader):
  """Provides a read-only file-like interface to a blobstore blob."""

  # This just overrides two methods to use the proper versions.
  # Hack alert: this can access private attributes of the parent class
  # because it has the same class name. (This is a Python feature:
  # name mangling turns self.__x into self._BlobReader__x in both
  # classes, so the attribute names coincide.)

  def __fill_buffer(self, size=0):
    """Fills the internal buffer.

    Args:
      size: Number of bytes to read. Will be clamped to
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
    # Uses this module's fetch_data(); end index is inclusive, hence -1.
    self.__buffer = fetch_data(self.__blob_key, self.__position,
                               self.__position + read_size - 1)
    self.__buffer_position = 0
    # Receiving fewer bytes than requested means the blob is exhausted.
    self.__eof = len(self.__buffer) < read_size

  @property
  def blob_info(self):
    """Returns the BlobInfo for this file."""
    # Lazily fetched via this module's BlobInfo.get() and then cached.
    if not self.__blob_info:
      self.__blob_info = BlobInfo.get(self.__blob_key)
    return self.__blob_info
| Python |
"""Tests for metadata.py."""
import unittest
from .google_imports import namespace_manager
from . import metadata
from . import model
from . import test_utils
class MetadataTests(test_utils.NDBTest):
  """Tests for the metadata.* kind/namespace/property query helpers."""

  def setUp(self):
    super(MetadataTests, self).setUp()

    class Foo(model.Model):
      name = model.StringProperty()
      age = model.IntegerProperty()
    self.Foo = Foo

    class Bar(model.Model):
      name = model.StringProperty()
      rate = model.IntegerProperty()
    self.Bar = Bar

    class Ext(model.Expando):
      pass
    self.Ext = Ext
    namespace_manager.set_namespace('')  # Always start in default ns.

  the_module = metadata

  def testGetNamespaces(self):
    # With no entities written yet, no namespaces are reported.
    self.assertEqual([], metadata.get_namespaces())
    self.Foo().put()
    self.assertEqual([''], metadata.get_namespaces())
    self.assertEqual([], metadata.get_namespaces(None, ''))
    for ns in 'x', 'xyzzy', 'y', 'z':
      namespace_manager.set_namespace(ns)
      self.Foo().put()
    self.assertEqual(['', 'x', 'xyzzy', 'y', 'z'], metadata.get_namespaces())
    # The optional start/end arguments select the half-open range [start, end).
    self.assertEqual(['x', 'xyzzy'], metadata.get_namespaces('x', 'y'))

  def testGetKinds(self):
    self.assertEqual([], metadata.get_kinds())
    self.Foo().put()
    self.Bar().put()
    self.Ext().put()
    self.assertEqual(['Bar', 'Ext', 'Foo'], metadata.get_kinds())
    self.assertEqual(['Bar', 'Ext'], metadata.get_kinds('A', 'F'))
    self.assertEqual([], metadata.get_kinds(None, ''))
    # Kinds are tracked per namespace.
    namespace_manager.set_namespace('x')
    self.assertEqual([], metadata.get_kinds())
    self.Foo().put()
    self.assertEqual(['Foo'], metadata.get_kinds())

  def testGetPropertiesOfKind(self):
    self.Foo().put()
    self.assertEqual(['age', 'name'], metadata.get_properties_of_kind('Foo'))
    self.assertEqual(['age'], metadata.get_properties_of_kind('Foo', 'a', 'h'))
    self.assertEqual([], metadata.get_properties_of_kind('Foo', None, ''))
    e = self.Ext()
    e.foo = 1
    e.bar = 2
    e.put()
    self.assertEqual(['bar', 'foo'], metadata.get_properties_of_kind('Ext'))
    # Properties are also tracked per namespace.
    namespace_manager.set_namespace('x')
    e = self.Ext()
    e.one = 1
    e.two = 2
    e.put()
    self.assertEqual(['one', 'two'], metadata.get_properties_of_kind('Ext'))

  def testGetRepresentationsOfKind(self):
    e = self.Ext()
    e.foo = 1
    e.bar = 'a'
    e.put()
    self.assertEqual({'foo': ['INT64'], 'bar': ['STRING']},
                     metadata.get_representations_of_kind('Ext'))
    self.assertEqual({'bar': ['STRING']},
                     metadata.get_representations_of_kind('Ext', 'a', 'e'))
    self.assertEqual({},
                     metadata.get_representations_of_kind('Ext', None, ''))
    # A second entity can add further representations for the same names.
    f = self.Ext()
    f.foo = 'x'
    f.bar = 2
    f.put()
    self.assertEqual({'foo': ['INT64', 'STRING'],
                      'bar': ['INT64', 'STRING']},
                     metadata.get_representations_of_kind('Ext'))

  def testDirectPropertyQueries(self):
    e = self.Ext()
    e.foo = 1
    e.bar = 'a'
    e.put()
    f = self.Foo(name='a', age=42)
    f.put()
    # Querying the Property metadata model directly yields one row per
    # (kind, property) pair, ordered by kind then property name.
    q = metadata.Property.query()
    res = q.fetch()
    self.assertEqual([('Ext', 'bar'), ('Ext', 'foo'),
                      ('Foo', 'age'), ('Foo', 'name')],
                     [(p.kind_name, p.property_name) for p in res])
def main():
  """Run this module's tests with the standard unittest runner."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""Some tests for datastore_rpc.py."""
import unittest
from .google_imports import apiproxy_stub_map
from .google_imports import datastore_rpc
from . import model
from . import test_utils
class PendingTests(test_utils.NDBTest):
  """Tests for the 'pending RPC' management."""

  def testBasicSetup1(self):
    # A put assigns the first auto-generated id (1) for the kind.
    ent = model.Expando()
    ent.foo = 'bar'
    rpc = self.conn.async_put(None, [ent])
    [key] = rpc.get_result()
    self.assertEqual(key, model.Key(flat=['Expando', 1]))

  def testBasicSetup2(self):
    # Getting a nonexistent key yields None, not an error.
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    [ent] = rpc.get_result()
    self.assertTrue(ent is None)

  def SetUpCallHooks(self):
    # Record every pre- and post-call hook invocation for inspection.
    self.pre_args = []
    self.post_args = []
    apiproxy_stub_map.apiproxy.GetPreCallHooks().Append('test1',
                                                        self.PreCallHook)
    apiproxy_stub_map.apiproxy.GetPostCallHooks().Append('test1',
                                                         self.PostCallHook)

  def PreCallHook(self, service, call, request, response, rpc=None):
    self.pre_args.append((service, call, request, response, rpc))

  def PostCallHook(self, service, call, request, response,
                   rpc=None, error=None):
    self.post_args.append((service, call, request, response, rpc, error))

  def testCallHooks(self):
    self.SetUpCallHooks()
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    # The pre-call hook fires when the RPC is issued; the post-call hook
    # only fires once the result has been consumed.
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(self.post_args, [])
    [ent] = rpc.get_result()
    self.assertTrue(ent is None)
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(len(self.post_args), 1)
    self.assertEqual(self.pre_args[0][:2], ('datastore_v3', 'Get'))
    self.assertEqual(self.post_args[0][:2], ('datastore_v3', 'Get'))

  def testCallHooks_Pending(self):
    self.SetUpCallHooks()
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    self.conn.wait_for_all_pending_rpcs()
    self.assertEqual(rpc.state, 2)  # FINISHING
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(len(self.post_args), 1)  # Post-call hook has fired.
    self.assertEqual(self.conn.get_pending_rpcs(), set())

  def NastyCallback(self, rpc):
    # Completion callback that consumes the result and immediately
    # issues another RPC, exercising the pending-RPC bookkeeping.
    rpc.get_result()
    key = model.Key(flat=['Expando', 1])
    self.conn.async_get(None, [key])

  def testCallHooks_Pending_CallbackAddsMore(self):
    self.SetUpCallHooks()
    conf = datastore_rpc.Configuration(on_completion=self.NastyCallback)
    key = model.Key(flat=['Expando', 1])
    self.conn.async_get(conf, [key])
    # Waiting must also drain the RPCs added by completion callbacks.
    self.conn.wait_for_all_pending_rpcs()
    self.assertEqual(self.conn.get_pending_rpcs(), set())
def main():
  """Run this module's tests with the standard unittest runner."""
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""The Key class, and associated utilities.
A Key encapsulates the following pieces of information, which together
uniquely designate a (possible) entity in the App Engine datastore:
- an application id (a string)
- a namespace (a string)
- a list of one or more (kind, id) pairs where kind is a string and id
is either a string or an integer.
The application id must always be part of the key, but since most
applications can only access their own entities, it defaults to the
current application id and you rarely need to worry about it. It must
not be empty.
The namespace designates a top-level partition of the key space for a
particular application. If you've never heard of namespaces, you can
safely ignore this feature.
Most of the action is in the (kind, id) pairs. A key must have at
least one (kind, id) pair. The last (kind, id) pair gives the kind
and the id of the entity that the key refers to, the others merely
specify a 'parent key'.
The kind is a string giving the name of the model class used to
represent the entity. (In more traditional databases this would be
the table name.) A model class is a Python class derived from
ndb.Model; see the documentation for ndb/model.py. Only the class
name itself is used as the kind. This means all your model classes
must be uniquely named within one application. You can override this
on a per-class basis.
The id is either a string or an integer. When the id is a string, the
application is in control of how it assigns ids: For example, if you
could use an email address as the id for Account entities.
To use integer ids, you must let the datastore choose a unique id for
an entity when it is first inserted into the datastore. You can set
the id to None to represent the key for an entity that hasn't yet been
inserted into the datastore. The final key (including the assigned
id) will be returned after the entity is successfully inserted into
the datastore.
A key for which the id of the last (kind, id) pair is set to None is
called an incomplete key. Such keys can only be used to insert
entities into the datastore.
A key with exactly one (kind, id) pair is called a top level key or a
root key. Top level keys are also used as entity groups, which play a
role in transaction management.
If there is more than one (kind, id) pair, all but the last pair
represent the 'ancestor path', also known as the key of the 'parent
entity'.
Other constraints:
- Kinds and string ids must not be empty and must be at most 500 bytes
long (after UTF-8 encoding, if given as Python unicode objects).
NOTE: This is defined as a module level constant _MAX_KEYPART_BYTES.
- Integer ids must be at least 1 and less than 2**63.
For more info about namespaces, see
http://code.google.com/appengine/docs/python/multitenancy/overview.html.
The namespace defaults to the 'default namespace' selected by the
namespace manager. To explicitly select the empty namespace pass
namespace=''.
"""
__author__ = 'guido@google.com (Guido van Rossum)'
import base64
import os
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import namespace_manager
from .google_imports import entity_pb
from . import utils
__all__ = ['Key']
_MAX_LONG = 2L ** 63 # Use 2L, see issue 65. http://goo.gl/ELczz
_MAX_KEYPART_BYTES = 500
class Key(object):
  """An immutable datastore key.

  For flexibility and convenience, multiple constructor signatures are
  supported.

  The primary way to construct a key is using positional arguments:
  - Key(kind1, id1, kind2, id2, ...).

  This is shorthand for either of the following two longer forms:
  - Key(pairs=[(kind1, id1), (kind2, id2), ...])
  - Key(flat=[kind1, id1, kind2, id2, ...])

  Either of the above constructor forms can additionally pass in another
  key using parent=<key>.  The (kind, id) pairs of the parent key are
  inserted before the (kind, id) pairs passed explicitly.

  You can also construct a Key from a 'url-safe' encoded string:
  - Key(urlsafe=<string>)

  For esoteric purposes the following constructors exist:
  - Key(reference=<reference>) -- passing in a low-level Reference object
  - Key(serialized=<string>) -- passing in a serialized low-level Reference
  - Key(<dict>) -- for unpickling, the same as Key(**<dict>)

  The 'url-safe' string is really a websafe-base64-encoded serialized
  Reference, but it's best to think of it as just an opaque unique
  string.

  Additional constructor keyword arguments:
  - app=<string> -- specify the application id
  - namespace=<string> -- specify the namespace

  If a Reference is passed (using one of reference, serialized or
  urlsafe), the args and namespace keywords must match what is already
  present in the Reference (after decoding if necessary).  The parent
  keyword cannot be combined with a Reference in any form.

  Keys are immutable, which means that a Key object cannot be modified
  once it has been created.  This is enforced by the implementation as
  well as Python allows.

  For access to the contents of a key, the following methods and
  operations are supported:

  - repr(key), str(key) -- return a string representation resembling
    the shortest constructor form, omitting the app and namespace
    unless they differ from the default value.

  - key1 == key2, key1 != key2 -- comparison for equality between Keys.

  - hash(key) -- a hash value sufficient for storing Keys in a dict.

  - key.pairs() -- a tuple of (kind, id) pairs.

  - key.flat() -- a tuple of flattened kind and id values, i.e.
    (kind1, id1, kind2, id2, ...).

  - key.app() -- the application id.

  - key.id() -- the string or integer id in the last (kind, id) pair,
    or None if the key is incomplete.

  - key.string_id() -- the string id in the last (kind, id) pair,
    or None if the key has an integer id or is incomplete.

  - key.integer_id() -- the integer id in the last (kind, id) pair,
    or None if the key has a string id or is incomplete.

  - key.namespace() -- the namespace.

  - key.kind() -- a shortcut for key.pairs()[-1][0].

  - key.parent() -- a Key constructed from all but the last (kind, id)
    pairs.

  - key.urlsafe() -- a websafe-base64-encoded serialized Reference.

  - key.serialized() -- a serialized Reference.

  - key.reference() -- a Reference object.  The caller promises not to
    mutate it.

  Keys also support interaction with the datastore; these methods are
  the only ones that engage in any kind of I/O activity.  For Future
  objects, see the document for ndb/tasklets.py.

  - key.get() -- return the entity for the Key.

  - key.get_async() -- return a Future whose eventual result is
    the entity for the Key.

  - key.delete() -- delete the entity for the Key.

  - key.delete_async() -- asynchronously delete the entity for the Key.

  Keys may be pickled.

  Subclassing Key is best avoided; it would be hard to get right.
  """

  __slots__ = ['__reference', '__pairs', '__app', '__namespace']

  def __new__(cls, *_args, **kwargs):
    """Constructor.  See the class docstring for arguments."""
    if _args:
      # A single dict positional argument is the unpickling protocol
      # (see __getnewargs__); otherwise positional args are 'flat' data.
      if len(_args) == 1 and isinstance(_args[0], dict):
        if kwargs:
          raise TypeError('Key() takes no keyword arguments when a dict is the '
                          'the first and only non-keyword argument (for '
                          'unpickling).')
        kwargs = _args[0]
      else:
        if 'flat' in kwargs:
          raise TypeError('Key() with positional arguments '
                          'cannot accept flat as a keyword argument.')
        kwargs['flat'] = _args
    self = super(Key, cls).__new__(cls)
    # Either __reference or (__pairs, __app, __namespace) must be set.
    # Either one fully specifies a key; if both are set they must be
    # consistent with each other.
    if 'reference' in kwargs or 'serialized' in kwargs or 'urlsafe' in kwargs:
      self.__reference = _ConstructReference(cls, **kwargs)
      self.__pairs = None
      self.__app = None
      self.__namespace = None
    elif 'pairs' in kwargs or 'flat' in kwargs:
      self.__reference = None
      (self.__pairs,
       self.__app,
       self.__namespace) = self._parse_from_args(**kwargs)
    else:
      raise TypeError('Key() cannot create a Key instance without arguments.')
    return self

  @staticmethod
  def _parse_from_args(pairs=None, flat=None, app=None, namespace=None,
                       parent=None):
    # Normalize flat/pairs input into a validated tuple of (kind, id)
    # pairs plus the effective app and namespace (falling back to the
    # parent's values, then the process defaults).
    if flat:
      if pairs is not None:
        raise TypeError('Key() cannot accept both flat and pairs arguments.')
      if len(flat) % 2:
        raise ValueError('Key() must have an even number of positional '
                         'arguments.')
      pairs = [(flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2)]
    else:
      pairs = list(pairs)
    if not pairs:
      raise TypeError('Key must consist of at least one pair.')
    for i, (kind, id) in enumerate(pairs):
      if isinstance(id, unicode):
        id = id.encode('utf8')
      elif id is None:
        # Only the last pair may be incomplete (id None).
        if i + 1 < len(pairs):
          raise datastore_errors.BadArgumentError(
              'Incomplete Key entry must be last')
      else:
        if not isinstance(id, (int, long, str)):
          raise TypeError('Key id must be a string or a number; received %r' %
                          id)
      # A Model class may be passed in place of a kind string.
      if isinstance(kind, type):
        kind = kind._get_kind()
      if isinstance(kind, unicode):
        kind = kind.encode('utf8')
      if not isinstance(kind, str):
        raise TypeError('Key kind must be a string or Model class; '
                        'received %r' % kind)
      pairs[i] = (kind, id)
    if parent is not None:
      if not isinstance(parent, Key):
        raise datastore_errors.BadValueError(
            'Expected Key instance, got %r' % parent)
      if not parent.id():
        raise datastore_errors.BadArgumentError(
            'Parent cannot have incomplete key')
      # Prepend the parent's ancestry to our pairs.
      pairs[:0] = parent.pairs()
      if app:
        if app != parent.app():
          raise ValueError('Cannot specify a different app %r '
                           'than the parent app %r' %
                           (app, parent.app()))
      else:
        app = parent.app()
      if namespace is not None:
        if namespace != parent.namespace():
          raise ValueError('Cannot specify a different namespace %r '
                           'than the parent namespace %r' %
                           (namespace, parent.namespace()))
      else:
        namespace = parent.namespace()
    if not app:
      app = _DefaultAppId()
    if namespace is None:
      namespace = _DefaultNamespace()
    return tuple(pairs), app, namespace

  def __repr__(self):
    """String representation, used by str() and repr().

    We produce a short string that conveys all relevant information,
    suppressing app and namespace when they are equal to the default.
    """
    # TODO: Instead of "Key('Foo', 1)" perhaps return "Key(Foo, 1)" ?
    args = []
    for item in self.flat():
      if not item:
        args.append('None')
      elif isinstance(item, basestring):
        if not isinstance(item, str):
          raise TypeError('Key item is not an 8-bit string %r' % item)
        args.append(repr(item))
      else:
        args.append(str(item))
    if self.app() != _DefaultAppId():
      args.append('app=%r' % self.app())
    if self.namespace() != _DefaultNamespace():
      args.append('namespace=%r' % self.namespace())
    return 'Key(%s)' % ', '.join(args)

  __str__ = __repr__

  def __hash__(self):
    """Hash value, for use in dict lookups."""
    # This ignores app and namespace, which is fine since hash()
    # doesn't need to return a unique value -- it only needs to ensure
    # that the hashes of equal keys are equal, not the other way
    # around.
    return hash(tuple(self.pairs()))

  def __eq__(self, other):
    """Equality comparison operation."""
    if not isinstance(other, Key):
      return NotImplemented
    return (tuple(self.pairs()) == tuple(other.pairs()) and
            self.app() == other.app() and
            self.namespace() == other.namespace())

  def __ne__(self, other):
    """The opposite of __eq__."""
    if not isinstance(other, Key):
      return NotImplemented
    return not self.__eq__(other)

  def __getstate__(self):
    """Private API used for pickling."""
    return ({'pairs': list(self.pairs()),
             'app': self.app(),
             'namespace': self.namespace()},)

  def __setstate__(self, state):
    """Private API used for pickling."""
    if len(state) != 1:
      raise TypeError('Invalid state length, expected 1; received %i' %
                      len(state))
    kwargs = state[0]
    if not isinstance(kwargs, dict):
      raise TypeError('Key accepts a dict of keyword arguments as state; '
                      'received %r' % kwargs)
    self.__reference = None
    self.__pairs = kwargs['pairs']
    self.__app = kwargs['app']
    self.__namespace = kwargs['namespace']

  def __getnewargs__(self):
    """Private API used for pickling."""
    return ({'pairs': tuple(self.pairs()),
             'app': self.app(),
             'namespace': self.namespace()},)

  def parent(self):
    """Return a Key constructed from all but the last (kind, id) pairs.

    If there is only one (kind, id) pair, return None.
    """
    pairs = self.pairs()
    if len(pairs) <= 1:
      return None
    return Key(pairs=pairs[:-1], app=self.app(), namespace=self.namespace())

  def root(self):
    """Return the root key.  This is either self or the highest parent."""
    pairs = self.pairs()
    if len(pairs) <= 1:
      return self
    return Key(pairs=pairs[:1], app=self.app(), namespace=self.namespace())

  def namespace(self):
    """Return the namespace."""
    # Lazily derived from the Reference when constructed from one.
    if self.__namespace is None:
      self.__namespace = self.__reference.name_space()
    return self.__namespace

  def app(self):
    """Return the application id."""
    # Lazily derived from the Reference when constructed from one.
    if self.__app is None:
      self.__app = self.__reference.app()
    return self.__app

  def id(self):
    """Return the string or integer id in the last (kind, id) pair, if any.

    Returns:
      A string or integer id, or None if the key is incomplete.
    """
    if self.__pairs:
      return self.__pairs[-1][1]
    elem = self.__reference.path().element(-1)
    return elem.name() or elem.id() or None

  def string_id(self):
    """Return the string id in the last (kind, id) pair, if any.

    Returns:
      A string id, or None if the key has an integer id or is incomplete.
    """
    if self.__reference is None:
      id = self.id()
      if not isinstance(id, basestring):
        id = None
      return id
    elem = self.__reference.path().element(-1)
    return elem.name() or None

  def integer_id(self):
    """Return the integer id in the last (kind, id) pair, if any.

    Returns:
      An integer id, or None if the key has a string id or is incomplete.
    """
    if self.__reference is None:
      id = self.id()
      if not isinstance(id, (int, long)):
        id = None
      return id
    elem = self.__reference.path().element(-1)
    return elem.id() or None

  def pairs(self):
    """Return a tuple of (kind, id) pairs."""
    # Computed lazily from the Reference on first access, then cached.
    pairs = self.__pairs
    if pairs is None:
      pairs = []
      for elem in self.__reference.path().element_list():
        kind = elem.type()
        if elem.has_id():
          id_or_name = elem.id()
        else:
          id_or_name = elem.name()
        if not id_or_name:
          id_or_name = None
        tup = (kind, id_or_name)
        pairs.append(tup)
      self.__pairs = pairs = tuple(pairs)
    return pairs

  def flat(self):
    """Return a tuple of alternating kind and id values."""
    flat = []
    for kind, id in self.pairs():
      flat.append(kind)
      flat.append(id)
    return tuple(flat)

  def kind(self):
    """Return the kind of the entity referenced.

    This is the kind from the last (kind, id) pair.
    """
    if self.__pairs:
      return self.__pairs[-1][0]
    return self.__reference.path().element(-1).type()

  def reference(self):
    """Return the Reference object for this Key.

    This is a entity_pb.Reference instance -- a protocol buffer class
    used by the lower-level API to the datastore.

    NOTE: The caller should not mutate the return value.
    """
    # Built lazily from pairs/app/namespace on first access, then cached.
    if self.__reference is None:
      self.__reference = _ConstructReference(self.__class__,
                                             pairs=self.__pairs,
                                             app=self.__app,
                                             namespace=self.__namespace)
    return self.__reference

  def serialized(self):
    """Return a serialized Reference object for this Key."""
    return self.reference().Encode()

  def urlsafe(self):
    """Return a url-safe string encoding this Key's Reference.

    This string is compatible with other APIs and languages and with
    the strings used to represent Keys in GQL and in the App Engine
    Admin Console.
    """
    # Stripping padding and translating characters by hand is claimed to
    # be 3-4x faster than calling base64.urlsafe_b64encode() here.
    urlsafe = base64.b64encode(self.reference().Encode())
    return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')

  # Datastore API using the default context.
  # These use local import since otherwise they'd be recursive imports.

  def get(self, **ctx_options):
    """Synchronously get the entity for this Key.

    Return None if there is no such entity.
    """
    return self.get_async(**ctx_options).get_result()

  def get_async(self, **ctx_options):
    """Return a Future whose result is the entity for this Key.

    If no such entity exists, a Future is still returned, and the
    Future's eventual return result be None.
    """
    from . import model, tasklets
    ctx = tasklets.get_context()
    cls = model.Model._kind_map.get(self.kind())
    # Fire pre/post hooks only when a model class is registered for the
    # kind, and skip the post hook if it's the no-op default.
    if cls:
      cls._pre_get_hook(self)
    fut = ctx.get(self, **ctx_options)
    if cls:
      post_hook = cls._post_get_hook
      if not cls._is_default_hook(model.Model._default_post_get_hook,
                                  post_hook):
        fut.add_immediate_callback(post_hook, self, fut)
    return fut

  def delete(self, **ctx_options):
    """Synchronously delete the entity for this Key.

    This is a no-op if no such entity exists.
    """
    return self.delete_async(**ctx_options).get_result()

  def delete_async(self, **ctx_options):
    """Schedule deletion of the entity for this Key.

    This returns a Future, whose result becomes available once the
    deletion is complete.  If no such entity exists, a Future is still
    returned.  In all cases the Future's result is None (i.e. there is
    no way to tell whether the entity existed or not).
    """
    from . import tasklets, model
    ctx = tasklets.get_context()
    cls = model.Model._kind_map.get(self.kind())
    if cls:
      cls._pre_delete_hook(self)
    fut = ctx.delete(self, **ctx_options)
    if cls:
      post_hook = cls._post_delete_hook
      if not cls._is_default_hook(model.Model._default_post_delete_hook,
                                  post_hook):
        fut.add_immediate_callback(post_hook, self, fut)
    return fut

  @classmethod
  def from_old_key(cls, old_key):
    """Create a Key from an old-style (db) Key via its urlsafe string."""
    return cls(urlsafe=str(old_key))

  def to_old_key(self):
    """Convert this Key to an old-style (db) datastore_types.Key."""
    return datastore_types.Key(encoded=self.urlsafe())
# The remaining functions in this module are private.
# TODO: Conform to PEP 8 naming, e.g. _construct_reference() etc.
@utils.positional(1)
def _ConstructReference(cls, pairs=None, flat=None,
                        reference=None, serialized=None, urlsafe=None,
                        app=None, namespace=None, parent=None):
  """Construct a Reference; the signature is the same as for Key."""
  if cls is not Key:
    raise TypeError('Cannot construct Key reference on non-Key class; '
                    'received %r' % cls)
  # Exactly one source of key data may be supplied.
  if (bool(pairs) + bool(flat) + bool(reference) + bool(serialized) +
      bool(urlsafe)) != 1:
    raise TypeError('Cannot construct Key reference from incompatible keyword '
                    'arguments.')
  if flat or pairs:
    if flat:
      if len(flat) % 2:
        raise TypeError('_ConstructReference() must have an even number of '
                        'positional arguments.')
      pairs = [(flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2)]
    elif parent is not None:
      # Copy so that splicing in the parent pairs below cannot mutate
      # the caller's list.
      pairs = list(pairs)
    if not pairs:
      raise TypeError('Key references must consist of at least one pair.')
    if parent is not None:
      if not isinstance(parent, Key):
        raise datastore_errors.BadValueError(
            'Expected Key instance, got %r' % parent)
      pairs[:0] = parent.pairs()
      # app/namespace, if given explicitly, must agree with the parent's.
      if app:
        if app != parent.app():
          raise ValueError('Cannot specify a different app %r '
                           'than the parent app %r' %
                           (app, parent.app()))
      else:
        app = parent.app()
      if namespace is not None:
        if namespace != parent.namespace():
          raise ValueError('Cannot specify a different namespace %r '
                           'than the parent namespace %r' %
                           (namespace, parent.namespace()))
      else:
        namespace = parent.namespace()
    reference = _ReferenceFromPairs(pairs, app=app, namespace=namespace)
  else:
    if parent is not None:
      raise TypeError('Key reference cannot be constructed when the parent '
                      'argument is combined with either reference, serialized '
                      'or urlsafe arguments.')
    if urlsafe:
      serialized = _DecodeUrlSafe(urlsafe)
    if serialized:
      reference = _ReferenceFromSerialized(serialized)
    if not reference.path().element_size():
      raise RuntimeError('Key reference path has no element size (%r, %r, %r).'
                         % (urlsafe, serialized, str(reference)))
    # TODO: ensure that each element has a type and either an id or a name
    if not serialized:
      # A raw Reference object was passed in; copy it so later mutation
      # by the caller cannot affect this Key.
      reference = _ReferenceFromReference(reference)
    # You needn't specify app= or namespace= together with reference=,
    # serialized= or urlsafe=, but if you do, their values must match
    # what is already in the reference.
    if app is not None:
      if app != reference.app():
        raise RuntimeError('Key reference constructed uses a different app %r '
                           'than the one specified %r' %
                           (reference.app(), app))
    if namespace is not None:
      if namespace != reference.name_space():
        raise RuntimeError('Key reference constructed uses a different '
                           'namespace %r than the one specified %r' %
                           (reference.name_space(), namespace))
  return reference
def _ReferenceFromPairs(pairs, reference=None, app=None, namespace=None):
  """Construct a Reference from a list of pairs.

  If a Reference is passed in as the second argument, it is modified
  in place.  The app and namespace are set from the corresponding
  keyword arguments, with the customary defaults.

  Args:
    pairs: Iterable of (kind, idorname) tuples.  kind may be a str, a
      unicode string, or a Model subclass (whose _get_kind() supplies
      the kind string); idorname may be an integer id, a string name,
      or None for an incomplete key (allowed only in the last pair).
    reference: Optional entity_pb.Reference to fill in place.
    app: Optional application id; empty/None means the default app.
    namespace: Optional namespace; None means the default namespace.

  Returns:
    An entity_pb.Reference (the same object passed in, if any).

  Raises:
    datastore_errors.BadArgumentError: If an incomplete pair is not last.
    TypeError: If a kind or id value has an unsupported type.
    ValueError: If a kind or name string has an invalid length, or an
      id is out of range.
  """
  if reference is None:
    reference = entity_pb.Reference()
  path = reference.mutable_path()
  last = False  # True once an incomplete (idorname=None) pair is seen.
  for kind, idorname in pairs:
    if last:
      raise datastore_errors.BadArgumentError(
        'Incomplete Key entry must be last')
    t = type(kind)
    if t is str:
      pass
    elif t is unicode:
      kind = kind.encode('utf8')
    else:
      if issubclass(t, type):
        # The kind is given as a class; it must be a Model subclass.
        # Late import to avoid cycles.
        from .model import Model
        modelclass = kind
        if not issubclass(modelclass, Model):
          raise TypeError('Key kind must be either a string or subclass of '
                          'Model; received %r' % modelclass)
        kind = modelclass._get_kind()
        t = type(kind)
      # Re-check the (possibly substituted) kind; str/unicode subclasses
      # are accepted on this slow path, unlike the fast path above.
      if t is str:
        pass
      elif t is unicode:
        kind = kind.encode('utf8')
      elif issubclass(t, str):
        pass
      elif issubclass(t, unicode):
        kind = kind.encode('utf8')
      else:
        raise TypeError('Key kind must be either a string or subclass of Model;'
                        ' received %r' % kind)
    if not (1 <= len(kind) <= _MAX_KEYPART_BYTES):
      # BUG FIX: the two adjacent string literals previously rendered as
      # '...up to %ibytes...'; a separating space has been added.
      raise ValueError('Key kind string must be a non-empty string up to %i '
                       'bytes; received %s' %
                       (_MAX_KEYPART_BYTES, kind))
    elem = path.add_element()
    elem.set_type(kind)
    t = type(idorname)
    if t is int or t is long:
      if not (1 <= idorname < _MAX_LONG):
        raise ValueError('Key id number is too long; received %i' % idorname)
      elem.set_id(idorname)
    elif t is str:
      if not (1 <= len(idorname) <= _MAX_KEYPART_BYTES):
        raise ValueError('Key name strings must be non-empty strings up to %i '
                         'bytes; received %s' %
                         (_MAX_KEYPART_BYTES, idorname))
      elem.set_name(idorname)
    elif t is unicode:
      idorname = idorname.encode('utf8')
      if not (1 <= len(idorname) <= _MAX_KEYPART_BYTES):
        raise ValueError('Key name unicode strings must be non-empty strings up'
                         ' to %i bytes; received %s' %
                         (_MAX_KEYPART_BYTES, idorname))
      elem.set_name(idorname)
    elif idorname is None:
      # Incomplete key: id 0 is the placeholder; nothing may follow.
      elem.set_id(0)
      last = True
    elif issubclass(t, (int, long)):
      if not (1 <= idorname < _MAX_LONG):
        raise ValueError('Key id number is too long; received %i' % idorname)
      elem.set_id(idorname)
    elif issubclass(t, basestring):
      if issubclass(t, unicode):
        idorname = idorname.encode('utf8')
      if not (1 <= len(idorname) <= _MAX_KEYPART_BYTES):
        raise ValueError('Key name strings must be non-empty strings up to %i '
                         'bytes; received %s' % (_MAX_KEYPART_BYTES, idorname))
      elem.set_name(idorname)
    else:
      raise TypeError('id must be either a numeric id or a string name; '
                      'received %r' % idorname)
  # An empty app id means to use the default app id.
  if not app:
    app = _DefaultAppId()
  # Always set the app id, since it is mandatory.
  reference.set_app(app)
  # An empty namespace overrides the default namespace.
  if namespace is None:
    namespace = _DefaultNamespace()
  # Only set the namespace if it is not empty.
  if namespace:
    reference.set_name_space(namespace)
  return reference
def _ReferenceFromReference(reference):
  """Return a fresh Reference that is a deep copy of the argument."""
  clone = entity_pb.Reference()
  clone.CopyFrom(reference)
  return clone
def _ReferenceFromSerialized(serialized):
  """Construct a Reference from its serialized (wire-format) string."""
  if isinstance(serialized, unicode):
    # Protocol buffer parsing needs a byte string.
    serialized = serialized.encode('utf8')
  elif not isinstance(serialized, basestring):
    raise TypeError('serialized must be a string; received %r' % serialized)
  return entity_pb.Reference(serialized)
def _DecodeUrlSafe(urlsafe):
  """Decode a url-safe base64-encoded string.

  This returns the decoded string.
  """
  if not isinstance(urlsafe, basestring):
    raise TypeError('urlsafe must be a string; received %r' % urlsafe)
  if isinstance(urlsafe, unicode):
    urlsafe = urlsafe.encode('utf8')
  # Restore the '=' padding that urlsafe() strips off.
  padding = '=' * (-len(urlsafe) % 4)
  # This is 3-4x faster than urlsafe_b64decode()
  translated = urlsafe.replace('-', '+').replace('_', '/')
  return base64.b64decode(translated + padding)
def _DefaultAppId():
"""Return the default application id.
This is taken from the APPLICATION_ID environment variable.
"""
return os.getenv('APPLICATION_ID', '_')
def _DefaultNamespace():
  """Return the default namespace.

  This is taken from the namespace manager.
  """
  current = namespace_manager.get_namespace()
  return current
| Python |
"""Tests for key.py."""
import base64
import pickle
import unittest
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import entity_pb
from . import eventloop
from . import key
from . import model
from . import tasklets
from . import test_utils
class KeyTests(test_utils.NDBTest):
  """Unit tests for ndb Key: construction, accessors, serialization,
  pickling, and the model get/delete hook machinery.
  """

  the_module = key

  def testShort(self):
    # Positional (kind, id) construction, including an incomplete key.
    k0 = key.Key('Kind', None)
    self.assertEqual(k0.flat(), ('Kind', None))
    k1 = key.Key('Kind', 1)
    self.assertEqual(k1.flat(), ('Kind', 1))
    k2 = key.Key('Parent', 42, 'Kind', 1)
    self.assertEqual(k2.flat(), ('Parent', 42, 'Kind', 1))

  def testFlat(self):
    flat = ('Kind', 1)
    pairs = tuple((flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2))
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    self.assertEqual(k.kind(), 'Kind')

  def testFlatLong(self):
    flat = ('Kind', 1, 'Subkind', 'foobar')
    pairs = tuple((flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2))
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    # kind() reports the kind of the *last* pair.
    self.assertEqual(k.kind(), 'Subkind')

  def testSerialized(self):
    # Round-trips between Key and Reference / serialized / urlsafe forms.
    flat = ['Kind', 1, 'Subkind', 'foobar']
    r = entity_pb.Reference()
    r.set_app('_')
    e = r.mutable_path().add_element()
    e.set_type(flat[0])
    e.set_id(flat[1])
    e = r.mutable_path().add_element()
    e.set_type(flat[2])
    e.set_name(flat[3])
    serialized = r.Encode()
    urlsafe = base64.urlsafe_b64encode(r.Encode()).rstrip('=')
    k = key.Key(flat=flat)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(urlsafe=urlsafe)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(serialized=serialized)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r)
    # The Key must hold a private copy, not the caller's Reference.
    self.assertTrue(k.reference() is not r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r, app=r.app(), namespace='')
    self.assertTrue(k.reference() is not r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    # Golden value: guards against changes in the urlsafe encoding.
    k1 = key.Key('A', 1)
    self.assertEqual(k1.urlsafe(), 'agFfcgcLEgFBGAEM')
    k2 = key.Key(urlsafe=k1.urlsafe())
    self.assertEqual(k1, k2)

  def testId(self):
    # id() returns whichever of string/integer id is set, else None.
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.id(), 'foo')
    k2 = key.Key('Subkind', 42, parent=k1)
    self.assertEqual(k2.id(), 42)
    k3 = key.Key('Subkind', 'bar', parent=k2)
    self.assertEqual(k3.id(), 'bar')
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.id(), None)

  def testStringId(self):
    # string_id() is None unless the last pair has a string name.
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.string_id(), 'foo')
    k2 = key.Key('Subkind', 'bar', parent=k1)
    self.assertEqual(k2.string_id(), 'bar')
    k3 = key.Key('Subkind', 42, parent=k2)
    self.assertEqual(k3.string_id(), None)
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.string_id(), None)

  def testIntegerId(self):
    # integer_id() is None unless the last pair has a numeric id.
    k1 = key.Key('Kind', 42, app='app1', namespace='ns1')
    self.assertEqual(k1.integer_id(), 42)
    k2 = key.Key('Subkind', 43, parent=k1)
    self.assertEqual(k2.integer_id(), 43)
    k3 = key.Key('Subkind', 'foobar', parent=k2)
    self.assertEqual(k3.integer_id(), None)
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.integer_id(), None)

  def testParent(self):
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.parent(), None)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.parent(), p)
    # Explicit app/namespace matching the parent's is also accepted.
    k = key.Key('Subkind', 'foobar', parent=p,
                app=p.app(), namespace=p.namespace())
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.parent(), p)

  def testRoot(self):
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.root(), p)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.root(), p)
    k2 = key.Key('Subsubkind', 42, parent=k,
                 app=p.app(), namespace=p.namespace())
    self.assertEqual(k2.flat(), ('Kind', 1,
                                 'Subkind', 'foobar',
                                 'Subsubkind', 42))
    self.assertEqual(k2.root(), p)

  def testRepr_Inferior(self):
    # A long id (1L) reprs the same as the plain int.
    k = key.Key('Kind', 1L, 'Subkind', 'foobar')
    self.assertEqual(repr(k),
                     "Key('Kind', 1, 'Subkind', 'foobar')")
    self.assertEqual(repr(k), str(k))

  def testRepr_Toplevel(self):
    k = key.Key('Kind', 1)
    self.assertEqual(repr(k), "Key('Kind', 1)")

  def testRepr_Incomplete(self):
    k = key.Key('Kind', None)
    self.assertEqual(repr(k), "Key('Kind', None)")

  def testRepr_UnicodeKind(self):
    # Unicode kinds repr as their utf-8 byte escapes.
    k = key.Key(u'\u1234', 1)
    self.assertEqual(repr(k), "Key('\\xe1\\x88\\xb4', 1)")

  def testRepr_UnicodeId(self):
    k = key.Key('Kind', u'\u1234')
    self.assertEqual(repr(k), "Key('Kind', '\\xe1\\x88\\xb4')")

  def testRepr_App(self):
    k = key.Key('Kind', 1, app='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, app='foo')")

  def testRepr_Namespace(self):
    k = key.Key('Kind', 1, namespace='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, namespace='foo')")

  def testUnicode(self):
    # Unicode kinds/names are stored utf-8 encoded.
    flat_input = (u'Kind\u1234', 1, 'Subkind', u'foobar\u4321')
    flat = (flat_input[0].encode('utf8'), flat_input[1],
            flat_input[2], flat_input[3].encode('utf8'))
    pairs = tuple((flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2))
    k = key.Key(flat=flat_input)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    # TODO: test these more thoroughly
    r = k.reference()
    serialized = k.serialized()
    urlsafe = k.urlsafe()
    key.Key(urlsafe=urlsafe.decode('utf8'))
    key.Key(serialized=serialized.decode('utf8'))
    key.Key(reference=r)
    # TODO: this may not make sense -- the protobuf utf8-encodes values
    r = entity_pb.Reference()
    r.set_app('_')
    e = r.mutable_path().add_element()
    e.set_type(flat_input[0])
    e.set_name(flat_input[3])
    k = key.Key(reference=r)
    self.assertEqual(k.reference(), r)

  def testHash(self):
    # A Key hashes like its tuple of (kind, id) pairs.
    flat = ['Kind', 1, 'Subkind', 'foobar']
    pairs = [(flat[i], flat[i + 1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat)
    self.assertEqual(hash(k), hash(tuple(pairs)))

  def testPickling(self):
    # Keys must survive round-trips under every pickle protocol.
    flat = ['Kind', 1, 'Subkind', 'foobar']
    k = key.Key(flat=flat)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
      s = pickle.dumps(k, protocol=proto)
      kk = pickle.loads(s)
      self.assertEqual(k, kk)

  def testIncomplete(self):
    # None is only allowed as the very last id.
    key.Key(flat=['Kind', None])
    self.assertRaises(datastore_errors.BadArgumentError,
                      key.Key, flat=['Kind', None, 'Subkind', 1])
    self.assertRaises(TypeError, key.Key, flat=['Kind', ()])

  def testKindFromModel(self):
    # A Model subclass may stand in for a kind string.
    class M(model.Model):
      pass

    class N(model.Model):

      @classmethod
      def _get_kind(cls):
        return 'NN'
    k = key.Key(M, 1)
    self.assertEqual(k, key.Key('M', 1))
    k = key.Key('X', 1, N, 2, 'Y', 3)
    self.assertEqual(k, key.Key('X', 1, 'NN', 2, 'Y', 3))

  def testKindFromBadValue(self):
    # TODO: BadArgumentError
    self.assertRaises(Exception, key.Key, 42, 42)

  def testDeleteHooksCalled(self):
    # Verifies pre/post delete hooks fire exactly once per key, for both
    # single deletes and delete_multi.
    test = self  # Closure for inside hook
    self.pre_counter = 0
    self.post_counter = 0

    class HatStand(model.Model):

      @classmethod
      def _pre_delete_hook(cls, key):
        test.pre_counter += 1
        if test.pre_counter == 1:  # Cannot test for key in delete_multi
          self.assertEqual(self.key, key)

      @classmethod
      def _post_delete_hook(cls, key, future):
        test.post_counter += 1
        self.assertEqual(self.key, key)
        self.assertTrue(future.get_result() is None)
    furniture = HatStand()
    key = furniture.put()
    self.key = key
    self.assertEqual(self.pre_counter, 0, 'Pre delete hook called early')
    future = key.delete_async()
    self.assertEqual(self.pre_counter, 1, 'Pre delete hook not called')
    self.assertEqual(self.post_counter, 0, 'Post delete hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post delete hook not called')

    # All counters now read 1, calling delete_multi for 10 keys makes this 11
    new_furniture = [HatStand() for _ in range(10)]
    keys = [furniture.put() for furniture in new_furniture]  # Sequential keys
    multi_future = model.delete_multi_async(keys)
    self.assertEqual(self.pre_counter, 11,
                     'Pre delete hooks not called on delete_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post delete hooks called early on delete_multi')
    for fut, key in zip(multi_future, keys):
      self.key = key
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post delete hooks not called on delete_multi')

  def testNoDefaultDeleteCallback(self):
    # See issue 58.  http://goo.gl/hPN6j
    ctx = tasklets.get_context()
    ctx.set_cache_policy(False)

    class EmptyModel(model.Model):
      pass
    entity = EmptyModel()
    entity.put()
    fut = entity.key.delete_async()
    # A model with no hooks must not enqueue a no-op callback.
    self.assertFalse(fut._immediate_callbacks,
                     'Delete hook queued default no-op.')

  def testGetHooksCalled(self):
    # Mirror of testDeleteHooksCalled for the get hooks.
    test = self  # Closure for inside hook
    self.pre_counter = 0
    self.post_counter = 0

    class HatStand(model.Model):

      @classmethod
      def _pre_get_hook(cls, key):
        test.pre_counter += 1
        if test.pre_counter == 1:  # Cannot test for key in get_multi
          self.assertEqual(key, self.key)

      @classmethod
      def _post_get_hook(cls, key, future):
        test.post_counter += 1
        self.assertEqual(key, self.key)
        self.assertEqual(future.get_result(), self.entity)
    furniture = HatStand()
    self.entity = furniture
    key = furniture.put()
    self.key = key
    self.assertEqual(self.pre_counter, 0, 'Pre get hook called early')
    future = key.get_async()
    self.assertEqual(self.pre_counter, 1, 'Pre get hook not called')
    self.assertEqual(self.post_counter, 0, 'Post get hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post get hook not called')

    # All counters now read 1, calling get for 10 keys should make this 11
    new_furniture = [HatStand() for _ in range(10)]
    keys = [furniture.put() for furniture in new_furniture]  # Sequential keys
    multi_future = model.get_multi_async(keys)
    self.assertEqual(self.pre_counter, 11,
                     'Pre get hooks not called on get_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post get hooks called early on get_multi')
    for fut, key, entity in zip(multi_future, keys, new_furniture):
      self.key = key
      self.entity = entity
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post get hooks not called on get_multi')

  def testMonkeyPatchHooks(self):
    # Hooks patched directly onto model.Model must also fire; the
    # originals are restored in the finally block below.
    hook_attr_names = ('_pre_get_hook', '_post_get_hook',
                       '_pre_delete_hook', '_post_delete_hook')
    original_hooks = {}

    # Backup the original hooks
    for name in hook_attr_names:
      original_hooks[name] = getattr(model.Model, name)

    self.pre_get_flag = False
    self.post_get_flag = False
    self.pre_delete_flag = False
    self.post_delete_flag = False

    # TODO: Should the unused arguments to Monkey Patched tests be tested?

    class HatStand(model.Model):

      @classmethod
      def _pre_get_hook(cls, unused_key):
        self.pre_get_flag = True

      @classmethod
      def _post_get_hook(cls, unused_key, unused_future):
        self.post_get_flag = True

      @classmethod
      def _pre_delete_hook(cls, unused_key):
        self.pre_delete_flag = True

      @classmethod
      def _post_delete_hook(cls, unused_key, unused_future):
        self.post_delete_flag = True

    # Monkey patch the hooks
    for name in hook_attr_names:
      hook = getattr(HatStand, name)
      setattr(model.Model, name, hook)

    try:
      key = HatStand().put()
      key.get()
      self.assertTrue(self.pre_get_flag,
                      'Pre get hook not called when model is monkey patched')
      self.assertTrue(self.post_get_flag,
                      'Post get hook not called when model is monkey patched')
      key.delete()
      self.assertTrue(self.pre_delete_flag,
                      'Pre delete hook not called when model is monkey patched')
      self.assertTrue(self.post_delete_flag,
                      'Post delete hook not called when model is monkey patched')
    finally:
      # Restore the original hooks
      for name in hook_attr_names:
        setattr(model.Model, name, original_hooks[name])

  def testPreHooksCannotCancelRPC(self):
    # An exception raised in a pre-hook propagates; it does not cancel
    # the underlying RPC silently.
    class Foo(model.Model):

      @classmethod
      def _pre_get_hook(cls, unused_key):
        raise tasklets.Return()

      @classmethod
      def _pre_delete_hook(cls, unused_key):
        raise tasklets.Return()
    entity = Foo()
    entity.put()
    self.assertRaises(tasklets.Return, entity.key.get)
    self.assertRaises(tasklets.Return, entity.key.delete)

  def testNoDefaultGetCallback(self):
    # See issue 58.  http://goo.gl/hPN6j
    ctx = tasklets.get_context()
    ctx.set_cache_policy(False)

    class EmptyModel(model.Model):
      pass
    entity = EmptyModel()
    entity.put()
    fut = entity.key.get_async()
    self.assertFalse(fut._immediate_callbacks, 'Get hook queued default no-op.')

  def testFromOldKey(self):
    # Conversion to and from the old db-style datastore_types.Key.
    old_key = datastore_types.Key.from_path('TestKey', 1234)
    new_key = key.Key.from_old_key(old_key)
    self.assertEquals(str(old_key), new_key.urlsafe())
    old_key2 = new_key.to_old_key()
    self.assertEquals(old_key, old_key2)
def main():
  # Entry point: discover and run all tests in this module.
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""NDB -- A new datastore API for the Google App Engine Python runtime."""
__version__ = '0.9.9'
__all__ = []
from tasklets import *
__all__ += tasklets.__all__
from model import * # This implies key.*
__all__ += model.__all__
from query import *
__all__ += query.__all__
from context import *
__all__ += context.__all__
| Python |
"""Tests for blobstore.py."""
import cgi
import cStringIO
import datetime
import pickle
import unittest
from .google_imports import namespace_manager
from .google_imports import datastore_types
from . import blobstore
from . import model
from . import tasklets
from . import test_utils
class BlobstoreTests(test_utils.NDBTest):
  """Unit tests for the ndb.blobstore wrappers: the BlobInfo model and
  the module-level get/delete/upload/parse/fetch helpers.
  """

  def setUp(self):
    super(BlobstoreTests, self).setUp()
    self.testbed.init_blobstore_stub()

  # NOTE(review): class attribute placed after setUp(); order is
  # harmless but unconventional.
  the_module = blobstore

  def testConstants(self):
    # This intentionally hardcodes the values. I'd like to know when
    # they change.
    self.assertEqual(blobstore.BLOB_INFO_KIND, '__BlobInfo__')
    self.assertEqual(blobstore.BLOB_MIGRATION_KIND, '__BlobMigration__')
    self.assertEqual(blobstore.BLOB_KEY_HEADER, 'X-AppEngine-BlobKey')
    self.assertEqual(blobstore.BLOB_RANGE_HEADER, 'X-AppEngine-BlobRange')
    self.assertEqual(blobstore.UPLOAD_INFO_CREATION_HEADER,
                     'X-AppEngine-Upload-Creation')
    self.assertEqual(blobstore.MAX_BLOB_FETCH_SIZE, 1015808)

  def testExceptions(self):
    # The whole exception hierarchy must be rooted at blobstore.Error.
    self.assertTrue(issubclass(blobstore.Error, Exception))
    self.assertTrue(issubclass(blobstore.InternalError, blobstore.Error))
    self.assertTrue(issubclass(blobstore.BlobFetchSizeTooLargeError,
                               blobstore.Error))
    self.assertTrue(issubclass(blobstore.BlobNotFoundError, blobstore.Error))
    self.assertTrue(issubclass(blobstore.DataIndexOutOfRangeError,
                               blobstore.Error))
    self.assertTrue(issubclass(blobstore.PermissionDeniedError,
                               blobstore.Error))
    self.assertTrue(issubclass(blobstore.BlobInfoParseError, blobstore.Error))

  def create_blobinfo(self, blobkey):
    """Handcraft a dummy BlobInfo."""
    # Bypasses the normal upload path; uses Model._put_async directly
    # because BlobInfo.put() is intentionally blocked (see
    # testBlobInfo_PutErrors below).
    b = blobstore.BlobInfo(key=model.Key(blobstore.BLOB_INFO_KIND, blobkey),
                           content_type='text/plain',
                           creation=datetime.datetime(2012, 1, 24, 8, 15, 0),
                           filename='hello.txt',
                           size=42,
                           md5_hash='xxx')
    model.Model._put_async(b).check_success()
    return b

  def testBlobInfo(self):
    b = self.create_blobinfo('dummy')
    self.assertEqual(b._get_kind(), blobstore.BLOB_INFO_KIND)
    self.assertEqual(b.key(), blobstore.BlobKey('dummy'))
    self.assertEqual(b.content_type, 'text/plain')
    self.assertEqual(b.creation, datetime.datetime(2012, 1, 24, 8, 15, 0))
    self.assertEqual(b.filename, 'hello.txt')
    self.assertEqual(b.md5_hash, 'xxx')

  def testBlobInfo_PutErrors(self):
    # Direct writes of BlobInfo entities are forbidden.
    b = self.create_blobinfo('dummy')
    self.assertRaises(Exception, b.put)
    self.assertRaises(Exception, b.put_async)
    self.assertRaises(Exception, model.put_multi, [b])
    self.assertRaises(Exception, model.put_multi_async, [b])

  def testBlobInfo_Get(self):
    # get() accepts both a BlobKey and its string form.
    b = self.create_blobinfo('dummy')
    c = blobstore.BlobInfo.get(b.key())
    self.assertEqual(c, b)
    self.assertTrue(c is not b)
    c = blobstore.BlobInfo.get('dummy')
    self.assertEqual(c, b)
    self.assertTrue(c is not b)

  def testBlobInfo_GetAsync(self):
    b = self.create_blobinfo('dummy')
    cf = blobstore.BlobInfo.get_async(b.key())
    self.assertTrue(isinstance(cf, tasklets.Future))
    c = cf.get_result()
    self.assertEqual(c, b)
    self.assertTrue(c is not b)
    df = blobstore.BlobInfo.get_async(str(b.key()))
    self.assertTrue(isinstance(df, tasklets.Future))
    d = df.get_result()
    self.assertEqual(d, b)
    self.assertTrue(d is not b)

  def testBlobInfo_GetMulti(self):
    b = self.create_blobinfo('b')
    c = self.create_blobinfo('c')
    d, e = blobstore.BlobInfo.get_multi([b.key(), str(c.key())])
    self.assertEqual(d, b)
    self.assertEqual(e, c)

  def testBlobInfo_GetMultiAsync(self):
    b = self.create_blobinfo('b')
    c = self.create_blobinfo('c')
    df, ef = blobstore.BlobInfo.get_multi_async([str(b.key()), c.key()])
    self.assertTrue(isinstance(df, tasklets.Future))
    self.assertTrue(isinstance(ef, tasklets.Future))
    d, e = df.get_result(), ef.get_result()
    self.assertEqual(d, b)
    self.assertEqual(e, c)

  def testBlobInfo_Delete(self):
    b = self.create_blobinfo('dummy')
    c = blobstore.get(b._key.id())
    self.assertEqual(c, b)
    b.delete()
    d = blobstore.get(b.key())
    self.assertEqual(d, None)

  def testBlobInfo_DeleteAsync(self):
    b = self.create_blobinfo('dummy')
    df = b.delete_async()
    self.assertTrue(isinstance(df, tasklets.Future), df)
    df.get_result()
    d = blobstore.get(b.key())
    self.assertEqual(d, None)

  def testBlobstore_Get(self):
    # Module-level get() mirrors BlobInfo.get().
    b = self.create_blobinfo('dummy')
    c = blobstore.get(b.key())
    self.assertEqual(c, b)
    self.assertTrue(c is not b)
    c = blobstore.get('dummy')
    self.assertEqual(c, b)
    self.assertTrue(c is not b)

  def testBlobstore_GetAsync(self):
    b = self.create_blobinfo('dummy')
    cf = blobstore.get_async(b.key())
    self.assertTrue(isinstance(cf, tasklets.Future))
    c = cf.get_result()
    self.assertEqual(c, b)
    self.assertTrue(c is not b)
    cf = blobstore.get_async('dummy')
    c = cf.get_result()
    self.assertEqual(c, b)
    self.assertTrue(c is not b)

  def testBlobstore_Delete(self):
    b = self.create_blobinfo('dummy')
    blobstore.delete(b.key())
    d = blobstore.get(b.key())
    self.assertEqual(d, None)

  def testBlobstore_DeleteAsync(self):
    b = self.create_blobinfo('dummy')
    df = blobstore.delete_async(b.key())
    self.assertTrue(isinstance(df, tasklets.Future), df)
    df.get_result()
    d = blobstore.get(b.key())
    self.assertEqual(d, None)

  def testBlobstore_DeleteMulti(self):
    b = self.create_blobinfo('b')
    c = self.create_blobinfo('c')
    blobstore.delete_multi([b.key(), str(c.key())])
    d, e = blobstore.get_multi([b.key(), str(c.key())])
    self.assertEqual(d, None)
    self.assertEqual(e, None)

  def testBlobstore_DeleteMultiAsync(self):
    b = self.create_blobinfo('b')
    c = self.create_blobinfo('c')
    f = blobstore.delete_multi_async([b.key(), str(c.key())])
    self.assertTrue(isinstance(f, tasklets.Future), f)
    f.get_result()
    d, e = blobstore.get_multi([b.key(), str(c.key())])
    self.assertEqual(d, None)
    self.assertEqual(e, None)

  def testBlobstore_CreateUploadUrl(self):
    url = blobstore.create_upload_url('/foo')
    self.assertTrue('/_ah/upload/' in url, url)

  def testBlobstore_CreateUploadUrlAsync(self):
    urlf = blobstore.create_upload_url_async('/foo')
    self.assertTrue(isinstance(urlf, tasklets.Future), urlf)
    url = urlf.get_result()
    self.assertTrue('/_ah/upload/' in url, url)

  def testBlobstore_ParseBlobInfo_Errors(self):
    # None input is a no-op; malformed MIME sub-messages must raise
    # BlobInfoParseError (missing content-type, non-numeric length,
    # unparseable creation timestamp).
    nope = blobstore.parse_blob_info(None)
    self.assertEqual(nope, None)
    env = {'REQUEST_METHOD': 'POST'}
    hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
            'content-type': 'text/plain; blob-key=xxx'}
    fd = cStringIO.StringIO(
      'Content-length: 42\n'
      'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
      'Content-MD5: eHh4\n'
      '\n'
      )
    fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
    self.assertRaises(blobstore.BlobInfoParseError,
                      blobstore.parse_blob_info, fs)
    fd = cStringIO.StringIO(
      'Content-type: image/jpeg\n'
      'Content-length: hello\n'
      'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
      'Content-MD5: eHh4\n'
      '\n'
      )
    fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
    self.assertRaises(blobstore.BlobInfoParseError,
                      blobstore.parse_blob_info, fs)
    fd = cStringIO.StringIO(
      'Content-type: image/jpeg\n'
      'Content-length: 42\n'
      'X-AppEngine-Upload-Creation: BLAH-01-24 17:35:00.000000\n'
      'Content-MD5: eHh4\n'
      '\n'
      )
    fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
    self.assertRaises(blobstore.BlobInfoParseError,
                      blobstore.parse_blob_info, fs)

  def testBlobstore_ParseBlobInfo(self):
    # Happy path: a well-formed upload sub-message parses into a
    # BlobInfo whose key comes from the blob-key header parameter.
    env = {'REQUEST_METHOD': 'POST'}
    hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
            'content-type': 'text/plain; blob-key=xxx'}
    fd = cStringIO.StringIO(
      'Content-type: image/jpeg\n'
      'Content-length: 42\n'
      'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
      'Content-MD5: eHh4\n'
      '\n'
      )
    fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
    bi = blobstore.parse_blob_info(fs)
    self.assertTrue(isinstance(bi, blobstore.BlobInfo))
    self.assertEqual(
      bi,
      blobstore.BlobInfo(key=model.Key(blobstore.BlobInfo, 'xxx'),
                         content_type='image/jpeg',
                         creation=datetime.datetime(2012, 1, 24, 17, 35),
                         filename='hello.txt',
                         md5_hash='xxx',
                         size=42))

  def testBlobstore_FetchData(self):
    self.create_blobinfo('xxx')
    stub = self.testbed.get_stub('blobstore')
    storage = stub.storage
    storage._blobs['xxx'] = 'abcde'
    result = blobstore.fetch_data('xxx', 0, 3)  # Range is inclusive!
    self.assertEqual(result, 'abcd')

  def testBlobstore_FetchDataAsync(self):
    b = self.create_blobinfo('xxx')
    stub = self.testbed.get_stub('blobstore')
    storage = stub.storage
    storage._blobs['xxx'] = 'abcde'
    fut = blobstore.fetch_data_async(b, 0, 2)
    self.assertTrue(isinstance(fut, tasklets.Future), fut)
    result = fut.get_result()
    self.assertEqual(result, 'abc')

  def testBlobInfo_Open(self):
    b = self.create_blobinfo('xxx')
    stub = self.testbed.get_stub('blobstore')
    storage = stub.storage
    storage._blobs['xxx'] = 'abcde'
    f = b.open()
    self.assertEqual(f.read(3), 'abc')
    self.assertEqual(f.read(3), 'de')
    self.assertEqual(f.blob_info, b)

  def testBlobReader(self):
    b = self.create_blobinfo('xxx')
    stub = self.testbed.get_stub('blobstore')
    storage = stub.storage
    storage._blobs['xxx'] = 'abcde'
    f = blobstore.BlobReader('xxx')
    self.assertEqual(f.read(), 'abcde')
    self.assertEqual(f.blob_info, b)
def main():
  # Entry point: discover and run all tests in this module.
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""Model and Property classes and associated stuff.
A model class represents the structure of entities stored in the
datastore. Applications define model classes to indicate the
structure of their entities, then instantiate those model classes
to create entities.
All model classes must inherit (directly or indirectly) from Model.
Through the magic of metaclasses, straightforward assignments in the
model class definition can be used to declare the model's structure:
class Person(Model):
name = StringProperty()
age = IntegerProperty()
We can now create a Person entity and write it to the datastore:
p = Person(name='Arthur Dent', age=42)
k = p.put()
The return value from put() is a Key (see the documentation for
ndb/key.py), which can be used to retrieve the same entity later:
p2 = k.get()
p2 == p # Returns True
To update an entity, simply change its attributes and write it back
(note that this doesn't change the key):
p2.name = 'Arthur Philip Dent'
p2.put()
We can also delete an entity (by using the key):
k.delete()
The property definitions in the class body tell the system the names
and the types of the fields to be stored in the datastore, whether
they must be indexed, their default value, and more.
Many different Property types exist. Most are indexed by default, the
exceptions indicated in the list below:
- StringProperty: a short text string, limited to 500 bytes
- TextProperty: an unlimited text string; unindexed
- BlobProperty: an unlimited byte string; unindexed
- IntegerProperty: a 64-bit signed integer
- FloatProperty: a double precision floating point number
- BooleanProperty: a bool value
- DateTimeProperty: a datetime object. Note: App Engine always uses
UTC as the timezone
- DateProperty: a date object
- TimeProperty: a time object
- GeoPtProperty: a geographical location, i.e. (latitude, longitude)
- KeyProperty: a datastore Key value, optionally constrained to
referring to a specific kind
- UserProperty: a User object (for backwards compatibility only)
- StructuredProperty: a field that is itself structured like an
entity; see below for more details
- LocalStructuredProperty: like StructuredProperty but the on-disk
representation is an opaque blob; unindexed
- ComputedProperty: a property whose value is computed from other
properties by a user-defined function. The property value is
written to the datastore so that it can be used in queries, but the
value from the datastore is not used when the entity is read back
- GenericProperty: a property whose type is not constrained; mostly
used by the Expando class (see below) but also usable explicitly
- JsonProperty: a property whose value is any object that can be
serialized using JSON; the value written to the datastore is a JSON
representation of that object
- PickleProperty: a property whose value is any object that can be
serialized using Python's pickle protocol; the value written to the
datastore is the pickled representation of that object, using the
highest available pickle protocol
Most Property classes have similar constructor signatures. They
accept several optional keyword arguments:
- name=<string>: the name used to store the property value in the
datastore. Unlike the following options, this may also be given as
a positional argument
- indexed=<bool>: indicates whether the property should be indexed
(allowing queries on this property's value)
- repeated=<bool>: indicates that this property can have multiple
values in the same entity.
- required=<bool>: indicates that this property must be given a value
- default=<value>: a default value if no explicit value is given
- choices=<list of values>: a list or tuple of allowable values
- validator=<function>: a general-purpose validation function. It
will be called with two arguments (prop, value) and should either
return the validated value or raise an exception. It is also
allowed for the function to modify the value, but calling it again
on the modified value should not modify the value further. (For
example: a validator that returns value.strip() or value.lower() is
fine, but one that returns value + '$' is not.)
- verbose_name=<value>: A human readable name for this property. This
human readable name can be used for html form labels.
The repeated, required and default options are mutually exclusive: a
repeated property cannot be required nor can it specify a default
value (the default is always an empty list and an empty list is always
an allowed value), and a required property cannot have a default.
Some property types have additional arguments. Some property types
do not support all options.
Repeated properties are always represented as Python lists; if there
is only one value, the list has only one element. When a new list is
assigned to a repeated property, all elements of the list are
validated. Since it is also possible to mutate lists in place,
repeated properties are re-validated before they are written to the
datastore.
No validation happens when an entity is read from the datastore;
however property values read that have the wrong type (e.g. a string
value for an IntegerProperty) are ignored.
For non-repeated properties, None is always a possible value, and no
validation is called when the value is set to None. However for
required properties, writing the entity to the datastore requires
the value to be something other than None (and valid).
The StructuredProperty is different from most other properties; it
lets you define a sub-structure for your entities. The substructure
itself is defined using a model class, and the attribute value is an
instance of that model class. However it is not stored in the
datastore as a separate entity; instead, its attribute values are
included in the parent entity using a naming convention (the name of
the structured attribute followed by a dot followed by the name of the
subattribute). For example:
class Address(Model):
street = StringProperty()
city = StringProperty()
class Person(Model):
name = StringProperty()
address = StructuredProperty(Address)
p = Person(name='Harry Potter',
address=Address(street='4 Privet Drive',
city='Little Whinging'))
    p.put()
This would write a single 'Person' entity with three attributes (as
you could verify using the Datastore Viewer in the Admin Console):
name = 'Harry Potter'
address.street = '4 Privet Drive'
address.city = 'Little Whinging'
Structured property types can be nested arbitrarily deep, but in a
hierarchy of nested structured property types, only one level can have
the repeated flag set. It is fine to have multiple structured
properties referencing the same model class.
It is also fine to use the same model class both as a top-level entity
class and as for a structured property; however queries for the model
class will only return the top-level entities.
The LocalStructuredProperty works similar to StructuredProperty on the
Python side. For example:
class Address(Model):
street = StringProperty()
city = StringProperty()
class Person(Model):
name = StringProperty()
address = LocalStructuredProperty(Address)
p = Person(name='Harry Potter',
address=Address(street='4 Privet Drive',
city='Little Whinging'))
    p.put()
However the data written to the datastore is different; it writes a
'Person' entity with a 'name' attribute as before and a single
'address' attribute whose value is a blob which encodes the Address
value (using the standard "protocol buffer" encoding).
Sometimes the set of properties is not known ahead of time. In such
cases you can use the Expando class. This is a Model subclass that
creates properties on the fly, both upon assignment and when loading
an entity from the datastore. For example:
class SuperPerson(Expando):
name = StringProperty()
superpower = StringProperty()
razorgirl = SuperPerson(name='Molly Millions',
superpower='bionic eyes, razorblade hands',
rasta_name='Steppin\' Razor',
alt_name='Sally Shears')
elastigirl = SuperPerson(name='Helen Parr',
superpower='stretchable body')
elastigirl.max_stretch = 30 # Meters
You can inspect the properties of an expando instance using the
_properties attribute:
>>> print razorgirl._properties.keys()
['rasta_name', 'name', 'superpower', 'alt_name']
>>> print elastigirl._properties
{'max_stretch': GenericProperty('max_stretch'),
'name': StringProperty('name'),
'superpower': StringProperty('superpower')}
Note: this property exists for plain Model instances too; it is just
not as interesting for those.
The Model class offers basic query support. You can create a Query
object by calling the query() class method. Iterating over a Query
object returns the entities matching the query one at a time.
Query objects are fully described in the docstring for query.py, but
there is one handy shortcut that is only available through
Model.query(): positional arguments are interpreted as filter
expressions which are combined through an AND operator. For example:
Person.query(Person.name == 'Harry Potter', Person.age >= 11)
is equivalent to:
Person.query().filter(Person.name == 'Harry Potter', Person.age >= 11)
Keyword arguments passed to .query() are passed along to the Query()
constructor.
It is possible to query for field values of structured properties.  For
example:
qry = Person.query(Person.address.city == 'London')
A number of top-level functions also live in this module:
- transaction() runs a function inside a transaction
- get_multi() reads multiple entities at once
- put_multi() writes multiple entities at once
- delete_multi() deletes multiple entities at once
All these have a corresponding *_async() variant as well.
The *_multi_async() functions return a list of Futures.
And finally these (without async variants):
- in_transaction() tests whether you are currently running in a transaction
- @transactional decorates functions that should be run in a transaction
There are many other interesting features. For example, Model
subclasses may define pre-call and post-call hooks for most operations
(get, put, delete, allocate_ids), and Property classes may be
subclassed to suit various needs. Documentation for writing a
Property subclass is in the docstring for the Property class.
"""
__author__ = 'guido@google.com (Guido van Rossum)'
import copy
import cPickle as pickle
import datetime
import zlib
from .google_imports import datastore_errors
from .google_imports import datastore_types
from .google_imports import users
from .google_imports import datastore_query
from .google_imports import datastore_rpc
from .google_imports import entity_pb
from . import utils
# NOTE: 'key' is a common local variable name.
from . import key as key_module
Key = key_module.Key # For export.
# NOTE: Property and Error classes are added later.
__all__ = ['Key', 'BlobKey', 'GeoPt', 'Rollback',
           'Index', 'IndexState', 'IndexProperty',
           'ModelAdapter', 'ModelAttribute',
           'ModelKey', 'MetaModel', 'Model', 'Expando',
           'transaction', 'transaction_async',
           'in_transaction', 'transactional',
           'get_multi', 'get_multi_async',
           'put_multi', 'put_multi_async',
           'delete_multi', 'delete_multi_async',
           'get_indexes', 'get_indexes_async',
           'make_connection',
           ]
# Aliases re-exported from the low-level datastore modules so that users
# can reference them via this package without extra imports.
BlobKey = datastore_types.BlobKey
GeoPt = datastore_types.GeoPt
Rollback = datastore_errors.Rollback
class KindError(datastore_errors.BadValueError):
  """Raised when no implementation can be found for a kind.

  Also raised when the kind is not an 8-bit string.
  """
class ComputedPropertyError(datastore_errors.Error):
  """Raised on an attempt to assign a value to a computed property."""
# Various imported limits.
_MAX_LONG = key_module._MAX_LONG
_MAX_STRING_LENGTH = datastore_types._MAX_STRING_LENGTH
# Map index directions to human-readable strings.
# Used by ModelAdapter.pb_to_index() and matches IndexProperty.direction.
_DIR_MAP = {
    entity_pb.Index_Property.ASCENDING: 'asc',
    entity_pb.Index_Property.DESCENDING: 'desc',
}
# Map index states to human-readable strings.
# Used by ModelAdapter.pb_to_index() and matches IndexState.state.
_STATE_MAP = {
    entity_pb.CompositeIndex.ERROR: 'error',
    entity_pb.CompositeIndex.DELETED: 'deleting',
    entity_pb.CompositeIndex.READ_WRITE: 'serving',
    entity_pb.CompositeIndex.WRITE_ONLY: 'building',
}
class _NotEqualMixin(object):
"""Mix-in class that implements __ne__ in terms of __eq__."""
def __ne__(self, other):
"""Implement self != other as not(self == other)."""
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
class IndexProperty(_NotEqualMixin):
  """Immutable value object describing one property within an index."""

  @utils.positional(1)
  def __new__(cls, name, direction):
    """Create an IndexProperty with the given name and direction."""
    self = object.__new__(cls)
    self.__name = name
    self.__direction = direction
    return self

  @property
  def name(self):
    """The property name being indexed, a string."""
    return self.__name

  @property
  def direction(self):
    """The direction in the index for this property, 'asc' or 'desc'."""
    return self.__direction

  def __repr__(self):
    """Return a string representation."""
    return '%s(name=%r, direction=%r)' % (
        self.__class__.__name__, self.name, self.direction)

  def __eq__(self, other):
    """Compare two index properties for equality."""
    if not isinstance(other, IndexProperty):
      return NotImplemented
    return (self.name, self.direction) == (other.name, other.direction)

  def __hash__(self):
    return hash((self.name, self.direction))
class Index(_NotEqualMixin):
  """Immutable value object describing a composite index."""

  @utils.positional(1)
  def __new__(cls, kind, properties, ancestor):
    """Create an Index for the given kind, properties and ancestor flag."""
    self = object.__new__(cls)
    self.__kind = kind
    self.__properties = properties
    self.__ancestor = ancestor
    return self

  @property
  def kind(self):
    """The kind being indexed, a string."""
    return self.__kind

  @property
  def properties(self):
    """A list of PropertyIndex objects giving the properties being indexed."""
    return self.__properties

  @property
  def ancestor(self):
    """Whether this is an ancestor index, a bool."""
    return self.__ancestor

  def __repr__(self):
    """Return a string representation."""
    return '%s(kind=%r, properties=%r, ancestor=%s)' % (
        self.__class__.__name__, self.kind, self.properties, self.ancestor)

  def __eq__(self, other):
    """Compare two indexes."""
    if not isinstance(other, Index):
      return NotImplemented
    return ((self.kind, self.properties, self.ancestor) ==
            (other.kind, other.properties, other.ancestor))

  def __hash__(self):
    return hash((self.kind, self.properties, self.ancestor))
class IndexState(_NotEqualMixin):
  """Immutable object representing an index and its state."""

  @utils.positional(1)
  def __new__(cls, definition, state, id):
    """Create an IndexState wrapping an Index definition."""
    self = object.__new__(cls)
    self.__definition = definition
    self.__state = state
    self.__id = id
    return self

  @property
  def definition(self):
    """An Index object describing the index."""
    return self.__definition

  @property
  def state(self):
    """The index state, a string.

    Possible values are 'error', 'deleting', 'serving' or 'building'.
    """
    return self.__state

  @property
  def id(self):
    """The index ID, an integer."""
    return self.__id

  def __repr__(self):
    """Return a string representation."""
    return '%s(definition=%r, state=%r, id=%d)' % (
        self.__class__.__name__, self.definition, self.state, self.id)

  def __eq__(self, other):
    """Compare two index states."""
    if not isinstance(other, IndexState):
      return NotImplemented
    return ((self.definition, self.state, self.id) ==
            (other.definition, other.state, other.id))

  def __hash__(self):
    return hash((self.definition, self.state, self.id))
class ModelAdapter(datastore_rpc.AbstractAdapter):
  """Conversions between 'our' Key and Model classes and protobufs.

  This is needed to construct a Connection object, which in turn is
  needed to construct a Context object.

  See the base class docstring for more info about the signatures.
  """

  def __init__(self, default_model=None):
    """Constructor.

    Args:
      default_model: If an implementation for the kind cannot be found, use
        this model class. If none is specified, an exception will be thrown
        (default).
    """
    self.default_model = default_model
    # Counter rather than a bool so nested 'with adapter:' blocks stack.
    self.want_pbs = 0

  # Make this a context manager to request setting _orig_pb.
  # Used in query.py by _MultiQuery.run_to_queue().

  def __enter__(self):
    self.want_pbs += 1

  def __exit__(self, *unused_args):
    self.want_pbs -= 1

  def pb_to_key(self, pb):
    # Convert a Reference pb into an ndb Key.
    return Key(reference=pb)

  def key_to_pb(self, key):
    # Convert an ndb Key into its Reference pb.
    return key.reference()

  def pb_to_entity(self, pb):
    # Convert an EntityProto pb into a Model (subclass) instance.
    # Raises KindError if no model class is registered for the kind and
    # no default_model was supplied.
    key = None
    kind = None
    if pb.has_key():
      # Extract the key and its kind to look up the model class.
      key = Key(reference=pb.key())
      kind = key.kind()
    modelclass = Model._kind_map.get(kind, self.default_model)
    if modelclass is None:
      raise KindError(
          "No model class found for kind '%s'. Did you forget to import it?" %
          kind)
    entity = modelclass._from_pb(pb, key=key, set_key=False)
    if self.want_pbs:
      # Inside a 'with adapter:' block; attach the raw pb for callers
      # (see query.py) that need access to it.
      entity._orig_pb = pb
    return entity

  def entity_to_pb(self, ent):
    # Convert a Model (subclass) instance into an EntityProto pb.
    pb = ent._to_pb()
    return pb

  def pb_to_index(self, pb):
    # Convert a CompositeIndex pb into an IndexState wrapping an Index,
    # translating directions and states via _DIR_MAP / _STATE_MAP.
    index_def = pb.definition()
    properties = [IndexProperty(name=prop.name(),
                                direction=_DIR_MAP[prop.direction()])
                  for prop in index_def.property_list()]
    index = Index(kind=index_def.entity_type(),
                  properties=properties,
                  ancestor=bool(index_def.ancestor()),
                  )
    index_state = IndexState(definition=index,
                             state=_STATE_MAP[pb.state()],
                             id=pb.id(),
                             )
    return index_state
def make_connection(config=None, default_model=None):
  """Create a new Connection object with the right adapter.

  Optionally you can pass in a datastore_rpc.Configuration object.
  """
  adapter = ModelAdapter(default_model)
  return datastore_rpc.Connection(adapter=adapter, config=config)
class ModelAttribute(object):
  """Base class signifying the presence of a _fix_up() method."""

  def _fix_up(self, cls, code_name):
    """Hook called while a Model subclass is built; default is a no-op."""
    pass
class _BaseValue(_NotEqualMixin):
  """Marker wrapper distinguishing 'base type' values from user values.

  An entry in ent._values[name] is either a user value (a type the
  Python application code understands) or a base value (a type the
  serialization layer understands).  User values are stored bare;
  base values are stored wrapped in a _BaseValue instance.
  """

  __slots__ = ['b_val']

  def __init__(self, b_val):
    """Wrap b_val; it must not be None and must not be a list."""
    assert b_val is not None
    assert not isinstance(b_val, list), repr(b_val)
    self.b_val = b_val

  def __repr__(self):
    return '_BaseValue(%r)' % (self.b_val,)

  def __eq__(self, other):
    if isinstance(other, _BaseValue):
      return self.b_val == other.b_val
    return NotImplemented

  def __hash__(self):
    raise TypeError('_BaseValue is not immutable')
class Property(ModelAttribute):
"""A class describing a typed, persisted attribute of a datastore entity.
Not to be confused with Python's 'property' built-in.
This is just a base class; there are specific subclasses that
describe Properties of various types (and GenericProperty which
describes a dynamically typed Property).
All special Property attributes, even those considered 'public',
have names starting with an underscore, because StructuredProperty
uses the non-underscore attribute namespace to refer to nested
Property names; this is essential for specifying queries on
subproperties (see the module docstring).
The Property class and its predefined subclasses allow easy
subclassing using composable (or stackable) validation and
conversion APIs. These require some terminology definitions:
- A 'user value' is a value such as would be set and accessed by the
application code using standard attributes on the entity.
- A 'base value' is a value such as would be serialized to
and deserialized from the datastore.
The values stored in ent._values[name] and accessed by
_store_value() and _retrieve_value() can be either user values or
base values. To retrieve user values, use
_get_user_value(). To retrieve base values, use
_get_base_value(). In particular, _get_value() calls
_get_user_value(), and _serialize() effectively calls
_get_base_value().
To store a user value, just call _store_value(). To store a
base value, wrap the value in a _BaseValue() and then
call _store_value().
A Property subclass that wants to implement a specific
transformation between user values and serialiazble values should
implement two methods, _to_base_type() and _from_base_type().
These should *NOT* call their super() method; super calls are taken
care of by _call_to_base_type() and _call_from_base_type().
This is what is meant by composable (or stackable) APIs.
The API supports 'stacking' classes with ever more sophisticated
user<-->base conversions: the user-->base conversion
goes from more sophisticated to less sophisticated, while the
base-->user conversion goes from less sophisticated to more
sophisticated. For example, see the relationship between
BlobProperty, TextProperty and StringProperty.
In addition to _to_base_type() and _from_base_type(), the
_validate() method is also a composable API.
The validation API distinguishes between 'lax' and 'strict' user
values. The set of lax values is a superset of the set of strict
values. The _validate() method takes a lax value and if necessary
converts it to a strict value. This means that when setting the
property value, lax values are accepted, while when getting the
property value, only strict values will be returned. If no
conversion is needed, _validate() may return None. If the argument
is outside the set of accepted lax values, _validate() should raise
an exception, preferably TypeError or
datastore_errors.BadValueError.
Example/boilerplate:
def _validate(self, value):
'Lax user value to strict user value.'
if not isinstance(value, <top type>):
raise TypeError(...) # Or datastore_errors.BadValueError(...).
def _to_base_type(self, value):
'(Strict) user value to base value.'
if isinstance(value, <user type>):
return <base type>(value)
def _from_base_type(self, value):
'base value to (strict) user value.'
if not isinstance(value, <base type>):
return <user type>(value)
Things that _validate(), _to_base_type() and _from_base_type()
do *not* need to handle:
- None: They will not be called with None (and if they return None,
this means that the value does not need conversion).
- Repeated values: The infrastructure (_get_user_value() and
_get_base_value()) takes care of calling
_from_base_type() or _to_base_type() for each list item in a
repeated value.
- Wrapping values in _BaseValue(): The wrapping and unwrapping is
taken care of by the infrastructure that calls the composable APIs.
- Comparisons: The comparison operations call _to_base_type() on
their operand.
- Distinguishing between user and base values: the
infrastructure guarantees that _from_base_type() will be called
with an (unwrapped) base value, and that
_to_base_type() will be called with a user value.
- Returning the original value: if any of these return None, the
  original value is kept.  (Returning a different value not equal to
None will substitute the different value.)
"""
# TODO: Separate 'simple' properties from base Property class
_code_name = None
_name = None
_indexed = True
_repeated = False
_required = False
_default = None
_choices = None
_validator = None
_verbose_name = None
_attributes = ['_name', '_indexed', '_repeated', '_required', '_default',
'_choices', '_validator', '_verbose_name']
_positional = 1 # Only name is a positional argument.
@utils.positional(1 + _positional) # Add 1 for self.
def __init__(self, name=None, indexed=None, repeated=None,
required=None, default=None, choices=None, validator=None,
verbose_name=None):
"""Constructor. For arguments see the module docstring."""
if name is not None:
if isinstance(name, unicode):
name = name.encode('utf-8')
if not isinstance(name, str):
raise TypeError('Name %r is not a string' % (name,))
if '.' in name:
raise ValueError('Name %r cannot contain period characters' % (name,))
self._name = name
if indexed is not None:
self._indexed = indexed
if repeated is not None:
self._repeated = repeated
if required is not None:
self._required = required
if default is not None:
# TODO: Call _validate() on default?
self._default = default
if verbose_name is not None:
self._verbose_name = verbose_name
if (bool(self._repeated) +
bool(self._required) +
(self._default is not None)) > 1:
raise ValueError('repeated, required and default are mutally exclusive.')
if choices is not None:
if not isinstance(choices, (list, tuple, set, frozenset)):
raise TypeError('choices must be a list, tuple or set; received %r' %
choices)
# TODO: Call _validate() on each choice?
self._choices = frozenset(choices)
if validator is not None:
# The validator is called as follows:
# value = validator(prop, value)
# It should return the value to be used, or raise an exception.
# It should be idempotent, i.e. calling it a second time should
# not further modify the value. So a validator that returns e.g.
# value.lower() or value.strip() is fine, but one that returns
# value + '$' is not.
if not hasattr(validator, '__call__'):
raise TypeError('validator must be callable or None; received %r' %
validator)
self._validator = validator
def __repr__(self):
"""Return a compact unambiguous string representation of a property."""
args = []
cls = self.__class__
for i, attr in enumerate(self._attributes):
val = getattr(self, attr)
if val is not getattr(cls, attr):
if isinstance(val, type):
s = val.__name__
else:
s = repr(val)
if i >= cls._positional:
if attr.startswith('_'):
attr = attr[1:]
s = '%s=%s' % (attr, s)
args.append(s)
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
  def _datastore_type(self, value):
    """Internal hook used by property filters.

    Sometimes the low-level query interface needs a specific data type
    in order for the right filter to be constructed.  See _comparison().
    """
    # Identity by default; subclasses may convert the value.
    return value
def _comparison(self, op, value):
"""Internal helper for comparison operators.
Args:
op: The operator ('=', '<' etc.).
Returns:
A FilterNode instance representing the requested comparison.
"""
# NOTE: This is also used by query.gql().
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if value is not None:
value = self._do_validate(value)
value = self._call_to_base_type(value)
value = self._datastore_type(value)
return FilterNode(self._name, op, value)
  # Comparison operators on Property instances don't compare the
  # properties; instead they return FilterNode instances that can be
  # used in queries.  See the module docstrings above and in query.py
  # for details on how these can be used.

  def __eq__(self, value):
    """Return a FilterNode instance representing the '=' comparison."""
    # NOTE: This means two Property objects cannot meaningfully be
    # compared with ==; use 'is' for identity checks.
    return self._comparison('=', value)
  def __ne__(self, value):
    """Return a FilterNode instance representing the '!=' comparison."""
    # Unlike typical classes, != does not negate ==; both build filters.
    return self._comparison('!=', value)
  def __lt__(self, value):
    """Return a FilterNode instance representing the '<' comparison."""
    # _comparison() validates the operand and converts it to base form.
    return self._comparison('<', value)
  def __le__(self, value):
    """Return a FilterNode instance representing the '<=' comparison."""
    # _comparison() validates the operand and converts it to base form.
    return self._comparison('<=', value)
  def __gt__(self, value):
    """Return a FilterNode instance representing the '>' comparison."""
    # _comparison() validates the operand and converts it to base form.
    return self._comparison('>', value)
  def __ge__(self, value):
    """Return a FilterNode instance representing the '>=' comparison."""
    # _comparison() validates the operand and converts it to base form.
    return self._comparison('>=', value)
def _IN(self, value):
"""Comparison operator for the 'in' comparison operator.
The Python 'in' operator cannot be overloaded in the way we want
to, so we define a method. For example:
Employee.query(Employee.rank.IN([4, 5, 6]))
Note that the method is called ._IN() but may normally be invoked
as .IN(); ._IN() is provided for the case you have a
StructuredProperty with a model that has a Property named IN.
"""
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadArgumentError(
'Expected list, tuple or set, got %r' % (value,))
values = []
for val in value:
if val is not None:
val = self._do_validate(val)
val = self._call_to_base_type(val)
val = self._datastore_type(val)
values.append(val)
return FilterNode(self._name, 'in', values)
IN = _IN
  def __neg__(self):
    """Return a descending sort order on this Property.

    For example:

      Employee.query().order(-Employee.rank)
    """
    # Sort orders are expressed at the datastore_query layer.
    return datastore_query.PropertyOrder(
        self._name, datastore_query.PropertyOrder.DESCENDING)
  def __pos__(self):
    """Return an ascending sort order on this Property.

    Note that this is redundant but provided for consistency with
    __neg__.  For example, the following two are equivalent:

      Employee.query().order(+Employee.rank)
      Employee.query().order(Employee.rank)
    """
    # Ascending is PropertyOrder's default direction.
    return datastore_query.PropertyOrder(self._name)
def _do_validate(self, value):
"""Call all validations on the value.
This calls the most derived _validate() method(s), then the custom
validator function, and then checks the choices. It returns the
value, possibly modified in an idempotent way, or raises an
exception.
Note that this does not call all composable _validate() methods.
It only calls _validate() methods up to but not including the
first _to_base_type() method, when the MRO is traversed looking
for _validate() and _to_base_type() methods. (IOW if a class
defines both _validate() and _to_base_type(), its _validate()
is called and then the search is aborted.)
Note that for a repeated Property this function should be called
for each item in the list, not for the list as a whole.
"""
if isinstance(value, _BaseValue):
return value
value = self._call_shallow_validation(value)
if self._validator is not None:
newvalue = self._validator(self, value)
if newvalue is not None:
value = newvalue
if self._choices is not None:
if value not in self._choices:
raise datastore_errors.BadValueError(
'Value %r for property %s is not an allowed choice' %
(value, self._name))
return value
def _fix_up(self, cls, code_name):
"""Internal helper called to tell the property its name.
This is called by _fix_up_properties() which is called by
MetaModel when finishing the construction of a Model subclass.
The name passed in is the name of the class attribute to which the
Property is assigned (a.k.a. the code name). Note that this means
that each Property instance must be assigned to (at most) one
class attribute. E.g. to declare three strings, you must call
StringProperty() three times, you cannot write
foo = bar = baz = StringProperty()
"""
self._code_name = code_name
if self._name is None:
self._name = code_name
  def _store_value(self, entity, value):
    """Internal helper to store a value in an entity for a Property.

    This assumes validation has already taken place.  For a repeated
    Property the value should be a list.
    """
    # Values are keyed by the datastore name (self._name), not the
    # Python attribute name (self._code_name).
    entity._values[self._name] = value
def _set_value(self, entity, value):
"""Internal helper to set a value in an entity for a Property.
This performs validation first. For a repeated Property the value
should be a list.
"""
if self._repeated:
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadValueError('Expected list or tuple, got %r' %
(value,))
value = [self._do_validate(v) for v in value]
else:
if value is not None:
value = self._do_validate(value)
self._store_value(entity, value)
  def _has_value(self, entity, unused_rest=None):
    """Internal helper to ask if the entity has a value for this Property."""
    # NOTE(review): unused_rest appears to exist so overrides taking an
    # extra argument can be called uniformly — confirm against the
    # structured property subclasses.
    return self._name in entity._values
  def _retrieve_value(self, entity, default=None):
    """Internal helper to retrieve the value for this Property from an entity.

    This returns None if no value is set, or the default argument if
    given.  For a repeated Property this returns a list if a value is
    set, otherwise None.  No additional transformations are applied.
    """
    # The result may be a user value or a _BaseValue-wrapped base value.
    return entity._values.get(self._name, default)
  def _get_user_value(self, entity):
    """Return the user value for this property of the given entity.

    This implies removing the _BaseValue() wrapper if present, and
    if it is, calling all _from_base_type() methods, in the reverse
    method resolution order of the property's class.  It also handles
    default values and repeated properties.
    """
    # _apply_to_values() also writes the converted value back into the
    # entity, so the conversion happens at most once per value.
    return self._apply_to_values(entity, self._opt_call_from_base_type)
  def _get_base_value(self, entity):
    """Return the base value for this property of the given entity.

    This implies calling all _to_base_type() methods, in the method
    resolution order of the property's class, and adding a
    _BaseValue() wrapper, if one is not already present.  (If one
    is present, no work is done.)  It also handles default values and
    repeated properties.
    """
    # _apply_to_values() also writes the converted value back into the
    # entity, so the conversion happens at most once per value.
    return self._apply_to_values(entity, self._opt_call_to_base_type)
# TODO: Invent a shorter name for this.
def _get_base_value_unwrapped_as_list(self, entity):
"""Like _get_base_value(), but always returns a list.
Returns:
A new list of unwrapped base values. For an unrepeated
property, if the value is missing or None, returns [None]; for a
repeated property, if the original value is missing or None or
empty, returns [].
"""
wrapped = self._get_base_value(entity)
if self._repeated:
if wrapped is None:
return []
assert isinstance(wrapped, list)
return [w.b_val for w in wrapped]
else:
if wrapped is None:
return [None]
assert isinstance(wrapped, _BaseValue)
return [wrapped.b_val]
def _opt_call_from_base_type(self, value):
"""Call _from_base_type() if necessary.
If the value is a _BaseValue instance, unwrap it and call all
_from_base_type() methods. Otherwise, return the value
unchanged.
"""
if isinstance(value, _BaseValue):
value = self._call_from_base_type(value.b_val)
return value
def _opt_call_to_base_type(self, value):
"""Call _to_base_type() if necessary.
If the value is a _BaseValue instance, return it unchanged.
Otherwise, call all _validate() and _to_base_type() methods and
wrap it in a _BaseValue instance.
"""
if not isinstance(value, _BaseValue):
value = _BaseValue(self._call_to_base_type(value))
return value
def _call_from_base_type(self, value):
"""Call all _from_base_type() methods on the value.
This calls the methods in the reverse method resolution order of
the property's class.
"""
methods = self._find_methods('_from_base_type', reverse=True)
call = self._apply_list(methods)
return call(value)
def _call_to_base_type(self, value):
"""Call all _validate() and _to_base_type() methods on the value.
This calls the methods in the method resolution order of the
property's class.
"""
methods = self._find_methods('_validate', '_to_base_type')
call = self._apply_list(methods)
return call(value)
def _call_shallow_validation(self, value):
"""Call the initial set of _validate() methods.
This is similar to _call_to_base_type() except it only calls
those _validate() methods that can be called without needing to
call _to_base_type().
An example: suppose the class hierarchy is A -> B -> C ->
Property, and suppose A defines _validate() only, but B and C
define _validate() and _to_base_type(). The full list of
methods called by _call_to_base_type() is:
A._validate()
B._validate()
B._to_base_type()
C._validate()
C._to_base_type()
This method will call A._validate() and B._validate() but not the
others.
"""
methods = []
for method in self._find_methods('_validate', '_to_base_type'):
if method.__name__ != '_validate':
break
methods.append(method)
call = self._apply_list(methods)
return call(value)
  @classmethod
  def _find_methods(cls, *names, **kwds):
    """Compute a list of composable methods.

    Because this is a common operation and the class hierarchy is
    static, the outcome is cached (assuming that for a particular list
    of names the reversed flag is either always on, or always off).

    Args:
      *names: One or more method names.
      reverse: Optional flag, default False; if True, the list is
        reversed.

    Returns:
      A list of callable class method objects.
    """
    reverse = kwds.pop('reverse', False)
    assert not kwds, repr(kwds)
    # Look only in cls's own __dict__ so each subclass gets its own
    # cache rather than inheriting (and polluting) a parent's.
    cache = cls.__dict__.get('_find_methods_cache')
    if cache:
      hit = cache.get(names)
      if hit is not None:
        return hit
    else:
      cls._find_methods_cache = cache = {}
    methods = []
    # Walk the MRO from most derived to base; for each class, collect
    # the named methods in the order the names were passed.
    for c in cls.__mro__:
      for name in names:
        method = c.__dict__.get(name)
        if method is not None:
          methods.append(method)
    if reverse:
      methods.reverse()
    cache[names] = methods
    return methods
def _apply_list(self, methods):
"""Return a single callable that applies a list of methods to a value.
If a method returns None, the last value is kept; if it returns
some other value, that replaces the last value. Exceptions are
not caught.
"""
def call(value):
for method in methods:
newvalue = method(self, value)
if newvalue is not None:
value = newvalue
return value
return call
  def _apply_to_values(self, entity, function):
    """Apply a function to the property value/values of a given entity.

    This retrieves the property value, applies the function, and then
    stores the value back.  For a repeated property, the function is
    applied separately to each of the values in the list.  The
    resulting value or list of values is both stored back in the
    entity and returned from this method.
    """
    value = self._retrieve_value(entity, self._default)
    if self._repeated:
      if value is None:
        # Materialize the default empty list so later in-place
        # mutations are reflected in the entity.
        value = []
        self._store_value(entity, value)
      else:
        # Mutate the list in place so existing aliases see the
        # converted values.
        value[:] = map(function, value)
    else:
      if value is not None:
        newvalue = function(value)
        if newvalue is not None and newvalue is not value:
          # Store back only when the function produced a new object.
          self._store_value(entity, newvalue)
          value = newvalue
    return value
  def _get_value(self, entity):
    """Internal helper to get the value for this Property from an entity.

    For a repeated Property this initializes the value to an empty
    list if it is not set.
    """
    # The descriptor protocol (__get__) delegates here.
    return self._get_user_value(entity)
def _delete_value(self, entity):
"""Internal helper to delete the value for this Property from an entity.
Note that if no value exists this is a no-op; deleted values will
not be serialized but requesting their value will return None (or
an empty list in the case of a repeated Property).
"""
if self._name in entity._values:
del entity._values[self._name]
def _is_initialized(self, entity):
"""Internal helper to ask if the entity has a value for this Property.
This returns False if a value is stored but it is None.
"""
return not self._required or (self._has_value(entity) and
self._get_value(entity) is not None)
def __get__(self, entity, unused_cls=None):
"""Descriptor protocol: get the value from the entity."""
if entity is None:
return self # __get__ called on class
return self._get_value(entity)
  def __set__(self, entity, value):
    """Descriptor protocol: set the value on the entity."""
    # Validation/coercion of the value happens inside _set_value().
    self._set_value(entity, value)
  def __delete__(self, entity):
    """Descriptor protocol: delete the value from the entity."""
    # Deleting a missing value is a no-op (see _delete_value()).
    self._delete_value(entity)
  def _serialize(self, entity, pb, prefix='', parent_repeated=False):
    """Internal helper to serialize this property to a protocol buffer.
    Subclasses may override this method.
    Args:
      entity: The entity, a Model (subclass) instance.
      pb: The protocol buffer, an EntityProto instance.
      prefix: Optional name prefix used for StructuredProperty
        (if present, must end in '.').
      parent_repeated: True if the parent (or an earlier ancestor)
        is a repeated Property.
    """
    values = self._get_base_value_unwrapped_as_list(entity)
    for val in values:
      # Indexed values go into the property list, unindexed values
      # into raw_property.
      if self._indexed:
        p = pb.add_property()
      else:
        p = pb.add_raw_property()
      p.set_name(prefix + self._name)
      p.set_multiple(self._repeated or parent_repeated)
      v = p.mutable_value()
      # A None value leaves the PropertyValue empty (no union member set).
      if val is not None:
        self._db_set_value(v, p, val)
  def _deserialize(self, entity, p, unused_depth=1):
    """Internal helper to deserialize this property from a protocol buffer.
    Subclasses may override this method.
    Args:
      entity: The entity, a Model (subclass) instance.
      p: A Property Message object (a protocol buffer).
      depth: Optional nesting depth, default 1 (unused here, but used
        by some subclasses that override this method).
    """
    v = p.value()
    val = self._db_get_value(v, p)
    if val is not None:
      # Wrap so the stored value is marked as already being in base
      # (wire) form; _get_user_value() converts it back on access.
      val = _BaseValue(val)
    if self._repeated:
      if self._has_value(entity):
        # Repeated values arrive one pb at a time; accumulate onto the
        # existing list.
        value = self._retrieve_value(entity)
        assert isinstance(value, list), repr(value)
        value.append(val)
      else:
        value = [val]
    else:
      value = val
    self._store_value(entity, value)
  def _prepare_for_put(self, entity):
    """Hook called just before an entity is written to the datastore.
    The default implementation does nothing; subclasses (e.g.
    UserProperty and DateTimeProperty) override it to fill in
    auto-updated values.
    """
    pass
  def _get_for_dict(self, entity):
    """Retrieve the value like _get_value(), processed for _to_dict().
    Property subclasses can override this if they want the dictionary
    returned by entity._to_dict() to contain a different value. The
    main use case is StructuredProperty and LocalStructuredProperty.
    """
    # Default: identical to the user-visible value.
    return self._get_value(entity)
def _validate_key(value, entity=None):
  """Check that value is a Key, optionally matching the entity's kind.

  Returns the value unchanged; raises BadValueError for non-Keys and
  KindError on a kind mismatch.
  """
  if not isinstance(value, Key):
    # TODO: BadKeyError.
    raise datastore_errors.BadValueError('Expected Key, got %r' % value)
  if entity and entity.__class__ not in (Model, Expando):
    expected_kind = entity._get_kind()
    if value.kind() != expected_kind:
      raise KindError('Expected Key kind to be %s; received %s' %
                      (expected_kind, value.kind()))
  return value
class ModelKey(Property):
  """Special property to store the Model key."""
  def __init__(self):
    super(ModelKey, self).__init__()
    # The key is always serialized under the reserved name '__key__'.
    self._name = '__key__'
  def _datastore_type(self, value):
    # Convert an NDB Key to the low-level datastore Key type for queries.
    return datastore_types.Key(value.urlsafe())
  def _comparison(self, op, value):
    """Build a filter node for a __key__ comparison; None is rejected."""
    if value is not None:
      return super(ModelKey, self)._comparison(op, value)
    raise datastore_errors.BadValueError(
        "__key__ filter query can't be compared to None")
  # TODO: Support IN().
  def _validate(self, value):
    return _validate_key(value)
  def _set_value(self, entity, value):
    """Setter for key attribute."""
    if value is not None:
      # Module-level check (type and kind), then the model's own hook.
      value = _validate_key(value, entity=entity)
      value = entity._validate_key(value)
    entity._entity_key = value
  def _get_value(self, entity):
    """Getter for key attribute."""
    return entity._entity_key
  def _delete_value(self, entity):
    """Deleter for key attribute."""
    entity._entity_key = None
class BooleanProperty(Property):
  """A Property whose value is a Python bool."""
  # TODO: Allow int/long values equal to 0 or 1?
  def _validate(self, value):
    """Accept only genuine bool values."""
    if isinstance(value, bool):
      return value
    raise datastore_errors.BadValueError('Expected bool, got %r' %
                                         (value,))
  def _db_set_value(self, v, unused_p, value):
    """Write the bool into the PropertyValue pb."""
    if not isinstance(value, bool):
      raise TypeError('BooleanProperty %s can only be set to bool values; '
                      'received %r' % (self._name, value))
    v.set_booleanvalue(value)
  def _db_get_value(self, v, unused_p):
    """Read the bool back from the PropertyValue pb (None if unset)."""
    if not v.has_booleanvalue():
      return None
    # The underlying field is an int32, so coerce back to a Python bool.
    return bool(v.booleanvalue())
class IntegerProperty(Property):
  """A Property whose value is a Python int or long (or bool)."""
  def _validate(self, value):
    """Accept int/long (bool is an int subclass) and normalize to int."""
    if isinstance(value, (int, long)):
      return int(value)
    raise datastore_errors.BadValueError('Expected integer, got %r' %
                                         (value,))
  def _db_set_value(self, v, unused_p, value):
    """Write the integer into the PropertyValue pb."""
    if not isinstance(value, (bool, int, long)):
      raise TypeError('IntegerProperty %s can only be set to integer values; '
                      'received %r' % (self._name, value))
    v.set_int64value(value)
  def _db_get_value(self, v, unused_p):
    """Read the integer back from the PropertyValue pb (None if unset)."""
    if not v.has_int64value():
      return None
    return int(v.int64value())
class FloatProperty(Property):
  """A Property whose value is a Python float.
  Note: int, long and bool are also allowed.
  """
  def _validate(self, value):
    """Accept numeric values and normalize them to float."""
    if isinstance(value, (int, long, float)):
      return float(value)
    raise datastore_errors.BadValueError('Expected float, got %r' %
                                         (value,))
  def _db_set_value(self, v, unused_p, value):
    """Write the number into the PropertyValue pb as a double."""
    if not isinstance(value, (bool, int, long, float)):
      raise TypeError('FloatProperty %s can only be set to integer or float '
                      'values; received %r' % (self._name, value))
    v.set_doublevalue(float(value))
  def _db_get_value(self, v, unused_p):
    """Read the double back from the PropertyValue pb (None if unset)."""
    if not v.has_doublevalue():
      return None
    return v.doublevalue()
# A custom 'meaning' URI marking property values whose bytes are
# zlib-compressed (see _CompressedValue and BlobProperty below).
_MEANING_URI_COMPRESSED = 'ZLIB'
class _CompressedValue(_NotEqualMixin):
  """A marker object wrapping compressed values."""
  __slots__ = ['z_val']
  def __init__(self, z_val):
    """Constructor. Argument is a string returned by zlib.compress()."""
    assert isinstance(z_val, str), repr(z_val)
    self.z_val = z_val
  def __repr__(self):
    return '_CompressedValue(%s)' % repr(self.z_val)
  def __eq__(self, other):
    """Equal iff the other side is a _CompressedValue with the same bytes."""
    if isinstance(other, _CompressedValue):
      return self.z_val == other.z_val
    return NotImplemented
  def __hash__(self):
    # Deliberately unhashable: instances wrap mutable-by-convention data.
    raise TypeError('_CompressedValue is not immutable')
class BlobProperty(Property):
  """A Property whose value is a byte string. It may be compressed."""
  _indexed = False
  _compressed = False
  _attributes = Property._attributes + ['_compressed']
  @utils.positional(1 + Property._positional)
  def __init__(self, name=None, compressed=False, **kwds):
    """Constructor.
    Args:
      name: Optional datastore name for the property.
      compressed: If True, values are zlib-compressed before storage.
    """
    super(BlobProperty, self).__init__(name=name, **kwds)
    self._compressed = compressed
    if compressed and self._indexed:
      # TODO: Allow this, but only allow == and IN comparisons?
      raise NotImplementedError('BlobProperty %s cannot be compressed and '
                                'indexed at the same time.' % self._name)
  def _validate(self, value):
    if not isinstance(value, str):
      raise datastore_errors.BadValueError('Expected str, got %r' %
                                           (value,))
    # TextProperty subclasses this but measures length differently, so
    # the byte-length limit is only enforced for non-Text blobs.
    if (self._indexed and
        not isinstance(self, TextProperty) and
        len(value) > _MAX_STRING_LENGTH):
      raise datastore_errors.BadValueError(
          'Indexed value %s must be at most %d bytes' %
          (self._name, _MAX_STRING_LENGTH))
  def _to_base_type(self, value):
    # Returning None means "no conversion needed" (uncompressed case).
    if self._compressed:
      return _CompressedValue(zlib.compress(value))
  def _from_base_type(self, value):
    # Transparently decompress values that were stored compressed.
    if isinstance(value, _CompressedValue):
      return zlib.decompress(value.z_val)
  def _datastore_type(self, value):
    # Since this is only used for queries, and queries imply an
    # indexed property, always use ByteString.
    return datastore_types.ByteString(value)
  def _db_set_value(self, v, p, value):
    """Write the blob bytes and the appropriate 'meaning' into the pb."""
    if isinstance(value, _CompressedValue):
      self._db_set_compressed_meaning(p)
      value = value.z_val
    else:
      self._db_set_uncompressed_meaning(p)
    v.set_stringvalue(value)
  def _db_set_compressed_meaning(self, p):
    # Use meaning_uri because setting meaning to something else that is not
    # BLOB or BYTESTRING will cause the value to be decoded from utf-8 in
    # datastore_types.FromPropertyPb. That would break the compressed string.
    p.set_meaning_uri(_MEANING_URI_COMPRESSED)
    p.set_meaning(entity_pb.Property.BLOB)
  def _db_set_uncompressed_meaning(self, p):
    if self._indexed:
      p.set_meaning(entity_pb.Property.BYTESTRING)
    else:
      p.set_meaning(entity_pb.Property.BLOB)
  def _db_get_value(self, v, p):
    """Read the blob back; re-wrap compressed payloads for lazy decompression."""
    if not v.has_stringvalue():
      return None
    value = v.stringvalue()
    if p.meaning_uri() == _MEANING_URI_COMPRESSED:
      value = _CompressedValue(value)
    return value
class TextProperty(BlobProperty):
  """An unindexed Property whose value is a text string of unlimited length."""
  def _validate(self, value):
    if isinstance(value, str):
      # Decode from UTF-8 -- if this fails, we can't write it.
      try:
        value = unicode(value, 'utf-8')
      except UnicodeError:
        raise datastore_errors.BadValueError('Expected valid UTF-8, got %r' %
                                             (value,))
    elif not isinstance(value, unicode):
      raise datastore_errors.BadValueError('Expected string, got %r' %
                                           (value,))
    # Length is counted in characters here (see StringProperty, which
    # sets _indexed = True); the decoded value is NOT returned, so the
    # original object is kept and conversion happens in _to_base_type().
    if self._indexed and len(value) > _MAX_STRING_LENGTH:
      raise datastore_errors.BadValueError(
          'Indexed value %s must be at most %d characters' %
          (self._name, _MAX_STRING_LENGTH))
  def _to_base_type(self, value):
    # Returning None for str input means "already in base (byte) form".
    if isinstance(value, unicode):
      return value.encode('utf-8')
  def _from_base_type(self, value):
    if isinstance(value, str):
      try:
        return unicode(value, 'utf-8')
      except UnicodeDecodeError:
        # Since older versions of NDB could write non-UTF-8 TEXT
        # properties, we can't just reject these. But _validate() now
        # rejects these, so you can't write new non-UTF-8 TEXT
        # properties.
        # TODO: Eventually we should close this hole.
        pass
  def _db_set_uncompressed_meaning(self, p):
    # Unindexed text is tagged TEXT; indexed text falls through to the
    # BlobProperty default (BYTESTRING).
    if not self._indexed:
      p.set_meaning(entity_pb.Property.TEXT)
class StringProperty(TextProperty):
  """An indexed Property whose value is a text string of limited length."""
  # Inherits TextProperty's validation/encoding; being indexed activates
  # TextProperty's _MAX_STRING_LENGTH check.
  _indexed = True
class GeoPtProperty(Property):
  """A Property whose value is a GeoPt."""
  def _validate(self, value):
    if not isinstance(value, GeoPt):
      raise datastore_errors.BadValueError('Expected GeoPt, got %r' %
                                           (value,))
  def _db_set_value(self, v, unused_p, value):
    """Write the point into the pb; latitude goes in x, longitude in y."""
    if not isinstance(value, GeoPt):
      raise TypeError('GeoPtProperty %s can only be set to GeoPt values; '
                      'received %r' % (self._name, value))
    pv = v.mutable_pointvalue()
    pv.set_x(value.lat)
    pv.set_y(value.lon)
  def _db_get_value(self, v, unused_p):
    """Read the point back as a GeoPt (None if unset)."""
    if not v.has_pointvalue():
      return None
    pv = v.pointvalue()
    return GeoPt(pv.x(), pv.y())
def _unpack_user(v):
  """Internal helper to unpack a User value from a protocol buffer.
  Args:
    v: A PropertyValue pb; callers check has_uservalue() first.
  Returns:
    A users.User built from the pb's email, auth domain, obfuscated
    gaia id and (optional) federated identity fields.
  """
  uv = v.uservalue()
  email = unicode(uv.email().decode('utf-8'))
  auth_domain = unicode(uv.auth_domain().decode('utf-8'))
  obfuscated_gaiaid = uv.obfuscated_gaiaid().decode('utf-8')
  obfuscated_gaiaid = unicode(obfuscated_gaiaid)
  federated_identity = None
  if uv.has_federated_identity():
    federated_identity = unicode(
        uv.federated_identity().decode('utf-8'))
  value = users.User(email=email,
                     _auth_domain=auth_domain,
                     _user_id=obfuscated_gaiaid,
                     federated_identity=federated_identity)
  return value
class PickleProperty(BlobProperty):
  """A Property whose value is any picklable Python object."""
  def _to_base_type(self, value):
    # Use the highest protocol for the most compact serialization.
    return pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
  def _from_base_type(self, value):
    # NOTE(security): pickle.loads() can execute arbitrary code if the
    # stored bytes are attacker-controlled; only use this property with
    # trusted datastore contents.
    return pickle.loads(value)
class JsonProperty(BlobProperty):
  """A property whose value is any Json-encodable Python object."""
  # Use late import so the dependency is optional.
  def _to_base_type(self, value):
    """Serialize the user value to a JSON byte string for storage."""
    try:
      import json
    except ImportError:
      import simplejson as json
    # BUG FIX: this used to call json.dumps(value, 2).  The second
    # positional argument of json.dumps() is skipkeys, not indent, so
    # dict entries with non-string keys were silently dropped.  With the
    # default (skipkeys=False) such keys raise TypeError instead of
    # losing data.
    return json.dumps(value)
  def _from_base_type(self, value):
    """Deserialize the stored JSON string back to a Python object."""
    try:
      import json
    except ImportError:
      import simplejson as json
    return json.loads(value)
class UserProperty(Property):
  """A Property whose value is a User object.
  Note: this exists for backwards compatibility with existing
  datastore schemas only; we do not recommend storing User objects
  directly in the datastore, but instead recommend storing the
  user.user_id() value.
  """
  _attributes = Property._attributes + ['_auto_current_user',
                                        '_auto_current_user_add']
  _auto_current_user = False
  _auto_current_user_add = False
  @utils.positional(1 + Property._positional)
  def __init__(self, name=None, auto_current_user=False,
               auto_current_user_add=False, **kwds):
    """Constructor.
    Args:
      name: Optional datastore name for the property.
      auto_current_user: If True, stamp the current user on every put.
      auto_current_user_add: If True, stamp the current user only when
        no value has been set yet.
    """
    super(UserProperty, self).__init__(name=name, **kwds)
    # TODO: Disallow combining auto_current_user* and default?
    if self._repeated:
      if auto_current_user:
        raise ValueError('UserProperty could use auto_current_user and be '
                         'repeated, but there would be no point.')
      elif auto_current_user_add:
        raise ValueError('UserProperty could use auto_current_user_add and be '
                         'repeated, but there would be no point.')
    self._auto_current_user = auto_current_user
    self._auto_current_user_add = auto_current_user_add
  def _validate(self, value):
    if not isinstance(value, users.User):
      raise datastore_errors.BadValueError('Expected User, got %r' %
                                           (value,))
  def _prepare_for_put(self, entity):
    # Stamp the current user per the auto_current_user* flags; if no
    # user is logged in (get_current_user() is None) nothing is stored.
    if (self._auto_current_user or
        (self._auto_current_user_add and not self._has_value(entity))):
      value = users.get_current_user()
      if value is not None:
        self._store_value(entity, value)
  def _db_set_value(self, v, p, value):
    # Delegate field packing to the shared datastore helper.
    datastore_types.PackUser(p.name(), value, v)
  def _db_get_value(self, v, unused_p):
    if not v.has_uservalue():
      return None
    return _unpack_user(v)
class KeyProperty(Property):
  """A Property whose value is a Key object.
  Optional keyword argument: kind=<kind>, to require that keys
  assigned to this property always have the indicated kind. May be a
  string or a Model subclass.
  """
  _attributes = Property._attributes + ['_kind']
  _kind = None
  @utils.positional(2 + Property._positional)
  def __init__(self, *args, **kwds):
    """Constructor. Accepts name and/or kind in flexible positional order.
    Args:
      *args: At most one name (a string) and at most one kind (a Model
        subclass), in either order; see the table below.
      **kwds: name=..., kind=..., plus the usual Property options.
    """
    # Support several positional signatures:
    # () => name=None, kind from kwds
    # (None) => name=None, kind from kwds
    # (name) => name=arg 0, kind from kwds
    # (kind) => name=None, kind=arg 0
    # (name, kind) => name=arg 0, kind=arg 1
    # (kind, name) => name=arg 1, kind=arg 0
    # The positional kind must be a Model subclass; it cannot be a string.
    name = kind = None
    for arg in args:
      if isinstance(arg, basestring):
        if name is not None:
          raise TypeError('You can only specify one name')
        name = arg
      elif isinstance(arg, type) and issubclass(arg, Model):
        if kind is not None:
          raise TypeError('You can only specify one kind')
        kind = arg
      elif arg is not None:
        raise TypeError('Unexpected positional argument: %r' % (arg,))
    if name is None:
      name = kwds.pop('name', None)
    elif 'name' in kwds:
      raise TypeError('You can only specify name once')
    if kind is None:
      kind = kwds.pop('kind', None)
    elif 'kind' in kwds:
      raise TypeError('You can only specify kind once')
    if kind is not None:
      # Normalize kind to a plain (byte) string.
      if isinstance(kind, type) and issubclass(kind, Model):
        kind = kind._get_kind()
      if isinstance(kind, unicode):
        kind = kind.encode('utf-8')
      if not isinstance(kind, str):
        raise TypeError('kind must be a Model class or a string')
    super(KeyProperty, self).__init__(name, **kwds)
    self._kind = kind
  def _datastore_type(self, value):
    # Convert an NDB Key to the low-level datastore Key type for queries.
    return datastore_types.Key(value.urlsafe())
  def _validate(self, value):
    if not isinstance(value, Key):
      raise datastore_errors.BadValueError('Expected Key, got %r' % (value,))
    # Reject incomplete keys.
    if not value.id():
      raise datastore_errors.BadValueError('Expected complete Key, got %r' %
                                           (value,))
    if self._kind is not None:
      if value.kind() != self._kind:
        raise datastore_errors.BadValueError(
            'Expected Key with kind=%r, got %r' % (self._kind, value))
  def _db_set_value(self, v, unused_p, value):
    """Copy the Key's Reference (app, namespace, path) into the pb."""
    if not isinstance(value, Key):
      raise TypeError('KeyProperty %s can only be set to Key values; '
                      'received %r' % (self._name, value))
    # See datastore_types.PackKey
    ref = value.reference()
    rv = v.mutable_referencevalue() # A Reference
    rv.set_app(ref.app())
    if ref.has_name_space():
      rv.set_name_space(ref.name_space())
    for elem in ref.path().element_list():
      rv.add_pathelement().CopyFrom(elem)
  def _db_get_value(self, v, unused_p):
    """Rebuild a Key from the pb's reference value (None if unset)."""
    if not v.has_referencevalue():
      return None
    ref = entity_pb.Reference()
    rv = v.referencevalue()
    if rv.has_app():
      ref.set_app(rv.app())
    if rv.has_name_space():
      ref.set_name_space(rv.name_space())
    path = ref.mutable_path()
    for elem in rv.pathelement_list():
      path.add_element().CopyFrom(elem)
    return Key(reference=ref)
class BlobKeyProperty(Property):
  """A Property whose value is a BlobKey object."""
  def _validate(self, value):
    """Reject anything that is not a datastore_types.BlobKey."""
    if isinstance(value, datastore_types.BlobKey):
      return
    raise datastore_errors.BadValueError('Expected BlobKey, got %r' %
                                         (value,))
  def _db_set_value(self, v, p, value):
    """Store the BlobKey as a string tagged with the BLOBKEY meaning."""
    if not isinstance(value, datastore_types.BlobKey):
      raise TypeError('BlobKeyProperty %s can only be set to BlobKey values; '
                      'received %r' % (self._name, value))
    p.set_meaning(entity_pb.Property.BLOBKEY)
    v.set_stringvalue(str(value))
  def _db_get_value(self, v, unused_p):
    """Rebuild the BlobKey from the stored string (None if unset)."""
    if v.has_stringvalue():
      return datastore_types.BlobKey(v.stringvalue())
    return None
# The Epoch (a zero POSIX timestamp), in UTC.  Datetime values are stored
# as microsecond offsets relative to this point (see DateTimeProperty).
_EPOCH = datetime.datetime.utcfromtimestamp(0)
class DateTimeProperty(Property):
  """A Property whose value is a datetime object.
  Note: Unlike Django, auto_now_add can be overridden by setting the
  value before writing the entity. And unlike classic db, auto_now
  does not supply a default value. Also unlike classic db, when the
  entity is written, the property values are updated to match what
  was written. Finally, beware that this also updates the value in
  the in-process cache, *and* that auto_now_add may interact weirdly
  with transaction retries (a retry of a property with auto_now_add
  set will reuse the value that was set on the first try).
  """
  _attributes = Property._attributes + ['_auto_now', '_auto_now_add']
  _auto_now = False
  _auto_now_add = False
  @utils.positional(1 + Property._positional)
  def __init__(self, name=None, auto_now=False, auto_now_add=False, **kwds):
    """Constructor.
    Args:
      name: Optional datastore name for the property.
      auto_now: If True, stamp the current UTC time on every put.
      auto_now_add: If True, stamp the current UTC time on first put
        only (unless a value was set explicitly).
    """
    super(DateTimeProperty, self).__init__(name=name, **kwds)
    # TODO: Disallow combining auto_now* and default?
    if self._repeated:
      if auto_now:
        raise ValueError('DateTimeProperty %s could use auto_now and be '
                         'repeated, but there would be no point.' % self._name)
      elif auto_now_add:
        raise ValueError('DateTimeProperty %s could use auto_now_add and be '
                         'repeated, but there would be no point.' % self._name)
    self._auto_now = auto_now
    self._auto_now_add = auto_now_add
  def _validate(self, value):
    if not isinstance(value, datetime.datetime):
      raise datastore_errors.BadValueError('Expected datetime, got %r' %
                                           (value,))
  def _now(self):
    # BUG FIX: this previously returned datetime.datetime.now(), i.e.
    # *local* wall-clock time.  Values are serialized as offsets from the
    # UTC epoch (_EPOCH) and tz-aware values are rejected below, so the
    # auto_now/auto_now_add timestamp must be naive UTC.
    return datetime.datetime.utcnow()
  def _prepare_for_put(self, entity):
    # Stamp the current time for auto_now, or for auto_now_add when no
    # value has been set yet.
    if (self._auto_now or
        (self._auto_now_add and not self._has_value(entity))):
      value = self._now()
      self._store_value(entity, value)
  def _db_set_value(self, v, p, value):
    if not isinstance(value, datetime.datetime):
      raise TypeError('DatetimeProperty %s can only be set to datetime values; '
                      'received %r' % (self._name, value))
    if value.tzinfo is not None:
      raise NotImplementedError('DatetimeProperty %s can only support UTC. '
                                'Please derive a new Property to support '
                                'alternative timezones.' % self._name)
    # Serialize as microseconds since the UTC epoch, tagged GD_WHEN.
    dt = value - _EPOCH
    ival = dt.microseconds + 1000000 * (dt.seconds + 24 * 3600 * dt.days)
    v.set_int64value(ival)
    p.set_meaning(entity_pb.Property.GD_WHEN)
  def _db_get_value(self, v, unused_p):
    if not v.has_int64value():
      return None
    ival = v.int64value()
    return _EPOCH + datetime.timedelta(microseconds=ival)
def _date_to_datetime(value):
"""Convert a date to a datetime for datastore storage.
Args:
value: A datetime.date object.
Returns:
A datetime object with time set to 0:00.
"""
if not isinstance(value, datetime.date):
raise TypeError('Cannot convert to datetime expected date value; '
'received %s' % value)
return datetime.datetime(value.year, value.month, value.day)
def _time_to_datetime(value):
"""Convert a time to a datetime for datastore storage.
Args:
value: A datetime.time object.
Returns:
A datetime object with date set to 1970-01-01.
"""
if not isinstance(value, datetime.time):
raise TypeError('Cannot convert to datetime expected time value; '
'received %s' % value)
return datetime.datetime(1970, 1, 1,
value.hour, value.minute, value.second,
value.microsecond)
class DateProperty(DateTimeProperty):
  """A Property whose value is a date object."""
  def _validate(self, value):
    if not isinstance(value, datetime.date):
      raise datastore_errors.BadValueError('Expected date, got %r' %
                                           (value,))
  def _to_base_type(self, value):
    # Dates are stored as midnight datetimes (see DateTimeProperty).
    assert isinstance(value, datetime.date), repr(value)
    return _date_to_datetime(value)
  def _from_base_type(self, value):
    assert isinstance(value, datetime.datetime), repr(value)
    return value.date()
  def _now(self):
    # BUG FIX: previously datetime.date.today(), i.e. the *local* date.
    # Stored values are interpreted relative to the UTC epoch (see
    # DateTimeProperty._db_set_value), so auto_now/auto_now_add should
    # record the current UTC date for consistency.
    return datetime.datetime.utcnow().date()
class TimeProperty(DateTimeProperty):
  """A Property whose value is a time object."""
  def _validate(self, value):
    if not isinstance(value, datetime.time):
      raise datastore_errors.BadValueError('Expected time, got %r' %
                                           (value,))
  def _to_base_type(self, value):
    # Times are stored as datetimes on 1970-01-01 (see _time_to_datetime).
    assert isinstance(value, datetime.time), repr(value)
    return _time_to_datetime(value)
  def _from_base_type(self, value):
    assert isinstance(value, datetime.datetime), repr(value)
    return value.time()
  def _now(self):
    # BUG FIX: previously datetime.datetime.now().time() (local time).
    # Stored values are interpreted relative to the UTC epoch (see
    # DateTimeProperty._db_set_value), so use the UTC time of day.
    return datetime.datetime.utcnow().time()
class _StructuredGetForDictMixin(Property):
  """Mixin class so *StructuredProperty can share _get_for_dict().
  The behavior here is that sub-entities are converted to dictionaries
  by calling to_dict() on them (also doing the right thing for
  repeated properties).
  """
  def _get_for_dict(self, entity):
    value = self._get_value(entity)
    if self._repeated:
      # Convert each sub-entity in the list.
      return [subent._to_dict() for subent in value]
    if value is not None:
      return value._to_dict()
    return value
class StructuredProperty(_StructuredGetForDictMixin):
  """A Property whose value is itself an entity.
  The values of the sub-entity are indexed and can be queried.
  See the module docstring for details.
  """
  _modelclass = None
  _attributes = ['_modelclass'] + Property._attributes
  _positional = Property._positional + 1 # Add modelclass as positional arg.
  @utils.positional(1 + _positional)
  def __init__(self, modelclass, name=None, **kwds):
    """Constructor.
    Args:
      modelclass: The Model subclass describing the sub-entity.
      name: Optional datastore name for the property.
    """
    super(StructuredProperty, self).__init__(name=name, **kwds)
    if self._repeated:
      if modelclass._has_repeated:
        # Repeats nested inside repeats would be ambiguous in the
        # flattened dotted-name wire format used by _serialize().
        raise TypeError('Cannot repeat StructuredProperty %s that has repeated '
                        'properties of its own.' % self._name)
    self._modelclass = modelclass
  def __getattr__(self, attrname):
    """Dynamically get a subproperty."""
    # Optimistically try to use the dict key.
    prop = self._modelclass._properties.get(attrname)
    # We're done if we have a hit and _code_name matches.
    if prop is None or prop._code_name != attrname:
      # Otherwise, use linear search looking for a matching _code_name.
      for prop in self._modelclass._properties.values():
        if prop._code_name == attrname:
          break
      else:
        # This is executed when we never execute the above break.
        prop = None
    if prop is None:
      raise AttributeError('Model subclass %s has no attribute %s' %
                           (self._modelclass.__name__, attrname))
    # Return a copy whose _name is prefixed with this property's name,
    # so comparisons against the subproperty use the dotted name.
    prop_copy = copy.copy(prop)
    prop_copy._name = self._name + '.' + prop_copy._name
    # Cache the outcome, so subsequent requests for the same attribute
    # name will get the copied property directly rather than going
    # through the above motions all over again.
    setattr(self, attrname, prop_copy)
    return prop_copy
  def _comparison(self, op, value):
    """Expand an == comparison on a sub-entity into per-field filters."""
    if op != '=':
      raise datastore_errors.BadFilterError(
          'StructuredProperty filter can only use ==')
    if not self._indexed:
      raise datastore_errors.BadFilterError(
          'Cannot query for unindexed StructuredProperty %s' % self._name)
    # Import late to avoid circular imports.
    from .query import ConjunctionNode, PostFilterNode
    from .query import RepeatedStructuredPropertyPredicate
    if value is None:
      from .query import FilterNode # Import late to avoid circular imports.
      return FilterNode(self._name, op, value)
    value = self._do_validate(value)
    value = self._call_to_base_type(value)
    filters = []
    match_keys = []
    # TODO: Why not just iterate over value._values?
    for prop in self._modelclass._properties.itervalues():
      vals = prop._get_base_value_unwrapped_as_list(value)
      if prop._repeated:
        if vals:
          raise datastore_errors.BadFilterError(
              'Cannot query for non-empty repeated property %s' % prop._name)
        continue
      assert isinstance(vals, list) and len(vals) == 1, repr(vals)
      val = vals[0]
      if val is not None:
        altprop = getattr(self, prop._code_name)
        filt = altprop._comparison(op, val)
        filters.append(filt)
        match_keys.append(altprop._name)
    if not filters:
      raise datastore_errors.BadFilterError(
          'StructuredProperty filter without any values')
    if len(filters) == 1:
      return filters[0]
    if self._repeated:
      # NOTE(review): a PostFilterNode predicate is appended for repeated
      # values; presumably it re-checks that the matched fields all come
      # from the same sub-entity — confirm in query.py.
      pb = value._to_pb(allow_partial=True)
      pred = RepeatedStructuredPropertyPredicate(match_keys, pb,
                                                 self._name + '.')
      filters.append(PostFilterNode(pred))
    return ConjunctionNode(*filters)
  def _IN(self, value):
    """Implement IN as a disjunction of == comparisons."""
    if not isinstance(value, (list, tuple, set, frozenset)):
      raise datastore_errors.BadArgumentError(
          'Expected list, tuple or set, got %r' % (value,))
    from .query import DisjunctionNode, FalseNode
    # Expand to a series of == filters.
    filters = [self._comparison('=', val) for val in value]
    if not filters:
      # DisjunctionNode doesn't like an empty list of filters.
      # Running the query will still fail, but this matches the
      # behavior of IN for regular properties.
      return FalseNode()
    else:
      return DisjunctionNode(*filters)
  IN = _IN
  def _validate(self, value):
    if not isinstance(value, self._modelclass):
      raise datastore_errors.BadValueError('Expected %s instance, got %r' %
                                           (self._modelclass.__name__, value))
  def _has_value(self, entity, rest=None):
    # rest: optional list of attribute names to check in addition.
    # Basically, prop._has_value(self, ent, ['x', 'y']) is similar to
    # (prop._has_value(ent) and
    # prop.x._has_value(ent.x) and
    # prop.x.y._has_value(ent.x.y))
    # assuming prop.x and prop.x.y exist.
    # NOTE: This is not particularly efficient if len(rest) > 1,
    # but that seems a rare case, so for now I don't care.
    ok = super(StructuredProperty, self)._has_value(entity)
    if ok and rest:
      lst = self._get_base_value_unwrapped_as_list(entity)
      if len(lst) != 1:
        raise RuntimeError('Failed to retrieve sub-entity of StructuredProperty'
                           ' %s' % self._name)
      subent = lst[0]
      if subent is None:
        return True
      subprop = subent._properties.get(rest[0])
      if subprop is None:
        ok = False
      else:
        ok = subprop._has_value(subent, rest[1:])
    return ok
  def _serialize(self, entity, pb, prefix='', parent_repeated=False):
    """Serialize each sub-property under a dotted name prefix."""
    # entity -> pb; pb is an EntityProto message
    values = self._get_base_value_unwrapped_as_list(entity)
    for value in values:
      if value is not None:
        # TODO: Avoid re-sorting for repeated values.
        for unused_name, prop in sorted(value._properties.iteritems()):
          prop._serialize(value, pb, prefix + self._name + '.',
                          self._repeated or parent_repeated)
      else:
        # Serialize a single None
        super(StructuredProperty, self)._serialize(
            entity, pb, prefix=prefix, parent_repeated=parent_repeated)
  def _deserialize(self, entity, p, depth=1):
    """Deserialize one dotted property pb into the sub-entity/entities."""
    if not self._repeated:
      subentity = self._retrieve_value(entity)
      if subentity is None:
        subentity = self._modelclass()
        self._store_value(entity, _BaseValue(subentity))
      cls = self._modelclass
      if isinstance(subentity, _BaseValue):
        # NOTE: It may not be a _BaseValue when we're deserializing a
        # repeated structured property.
        subentity = subentity.b_val
      if not isinstance(subentity, cls):
        raise RuntimeError('Cannot deserialize StructuredProperty %s; value '
                           'retrieved not a %s instance %r' %
                           (self._name, cls.__name__, subentity))
      prop = subentity._get_property_for(p, depth=depth)
      if prop is None:
        # Special case: kill subentity after all.
        self._store_value(entity, None)
        return
      prop._deserialize(subentity, p, depth + 1)
      return
    # The repeated case is more complicated.
    # TODO: Prove we won't get here for orphans.
    name = p.name()
    parts = name.split('.')
    if len(parts) <= depth:
      raise RuntimeError('StructuredProperty %s expected to find properties '
                         'separated by periods at a depth of %i; received %r' %
                         (self._name, depth, parts))
    next = parts[depth]
    rest = parts[depth + 1:]
    prop = self._modelclass._properties.get(next)
    if prop is None:
      raise RuntimeError('Unable to find property %s of StructuredProperty %s.'
                         % (next, self._name))
    values = self._get_base_value_unwrapped_as_list(entity)
    # Find the first subentity that doesn't have a value for this
    # property yet.
    for sub in values:
      if not isinstance(sub, self._modelclass):
        raise TypeError('sub-entities must be instances of their Model class.')
      if not prop._has_value(sub, rest):
        subentity = sub
        break
    else:
      # We didn't find one. Add a new one to the underlying list of
      # values (the list returned by
      # _get_base_value_unwrapped_as_list() is a copy so we
      # can't append to it).
      subentity = self._modelclass()
      values = self._retrieve_value(entity)
      values.append(_BaseValue(subentity))
    prop._deserialize(subentity, p, depth + 1)
  def _prepare_for_put(self, entity):
    # Recursively give each sub-entity a chance to update its own
    # auto-updated properties before serialization.
    values = self._get_base_value_unwrapped_as_list(entity)
    for value in values:
      if value is not None:
        value._prepare_for_put()
class LocalStructuredProperty(_StructuredGetForDictMixin, BlobProperty):
  """Substructure that is serialized to an opaque blob.
  This looks like StructuredProperty on the Python side, but is
  written like a BlobProperty in the datastore. It is not indexed
  and you cannot query for subproperties. On the other hand, the
  on-disk representation is more efficient and can be made even more
  efficient by passing compressed=True, which compresses the blob
  data using zlib.
  """
  _indexed = False
  _modelclass = None
  _attributes = ['_modelclass'] + BlobProperty._attributes
  _positional = BlobProperty._positional + 1 # Add modelclass as positional.
  @utils.positional(1 + _positional)
  def __init__(self, modelclass, name=None, compressed=False, **kwds):
    """Constructor.
    Args:
      modelclass: The Model subclass describing the sub-entity.
      name: Optional datastore name for the property.
      compressed: If True, the serialized blob is zlib-compressed.
    """
    super(LocalStructuredProperty, self).__init__(name=name,
                                                  compressed=compressed,
                                                  **kwds)
    if self._indexed:
      raise NotImplementedError('Cannot index LocalStructuredProperty %s.' %
                                self._name)
    self._modelclass = modelclass
  def _validate(self, value):
    if not isinstance(value, self._modelclass):
      raise datastore_errors.BadValueError('Expected %s instance, got %r' %
                                           (self._modelclass.__name__, value))
  def _to_base_type(self, value):
    # Serialize the sub-entity to protobuf bytes; set_key=False means the
    # sub-entity's key is not serialized.
    if isinstance(value, self._modelclass):
      pb = value._to_pb(set_key=False)
      return pb.SerializePartialToString()
  def _from_base_type(self, value):
    # value holds the raw serialized bytes; rebuild the sub-entity.
    if not isinstance(value, self._modelclass):
      pb = entity_pb.EntityProto()
      pb.MergePartialFromString(value)
      return self._modelclass._from_pb(pb, set_key=False)
  def _prepare_for_put(self, entity):
    # TODO: Using _get_user_value() here makes it impossible to
    # subclass this class and add a _from_base_type(). But using
    # _get_base_value() won't work, since that would return
    # the serialized (and possibly compressed) serialized blob.
    value = self._get_user_value(entity)
    if value is not None:
      if self._repeated:
        for subent in value:
          subent._prepare_for_put()
      else:
        value._prepare_for_put()
class GenericProperty(Property):
"""A Property whose value can be (almost) any basic type.
This is mainly used for Expando and for orphans (values present in
the datastore but not represented in the Model subclass) but can
also be used explicitly for properties with dynamically-typed
values.
This supports compressed=True, which is only effective for str
values (not for unicode), and implies indexed=False.
"""
_compressed = False
_attributes = Property._attributes + ['_compressed']
  @utils.positional(1 + Property._positional)
  def __init__(self, name=None, compressed=False, **kwds):
    """Constructor.
    Args:
      name: Optional datastore name for the property.
      compressed: If True, str values are zlib-compressed; also forces
        indexed=False unless explicitly overridden (which raises below).
    """
    if compressed: # Compressed implies unindexed.
      kwds.setdefault('indexed', False)
    super(GenericProperty, self).__init__(name=name, **kwds)
    self._compressed = compressed
    if compressed and self._indexed:
      # TODO: Allow this, but only allow == and IN comparisons?
      raise NotImplementedError('GenericProperty %s cannot be compressed and '
                                'indexed at the same time.' % self._name)
  def _to_base_type(self, value):
    # Only plain byte strings are compressed; other types pass through
    # unchanged (returning None means "no conversion").
    if self._compressed and isinstance(value, str):
      return _CompressedValue(zlib.compress(value))
  def _from_base_type(self, value):
    # Transparently decompress values that were stored compressed.
    if isinstance(value, _CompressedValue):
      return zlib.decompress(value.z_val)
  def _validate(self, value):
    # The only constraint enforced here: indexed string values must fit
    # within the datastore's maximum indexed-string size.
    if (isinstance(value, basestring) and
        self._indexed and
        len(value) > _MAX_STRING_LENGTH):
      raise datastore_errors.BadValueError(
          'Indexed value %s must be at most %d bytes' %
          (self._name, _MAX_STRING_LENGTH))
def _db_get_value(self, v, p):
# This is awkward but there seems to be no faster way to inspect
# what union member is present. datastore_types.FromPropertyPb(),
# the undisputed authority, has the same series of if-elif blocks.
# (We don't even want to think about multiple members... :-)
if v.has_stringvalue():
sval = v.stringvalue()
meaning = p.meaning()
if meaning == entity_pb.Property.BLOBKEY:
sval = BlobKey(sval)
elif meaning == entity_pb.Property.BLOB:
if p.meaning_uri() == _MEANING_URI_COMPRESSED:
sval = _CompressedValue(sval)
elif meaning != entity_pb.Property.BYTESTRING:
try:
sval.decode('ascii')
# If this passes, don't return unicode.
except UnicodeDecodeError:
try:
sval = unicode(sval.decode('utf-8'))
except UnicodeDecodeError:
pass
return sval
elif v.has_int64value():
ival = v.int64value()
if p.meaning() == entity_pb.Property.GD_WHEN:
return _EPOCH + datetime.timedelta(microseconds=ival)
return ival
elif v.has_booleanvalue():
# The booleanvalue field is an int32, so booleanvalue() returns
# an int, hence the conversion.
return bool(v.booleanvalue())
elif v.has_doublevalue():
return v.doublevalue()
elif v.has_referencevalue():
rv = v.referencevalue()
app = rv.app()
namespace = rv.name_space()
pairs = [(elem.type(), elem.id() or elem.name())
for elem in rv.pathelement_list()]
return Key(pairs=pairs, app=app, namespace=namespace)
elif v.has_pointvalue():
pv = v.pointvalue()
return GeoPt(pv.x(), pv.y())
elif v.has_uservalue():
return _unpack_user(v)
else:
# A missing value implies null.
return None
def _db_set_value(self, v, p, value):
# TODO: use a dict mapping types to functions
if isinstance(value, str):
v.set_stringvalue(value)
# TODO: Set meaning to BLOB or BYTESTRING if it's not UTF-8?
# (Or TEXT if unindexed.)
elif isinstance(value, unicode):
v.set_stringvalue(value.encode('utf8'))
if not self._indexed:
p.set_meaning(entity_pb.Property.TEXT)
elif isinstance(value, bool): # Must test before int!
v.set_booleanvalue(value)
elif isinstance(value, (int, long)):
if not (-_MAX_LONG <= value < _MAX_LONG):
raise TypeError('Property %s can only accept 64-bit integers; '
'received %s' % value)
v.set_int64value(value)
elif isinstance(value, float):
v.set_doublevalue(value)
elif isinstance(value, Key):
# See datastore_types.PackKey
ref = value.reference()
rv = v.mutable_referencevalue() # A Reference
rv.set_app(ref.app())
if ref.has_name_space():
rv.set_name_space(ref.name_space())
for elem in ref.path().element_list():
rv.add_pathelement().CopyFrom(elem)
elif isinstance(value, datetime.datetime):
if value.tzinfo is not None:
raise NotImplementedError('Property %s can only support the UTC. '
'Please derive a new Property to support '
'alternative timezones.' % self._name)
dt = value - _EPOCH
ival = dt.microseconds + 1000000 * (dt.seconds + 24 * 3600 * dt.days)
v.set_int64value(ival)
p.set_meaning(entity_pb.Property.GD_WHEN)
elif isinstance(value, GeoPt):
pv = v.mutable_pointvalue()
pv.set_x(value.lat)
pv.set_y(value.lon)
elif isinstance(value, users.User):
datastore_types.PackUser(p.name(), value, v)
elif isinstance(value, BlobKey):
v.set_stringvalue(str(value))
p.set_meaning(entity_pb.Property.BLOBKEY)
elif isinstance(value, _CompressedValue):
value = value.z_val
v.set_stringvalue(value)
p.set_meaning_uri(_MEANING_URI_COMPRESSED)
p.set_meaning(entity_pb.Property.BLOB)
else:
raise NotImplementedError('Property %s does not support %s types.' %
(self._name, type(value)))
class ComputedProperty(GenericProperty):
  """A Property whose value is derived by a user-supplied function.

  A ComputedProperty cannot be assigned directly; its value is produced
  on demand by calling the supplied function with the entity.  This lets
  you expose derived data in the datastore for filtering or sorting
  without maintaining it by hand -- e.g. the length of a BlobProperty,
  or a lowercased copy of another field for case-insensitive queries.

  It may be declared like any other property, passing the function as
  the first argument, or used directly as a decorator on the function
  that performs the computation.

  Example:

  >>> class DatastoreFile(Model):
  ...   name = StringProperty()
  ...   name_lower = ComputedProperty(lambda self: self.name.lower())
  ...
  ...   data = BlobProperty()
  ...
  ...   @ComputedProperty
  ...   def size(self):
  ...     return len(self.data)
  ...
  ...   def _compute_hash(self):
  ...     return hashlib.sha1(self.data).hexdigest()
  ...   hash = ComputedProperty(_compute_hash, name='sha1')
  """

  def __init__(self, func, name=None, indexed=None, repeated=None):
    """Constructor.

    Args:
      func: A function that takes one argument, the model instance, and
        returns a calculated value.
    """
    super(ComputedProperty, self).__init__(name=name, indexed=indexed,
                                           repeated=repeated)
    self._func = func

  def _set_value(self, entity, value):
    # Direct assignment is always an error for computed properties.
    raise ComputedPropertyError("Cannot assign to a ComputedProperty")

  def _get_value(self, entity):
    # Recompute every time; cache the result on the entity so it gets
    # serialized along with the other property values.
    result = self._func(entity)
    self._store_value(entity, result)
    return result

  def _prepare_for_put(self, entity):
    self._get_value(entity)  # For its side effects.
class MetaModel(type):
  """Metaclass for Model.

  Its sole job is to let each Property learn its own name, which is done
  by invoking the freshly created class's _fix_up_properties() method.
  """

  def __init__(cls, name, bases, classdict):
    super(MetaModel, cls).__init__(name, bases, classdict)
    cls._fix_up_properties()

  def __repr__(cls):
    # Render the class with its properties, sorted by datastore name.
    prop_reprs = ['%s=%r' % (prop._code_name, prop)
                  for _, prop in sorted(cls._properties.iteritems())]
    return '%s<%s>' % (cls.__name__, ', '.join(prop_reprs))
class Model(_NotEqualMixin):
  """A class describing datastore entities.

  Model instances are usually called entities.  All model classes
  inheriting from Model automatically have MetaModel as their
  metaclass, so that the properties are fixed up properly after the
  class once the class is defined.

  Because of this, you cannot use the same Property object to describe
  multiple properties -- you must create separate Property objects for
  each property.  E.g. this does not work:

    wrong_prop = StringProperty()
    class Wrong(Model):
      wrong1 = wrong_prop
      wrong2 = wrong_prop

  The kind is normally equal to the class name (exclusive of the
  module name or any other parent scope).  To override the kind,
  define a class method named _get_kind(), as follows:

    class MyModel(Model):
      @classmethod
      def _get_kind(cls):
        return 'AnotherKind'
  """

  __metaclass__ = MetaModel

  # Class variables updated by _fix_up_properties()
  _properties = None
  _has_repeated = False
  _kind_map = {}  # Dict mapping {kind: Model subclass}

  # Defaults for instance variables.
  _entity_key = None
  _values = None

  # Hardcoded pseudo-property for the key.
  _key = ModelKey()
  key = _key

  def __init__(*args, **kwds):
    """Creates a new instance of this model (a.k.a. an entity).

    The new entity must be written to the datastore using an explicit
    call to .put().

    Keyword Args:
      key: Key instance for this model. If key is used, id and parent must
        be None.
      id: Key id for this model. If id is used, key must be None.
      parent: Key instance for the parent model or None for a top-level one.
        If parent is used, key must be None.
      namespace: Optional namespace.
      app: Optional app ID.
      **kwds: Keyword arguments mapping to properties of this model.

    Note: you cannot define a property named key; the .key attribute
    always refers to the entity's key.  But you can define properties
    named id or parent.  Values for the latter cannot be passed
    through the constructor, but can be assigned to entity attributes
    after the entity has been created.
    """
    # self is unpacked from *args so that properties whose names collide
    # with the special keywords can still be passed via **kwds.
    (self,) = args
    get_arg = self.__get_arg
    key = get_arg(kwds, 'key')
    id = get_arg(kwds, 'id')
    app = get_arg(kwds, 'app')
    namespace = get_arg(kwds, 'namespace')
    parent = get_arg(kwds, 'parent')
    if key is not None:
      if (id is not None or parent is not None or
          app is not None or namespace is not None):
        raise datastore_errors.BadArgumentError(
            'Model constructor given key= does not accept '
            'id=, app=, namespace=, or parent=.')
      self._key = _validate_key(key, entity=self)
    elif (id is not None or parent is not None or
          app is not None or namespace is not None):
      self._key = Key(self._get_kind(), id,
                      parent=parent, app=app, namespace=namespace)
    self._values = {}
    self._set_attributes(kwds)

  @classmethod
  def __get_arg(cls, kwds, kwd):
    """Helper method to parse keywords that may be property names.

    A keyword kwd is treated as a constructor argument unless the class
    defines a (non-ModelKey) property by that name; '_' + kwd always
    refers to the constructor argument.
    """
    alt_kwd = '_' + kwd
    if alt_kwd in kwds:
      return kwds.pop(alt_kwd)
    if kwd in kwds:
      obj = getattr(cls, kwd, None)
      if not isinstance(obj, Property) or isinstance(obj, ModelKey):
        return kwds.pop(kwd)
    return None

  def __getstate__(self):
    """Pickle support: serialize via the protobuf encoding."""
    return self._to_pb().Encode()

  def __setstate__(self, serialized_pb):
    """Pickle support: rebuild the entity from a serialized protobuf."""
    pb = entity_pb.EntityProto(serialized_pb)
    self.__init__()
    self.__class__._from_pb(pb, set_key=False, ent=self)

  def _populate(self, **kwds):
    """Populate an instance from keyword arguments.

    Each keyword argument will be used to set a corresponding
    property.  Keywords must refer to valid property name.  This is
    similar to passing keyword arguments to the Model constructor,
    except that no provisions for key, id or parent are made.
    """
    self._set_attributes(kwds)
  populate = _populate

  def _set_attributes(self, kwds):
    """Internal helper to set attributes from keyword arguments.

    Expando overrides this.
    """
    cls = self.__class__
    for name, value in kwds.iteritems():
      prop = getattr(cls, name)  # Raises AttributeError for unknown properties.
      if not isinstance(prop, Property):
        raise TypeError('Cannot set non-property %s' % name)
      prop._set_value(self, value)

  def _find_uninitialized(self):
    """Internal helper to find uninitialized properties.

    Returns:
      A set of property names.
    """
    return set(name
               for name, prop in self._properties.iteritems()
               if not prop._is_initialized(self))

  def _check_initialized(self):
    """Internal helper to check for uninitialized properties.

    Raises:
      BadValueError if it finds any.
    """
    baddies = self._find_uninitialized()
    if baddies:
      raise datastore_errors.BadValueError(
          'Entity has uninitialized properties: %s' % ', '.join(baddies))

  def __repr__(self):
    """Return an unambiguous string representation of an entity."""
    args = []
    for prop in self._properties.itervalues():
      if prop._has_value(self):
        val = prop._retrieve_value(self)
        # Manually apply _from_base_type() so as not to have a side
        # effect on what's contained in the entity.  Printing a value
        # should not change it!
        if prop._repeated:
          val = [prop._opt_call_from_base_type(v) for v in val]
        elif val is not None:
          val = prop._opt_call_from_base_type(val)
        args.append('%s=%r' % (prop._code_name, val))
    args.sort()
    if self._key is not None:
      args.insert(0, 'key=%r' % self._key)
    s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
    return s

  @classmethod
  def _get_kind(cls):
    """Return the kind name for this class.

    This defaults to cls.__name__; users may override this to give a
    class a different on-disk name than its class name.
    """
    return cls.__name__

  @classmethod
  def _reset_kind_map(cls):
    """Clear the kind map.  Useful for testing."""
    # Preserve "system" kinds, like __namespace__
    keep = {}
    for name, value in cls._kind_map.iteritems():
      if name.startswith('__') and name.endswith('__'):
        keep[name] = value
    cls._kind_map.clear()
    cls._kind_map.update(keep)

  def _has_complete_key(self):
    """Return whether this entity has a complete key."""
    return self._key is not None and self._key.id() is not None

  def __hash__(self):
    """Dummy hash function.

    Raises:
      Always TypeError to emphasize that entities are mutable.
    """
    raise TypeError('Model is not immutable')

  def __eq__(self, other):
    """Compare two entities of the same class for equality."""
    if other.__class__ is not self.__class__:
      return NotImplemented
    # It's okay to use private names -- we're the same class
    if self._key != other._key:
      # TODO: If one key is None and the other is an explicit
      # incomplete key of the simplest form, this should be OK.
      return False
    return self._equivalent(other)

  def _equivalent(self, other):
    """Compare two entities of the same class, excluding keys."""
    if other.__class__ is not self.__class__:  # TODO: What about subclasses?
      # BUG FIX: the original read other.__class_ (single underscore),
      # which raised AttributeError instead of this NotImplementedError.
      raise NotImplementedError('Cannot compare different model classes. '
                                '%s is not %s' % (self.__class__.__name__,
                                                  other.__class__.__name__))
    # It's all about determining inequality early.
    if len(self._properties) != len(other._properties):
      return False  # Can only happen for Expandos.
    my_prop_names = set(self._properties.iterkeys())
    their_prop_names = set(other._properties.iterkeys())
    if my_prop_names != their_prop_names:
      return False  # Again, only possible for Expandos.
    for name in my_prop_names:
      my_value = self._properties[name]._get_value(self)
      their_value = other._properties[name]._get_value(other)
      if my_value != their_value:
        return False
    return True

  def _to_pb(self, pb=None, allow_partial=False, set_key=True):
    """Internal helper to turn an entity into an EntityProto protobuf."""
    if not allow_partial:
      self._check_initialized()
    if pb is None:
      pb = entity_pb.EntityProto()
    if set_key:
      # TODO: Move the key stuff into ModelAdapter.entity_to_pb()?
      self._key_to_pb(pb)
    for unused_name, prop in sorted(self._properties.iteritems()):
      prop._serialize(self, pb)
    return pb

  def _key_to_pb(self, pb):
    """Internal helper to copy the key into a protobuf."""
    key = self._key
    if key is None:
      pairs = [(self._get_kind(), None)]
      ref = key_module._ReferenceFromPairs(pairs, reference=pb.mutable_key())
    else:
      ref = key.reference()
      pb.mutable_key().CopyFrom(ref)
    group = pb.mutable_entity_group()  # Must initialize this.
    # To work around an SDK issue, only set the entity group if the
    # full key is complete.  TODO: Remove the top test once fixed.
    if key is not None and key.id():
      elem = ref.path().element(0)
      if elem.id() or elem.name():
        group.add_element().CopyFrom(elem)

  @classmethod
  def _from_pb(cls, pb, set_key=True, ent=None, key=None):
    """Internal helper to create an entity from an EntityProto protobuf."""
    if not isinstance(pb, entity_pb.EntityProto):
      raise TypeError('pb must be a EntityProto; received %r' % pb)
    if ent is None:
      ent = cls()
    # A key passed in overrides a key in the pb.
    if key is None and pb.has_key():
      key = Key(reference=pb.key())
    # If set_key is not set, skip a trivial incomplete key.
    if key is not None and (set_key or key.id() or key.parent()):
      ent._key = key
    indexed_properties = pb.property_list()
    unindexed_properties = pb.raw_property_list()
    for plist in [indexed_properties, unindexed_properties]:
      for p in plist:
        prop = ent._get_property_for(p, plist is indexed_properties)
        prop._deserialize(ent, p)
    return ent

  def _get_property_for(self, p, indexed=True, depth=0):
    """Internal helper to get the Property for a protobuf-level property."""
    name = p.name()
    parts = name.split('.')
    if len(parts) <= depth:
      # Apparently there's an unstructured value here.
      # Assume it is a None written for a missing value.
      # (It could also be that a schema change turned an unstructured
      # value into a structured one.  In that case, too, it seems
      # better to return None than to return an unstructured value,
      # since the latter doesn't match the current schema.)
      return None
    next = parts[depth]
    prop = self._properties.get(next)
    if prop is None:
      prop = self._fake_property(p, next, indexed)
    return prop

  def _clone_properties(self):
    """Internal helper to clone self._properties if necessary."""
    cls = self.__class__
    if self._properties is cls._properties:
      self._properties = dict(cls._properties)

  def _fake_property(self, p, next, indexed=True):
    """Internal helper to create a fake Property for an orphan value."""
    self._clone_properties()
    if p.name() != next and not p.name().endswith('.' + next):
      prop = StructuredProperty(Expando, next)
      prop._store_value(self, _BaseValue(Expando()))
    else:
      compressed = p.meaning_uri() == _MEANING_URI_COMPRESSED
      prop = GenericProperty(next,
                             repeated=p.multiple(),
                             indexed=indexed,
                             compressed=compressed)
    prop._code_name = next
    self._properties[prop._name] = prop
    return prop

  @utils.positional(1)
  def _to_dict(self, include=None, exclude=None):
    """Return a dict containing the entity's property values.

    Args:
      include: Optional set of property names to include, default all.
      exclude: Optional set of property names to skip, default none.
        A name contained in both include and exclude is excluded.
    """
    if (include is not None and
        not isinstance(include, (list, tuple, set, frozenset))):
      raise TypeError('include should be a list, tuple or set')
    if (exclude is not None and
        not isinstance(exclude, (list, tuple, set, frozenset))):
      raise TypeError('exclude should be a list, tuple or set')
    values = {}
    for prop in self._properties.itervalues():
      name = prop._code_name
      if include is not None and name not in include:
        continue
      if exclude is not None and name in exclude:
        continue
      values[name] = prop._get_for_dict(self)
    return values
  to_dict = _to_dict

  @classmethod
  def _fix_up_properties(cls):
    """Fix up the properties by calling their _fix_up() method.

    Note: This is called by MetaModel, but may also be called manually
    after dynamically updating a model class.
    """
    # Verify that _get_kind() returns an 8-bit string.
    kind = cls._get_kind()
    if not isinstance(kind, basestring):
      raise KindError('Class %s defines a _get_kind() method that returns '
                      'a non-string (%r)' % (cls.__name__, kind))
    if not isinstance(kind, str):
      try:
        kind = kind.encode('ascii')  # ASCII contents is okay.
      except UnicodeEncodeError:
        raise KindError('Class %s defines a _get_kind() method that returns '
                        'a Unicode string (%r); please encode using utf-8' %
                        (cls.__name__, kind))
    cls._properties = {}  # Map of {name: Property}
    if cls.__module__ == __name__:  # Skip the classes in *this* file.
      return
    for name in set(dir(cls)):
      attr = getattr(cls, name, None)
      if isinstance(attr, ModelAttribute) and not isinstance(attr, ModelKey):
        if name.startswith('_'):
          raise TypeError('ModelAttribute %s cannot begin with an underscore '
                          'character. _ prefixed attributes are reserved for '
                          'temporary Model instance values.' % name)
        attr._fix_up(cls, name)
        if isinstance(attr, Property):
          if attr._repeated:
            cls._has_repeated = True
          cls._properties[attr._name] = attr
    cls._update_kind_map()

  @classmethod
  def _update_kind_map(cls):
    """Update the kind map to include this class."""
    cls._kind_map[cls._get_kind()] = cls

  def _prepare_for_put(self):
    """Give each property a chance to prepare itself before a put()."""
    if self._properties:
      for prop in self._properties.itervalues():
        prop._prepare_for_put(self)

  def _validate_key(self, key):
    """Validation for _key attribute (designed to be overridden).

    Args:
      key: Proposed Key to use for entity.

    Returns:
      A valid key.
    """
    return key

  # Datastore API using the default context.
  # These use local import since otherwise they'd be recursive imports.

  @classmethod
  def _query(cls, *args, **kwds):
    """Create a Query object for this class.

    Keyword arguments are passed to the Query() constructor.  If
    positional arguments are given they are used to apply an initial
    filter.

    Returns:
      A Query object.
    """
    # TODO: Disallow non-empty args and filter=.
    from .query import Query  # Import late to avoid circular imports.
    qry = Query(kind=cls._get_kind(), **kwds)
    if args:
      qry = qry.filter(*args)
    return qry
  query = _query

  @classmethod
  def _gql(cls, query_string, *args, **kwds):
    """Run a GQL query."""
    from .query import gql  # Import late to avoid circular imports.
    return gql('SELECT * FROM %s %s' % (cls._get_kind(), query_string),
               *args, **kwds)
  gql = _gql

  def _put(self, **ctx_options):
    """Write this entity to the datastore.

    If the operation creates or completes a key, the entity's key
    attribute is set to the new, complete key.

    Returns:
      The key for the entity.  This is always a complete key.
    """
    return self._put_async(**ctx_options).get_result()
  put = _put

  def _put_async(self, **ctx_options):
    """Write this entity to the datastore.

    This is the asynchronous version of Model._put().
    """
    from . import tasklets
    ctx = tasklets.get_context()
    self._prepare_for_put()
    if self._key is None:
      self._key = Key(self._get_kind(), None)
    self._pre_put_hook()
    fut = ctx.put(self, **ctx_options)
    post_hook = self._post_put_hook
    if not self._is_default_hook(Model._default_post_put_hook, post_hook):
      fut.add_immediate_callback(post_hook, fut)
    return fut
  put_async = _put_async

  @classmethod
  def _get_or_insert(*args, **kwds):
    """Transactionally retrieves an existing entity or creates a new one.

    Positional Args:
      name: Key name to retrieve or create.

    Keyword Args:
      namespace: Optional namespace.
      app: Optional app ID.
      parent: Parent entity key, if any.
      context_options: ContextOptions object (not keyword args!) or None.
      **kwds: Keyword arguments to pass to the constructor of the model class
        if an instance for the specified key name does not already exist.  If
        an instance with the supplied key_name and parent already exists,
        these arguments will be discarded.

    Returns:
      Existing instance of Model class with the specified key name and parent
      or a new one that has just been created.
    """
    cls, args = args[0], args[1:]
    return cls._get_or_insert_async(*args, **kwds).get_result()
  get_or_insert = _get_or_insert

  @classmethod
  def _get_or_insert_async(*args, **kwds):
    """Transactionally retrieves an existing entity or creates a new one.

    This is the asynchronous version of Model._get_or_insert().
    """
    # NOTE: The signature is really weird here because we want to support
    # models with properties named e.g. 'cls' or 'name'.
    from . import tasklets
    cls, name = args  # These must always be positional.
    get_arg = cls.__get_arg
    app = get_arg(kwds, 'app')
    namespace = get_arg(kwds, 'namespace')
    parent = get_arg(kwds, 'parent')
    context_options = get_arg(kwds, 'context_options')
    # (End of super-special argument parsing.)
    # TODO: Test the heck out of this, in all sorts of evil scenarios.
    if not isinstance(name, basestring):
      raise TypeError('name must be a string; received %r' % name)
    elif not name:
      raise ValueError('name cannot be an empty string.')
    key = Key(cls, name, app=app, namespace=namespace, parent=parent)

    @tasklets.tasklet
    def internal_tasklet():
      @tasklets.tasklet
      def txn():
        ent = yield key.get_async(options=context_options)
        if ent is None:
          ent = cls(**kwds)  # TODO: Use _populate().
          ent._key = key
          yield ent.put_async(options=context_options)
        raise tasklets.Return(ent)
      if in_transaction():
        # Run txn() in existing transaction.
        ent = yield txn()
      else:
        # Maybe avoid a transaction altogether.
        ent = yield key.get_async(options=context_options)
        if ent is None:
          # Run txn() in new transaction.
          ent = yield transaction_async(txn)
      raise tasklets.Return(ent)

    return internal_tasklet()
  get_or_insert_async = _get_or_insert_async

  @classmethod
  def _allocate_ids(cls, size=None, max=None, parent=None, **ctx_options):
    """Allocates a range of key IDs for this model class.

    Args:
      size: Number of IDs to allocate.  Either size or max can be specified,
        not both.
      max: Maximum ID to allocate.  Either size or max can be specified,
        not both.
      parent: Parent key for which the IDs will be allocated.
      **ctx_options: Context options.

    Returns:
      A tuple with (start, end) for the allocated range, inclusive.
    """
    return cls._allocate_ids_async(size=size, max=max, parent=parent,
                                   **ctx_options).get_result()
  allocate_ids = _allocate_ids

  @classmethod
  def _allocate_ids_async(cls, size=None, max=None, parent=None,
                          **ctx_options):
    """Allocates a range of key IDs for this model class.

    This is the asynchronous version of Model._allocate_ids().
    """
    from . import tasklets
    ctx = tasklets.get_context()
    cls._pre_allocate_ids_hook(size, max, parent)
    key = Key(cls._get_kind(), None, parent=parent)
    fut = ctx.allocate_ids(key, size=size, max=max, **ctx_options)
    post_hook = cls._post_allocate_ids_hook
    if not cls._is_default_hook(Model._default_post_allocate_ids_hook,
                                post_hook):
      fut.add_immediate_callback(post_hook, size, max, parent, fut)
    return fut
  allocate_ids_async = _allocate_ids_async

  @classmethod
  def _get_by_id(cls, id, parent=None, **ctx_options):
    """Returns an instance of Model class by ID.

    This is really just a shorthand for Key(cls, id).get().

    Args:
      id: A string or integer key ID.
      parent: Parent key of the model to get.
      **ctx_options: Context options.

    Returns:
      A model instance or None if not found.
    """
    return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()
  get_by_id = _get_by_id

  @classmethod
  def _get_by_id_async(cls, id, parent=None, **ctx_options):
    """Returns an instance of Model class by ID.

    This is the asynchronous version of Model._get_by_id().
    """
    key = Key(cls._get_kind(), id, parent=parent)
    return key.get_async(**ctx_options)
  get_by_id_async = _get_by_id_async

  # Hooks that wrap around mutations.  Most are class methods with
  # the notable exception of put, which is an instance method.

  # To use these, override them in your model class and call
  # super(<myclass>, cls).<hook>(*args).

  # Note that the pre-hooks are called before the operation is
  # scheduled.  The post-hooks are called (by the Future) after the
  # operation has completed.

  # Do not use or touch the _default_* hooks.  These exist for
  # internal use only.

  @classmethod
  def _pre_allocate_ids_hook(cls, size, max, parent):
    pass
  _default_pre_allocate_ids_hook = _pre_allocate_ids_hook

  @classmethod
  def _post_allocate_ids_hook(cls, size, max, parent, future):
    pass
  _default_post_allocate_ids_hook = _post_allocate_ids_hook

  @classmethod
  def _pre_delete_hook(cls, key):
    pass
  _default_pre_delete_hook = _pre_delete_hook

  @classmethod
  def _post_delete_hook(cls, key, future):
    pass
  _default_post_delete_hook = _post_delete_hook

  @classmethod
  def _pre_get_hook(cls, key):
    pass
  _default_pre_get_hook = _pre_get_hook

  @classmethod
  def _post_get_hook(cls, key, future):
    pass
  _default_post_get_hook = _post_get_hook

  def _pre_put_hook(self):
    pass
  _default_pre_put_hook = _pre_put_hook

  def _post_put_hook(self, future):
    pass
  _default_post_put_hook = _post_put_hook

  @staticmethod
  def _is_default_hook(default_hook, hook):
    """Checks whether a specific hook is in its default state.

    Args:
      default_hook: Callable specified by ndb internally (do not override).
      hook: The hook defined by a model class using _post_*_hook.

    Raises:
      TypeError if either the default hook or the tested hook are not callable.
    """
    if not hasattr(default_hook, '__call__'):
      raise TypeError('Default hooks for ndb.model.Model must be callable')
    if not hasattr(hook, '__call__'):
      raise TypeError('Hooks must be callable')
    # im_func compares the underlying functions, ignoring binding.
    return default_hook.im_func is hook.im_func
class Expando(Model):
  """Model subclass to support dynamic Property names and types.

  See the module docstring for details.
  """

  # Set this to False (in an Expando subclass or entity) to make
  # properties default to unindexed.
  _default_indexed = True

  def _set_attributes(self, kwds):
    # Overrides Model._set_attributes(): route through setattr() so that
    # unknown names create dynamic properties via __setattr__ below
    # instead of raising.
    for name, value in kwds.iteritems():
      setattr(self, name, value)

  def __getattr__(self, name):
    # Only invoked when normal attribute lookup has already failed,
    # i.e. for dynamic property names.
    if name.startswith('_'):
      # NOTE(review): the base classes define no __getattr__, so this
      # presumably just raises AttributeError -- confirm before changing.
      return super(Expando, self).__getattr__(name)
    prop = self._properties.get(name)
    if prop is None:
      # Fall back to the default lookup machinery (raises AttributeError).
      return super(Expando, self).__getattribute__(name)
    return prop._get_value(self)

  def __setattr__(self, name, value):
    # Private names and declared (static) properties use normal assignment;
    # everything else becomes a dynamically created property.
    if (name.startswith('_') or
        isinstance(getattr(self.__class__, name, None), (Property, property))):
      return super(Expando, self).__setattr__(name, value)
    # TODO: Refactor this to share code with _fake_property().
    self._clone_properties()
    if isinstance(value, Model):
      prop = StructuredProperty(Model, name)
    else:
      repeated = isinstance(value, list)
      indexed = self._default_indexed
      # TODO: What if it's a list of Model instances?
      prop = GenericProperty(name, repeated=repeated, indexed=indexed)
    prop._code_name = name
    self._properties[name] = prop
    prop._set_value(self, value)

  def __delattr__(self, name):
    # Private names and declared properties are deleted normally; dynamic
    # properties are removed from the instance's property map.
    if (name.startswith('_') or
        isinstance(getattr(self.__class__, name, None), (Property, property))):
      return super(Expando, self).__delattr__(name)
    prop = self._properties.get(name)
    if not isinstance(prop, Property):
      raise TypeError('Model properties must be Property instances; not %r' %
                      prop)
    prop._delete_value(self)
    # NOTE(review): _properties is keyed by name, but this tests the
    # Property object itself for membership, so it presumably never
    # fires -- verify whether `name in ...` was intended.
    if prop in self.__class__._properties:
      raise RuntimeError('Property %s still in the list of properties for the '
                         'base class.' % name)
    del self._properties[name]
@utils.positional(1)
def transaction(callback, **ctx_options):
  """Run a callback inside a datastore transaction and return its result.

  Args:
    callback: A function or tasklet to be called.
    **ctx_options: Context options.

  Returns:
    Whatever callback() returns.

  Raises:
    Whatever callback() raises; datastore_errors.TransactionFailedError
    if the transaction failed.

  Note:
    To pass arguments to a callback function, use a lambda, e.g.
      def my_callback(key, inc):
        ...
      transaction(lambda: my_callback(Key(...), 1))
  """
  return transaction_async(callback, **ctx_options).get_result()
@utils.positional(1)
def transaction_async(callback, **kwds):
  """Asynchronously run a callback in a transaction.

  This is the asynchronous version of transaction(); it returns a future.
  """
  from . import tasklets
  current_context = tasklets.get_context()
  # Nesting is rejected up front rather than letting the datastore fail.
  if current_context.in_transaction():
    raise datastore_errors.BadRequestError(
        'Nested transactions are not supported.')
  return current_context.transaction(callback, **kwds)
def in_transaction():
  """Return whether a transaction is currently active."""
  from . import tasklets
  ctx = tasklets.get_context()
  return ctx.in_transaction()
@utils.positional(1)
def transactional(func=None, **ctx_options):
  """Decorator that makes a function run inside a transaction.

  When the caller is already inside a transaction, the function runs
  directly; otherwise a new transaction is started around the call.

  Two usage forms are supported:

  (1) Vanilla:
    @transactional
    def callback(arg):
      ...

  (2) With options:
    @transactional(retries=1)
    def callback(arg):
      ...
  """
  if func is not None:
    # Form (1), vanilla: called directly with the function.
    if ctx_options:
      raise TypeError('@transactional() does not take positional arguments')
    return transactional()(func)

  # Form (2), with options: return a decorator.
  def make_wrapper(wrapped):
    @utils.wrapping(wrapped)
    def wrapper(*args, **kwds):
      if in_transaction():
        return wrapped(*args, **kwds)
      return transaction(lambda: wrapped(*args, **kwds), **ctx_options)
    return wrapper
  return make_wrapper
def get_multi_async(keys, **ctx_options):
  """Kick off an asynchronous get for each key in a sequence.

  Args:
    keys: A sequence of keys.
    **ctx_options: Context options, forwarded to every get.

  Returns:
    A list of futures, one per key, in the same order as keys.
  """
  return [each_key.get_async(**ctx_options) for each_key in keys]
def get_multi(keys, **ctx_options):
  """Fetch a sequence of keys, blocking until all results are in.

  Args:
    keys: A sequence of keys.
    **ctx_options: Context options.

  Returns:
    A list whose items are either a Model instance or None if the key wasn't
    found.
  """
  futures = get_multi_async(keys, **ctx_options)
  return [fut.get_result() for fut in futures]
def put_multi_async(entities, **ctx_options):
  """Kick off an asynchronous put for each entity in a sequence.

  Args:
    entities: A sequence of Model instances.
    **ctx_options: Context options, forwarded to every put.

  Returns:
    A list of futures, one per entity, in the same order as entities.
  """
  return [each_entity.put_async(**ctx_options) for each_entity in entities]
def put_multi(entities, **ctx_options):
  """Store a sequence of Model instances, blocking until all are written.

  Args:
    entities: A sequence of Model instances.
    **ctx_options: Context options.

  Returns:
    A list with the stored keys.
  """
  futures = put_multi_async(entities, **ctx_options)
  return [fut.get_result() for fut in futures]
def delete_multi_async(keys, **ctx_options):
  """Kick off an asynchronous delete for each key in a sequence.

  Args:
    keys: A sequence of keys.
    **ctx_options: Context options, forwarded to every delete.

  Returns:
    A list of futures, one per key, in the same order as keys.
  """
  return [each_key.delete_async(**ctx_options) for each_key in keys]
def delete_multi(keys, **ctx_options):
  """Delete a sequence of keys, blocking until all deletes finish.

  Args:
    keys: A sequence of keys.
    **ctx_options: Context options.

  Returns:
    A list whose items are all None, one per deleted key.
  """
  futures = delete_multi_async(keys, **ctx_options)
  return [fut.get_result() for fut in futures]
def get_indexes_async(**ctx_options):
  """Asynchronously fetch a data structure describing the configured indexes.

  Args:
    **ctx_options: Context options.

  Returns:
    A future.
  """
  from . import tasklets
  return tasklets.get_context().get_indexes(**ctx_options)
def get_indexes(**ctx_options):
  """Fetch a data structure describing the configured indexes, blocking.

  Args:
    **ctx_options: Context options.

  Returns:
    A list of Index objects.
  """
  fut = get_indexes_async(**ctx_options)
  return fut.get_result()
# Update __all__ to contain all Property and Exception subclasses.
for _name, _object in globals().items():
  # The name-suffix check short-circuits before issubclass(), which would
  # raise TypeError for non-class values in the module namespace.
  if ((_name.endswith('Property') and issubclass(_object, Property)) or
      (_name.endswith('Error') and issubclass(_object, Exception))):
    __all__.append(_name)
| Python |
"""Run all unittests."""
__author__ = 'Beech Horn'
import sys
import unittest
try:
  import ndb
  location = 'ndb'
except ImportError:
  # Not importable as a top-level package; fall back to the google3 path.
  import google3.third_party.apphosting.python.ndb
  location = 'google3.third_party.apphosting.python.ndb'
def load_tests():
  """Build one TestSuite containing every *Tests case from the ndb test modules.

  Imports ``<mod>_test`` submodules from whichever package location was
  detected at import time, then collects every attribute whose name ends
  in ``Tests``.
  """
  mods = ['context', 'eventloop', 'key', 'metadata', 'model', 'polymodel',
          'prospective_search', 'query', 'stats', 'tasklets', 'blobstore']
  test_mods = ['%s_test' % name for name in mods]
  package = __import__(location, fromlist=test_mods, level=1)
  loader = unittest.TestLoader()
  suite = unittest.TestSuite()
  for mod in [getattr(package, name) for name in test_mods]:
    for attr in set(dir(mod)):
      if attr.endswith('Tests'):
        case_class = getattr(mod, attr)
        suite.addTests(loader.loadTestsFromTestCase(case_class))
  return suite
def main():
  """Parse -v/-q flags, run the suite, and exit nonzero on failure."""
  verbosity = 1
  for arg in sys.argv[1:]:
    if arg.startswith('-v'):
      # each 'v' bumps verbosity: -v, -vv, ...
      verbosity += arg.count('v')
    elif arg == '-q':
      verbosity = 0
  runner = unittest.TextTestRunner(verbosity=verbosity)
  result = runner.run(load_tests())
  sys.exit(not result.wasSuccessful())


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
#
# library for accessing a web service (API) with the OAuth protocol
# (trying to make web service and web app server independent, not there yet)
#
# This library is a derivative of the tweetapp framework by tav@espians.com available at:
# http://github.com/tav/tweetapp/tree/master
#
# Other credits include:
# The "official" OAuth python library: http://oauth.googlecode.com/svn/code/python/
# The fftogo application: http://github.com/bgolub/fftogo/tree/master
# The FriendFeed python library: http://code.google.com/p/friendfeed-api/
#
""""OAuth library for making RESTful API calls using the OAuth protocol"""
import cgi
import logging
import urllib
import time
from hashlib import sha1
from hmac import new as hmac
from random import getrandbits
from google.appengine.api import urlfetch
# We require a JSON parsing library. These seem to be the most popular.
# decode_json always takes a UTF-8 encoded byte string and returns parsed data.
try:
    import cjson
    decode_json = lambda s: cjson.decode(s.decode("utf-8"), True)
except ImportError:
    try:
        # Django includes simplejson
        from django.utils import simplejson
        decode_json = lambda s: simplejson.loads(s.decode("utf-8"))
    except ImportError:
        # Fall back to the standard-library json module. The original code
        # called json.read() and _unicodify(), which belong to the old
        # python-json library and are undefined here (AttributeError /
        # NameError as soon as this branch was exercised).
        import json
        decode_json = lambda s: json.loads(s.decode("utf-8"))
# ------------------------------------------------------------------------------
# oauth client
# ------------------------------------------------------------------------------
class OAuthToken(object):
    """Value object representing an End User via a request or access token.

    Attributes:
        key: the token itself.
        secret: the token secret.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def to_string(self):
        """Serialize as a URL-encoded query string."""
        return urllib.urlencode({'oauth_token': self.key,
                                 'oauth_token_secret': self.secret})

    @staticmethod
    def from_string(s):
        """Parse a token from a string like
        ``oauth_token_secret=digg&oauth_token=digg``."""
        params = cgi.parse_qs(s, keep_blank_values=False)
        return OAuthToken(params['oauth_token'][0],
                          params['oauth_token_secret'][0])

    def __str__(self):
        return self.to_string()
class OAuthClient(object):
    """Signs and issues OAuth 1.0 (HMAC-SHA1) requests against a web service.

    ``service_info`` is a dict describing the remote service: consumer key
    and secret, the three OAuth endpoint URLs, an optional API prefix/suffix
    and an optional callback URL.
    """

    def __init__(self, webapp_api, service_info, token=None):
        # ``webapp_api`` is accepted for interface compatibility; it is not
        # used by this class itself.
        self.service_info = service_info
        self.service_key = None  # lazily built HMAC key: consumer_secret + '&'
        self.oauth_callback = service_info['oauth_callback']
        self.token = token

    # public methods

    def get(self, api_method, **extra_params):
        """Perform a signed GET and decode the JSON response.

        ``api_method`` may be a bare method name (expanded with the
        service's default prefix/suffix) or a full URL.

        Raises:
            ValueError: if the service returns a non-200 status.
        """
        api_method = self._expand_url(api_method)
        fetch = urlfetch.fetch(self.get_signed_url(
            api_method, self.token, **extra_params
        ))
        self._check_status(fetch)
        return decode_json(fetch.content)

    def post(self, api_method, **extra_params):
        """Perform a signed POST (form-encoded body) and decode the JSON response.

        Raises:
            ValueError: if the service returns a non-200 status.
        """
        api_method = self._expand_url(api_method)
        payload = self.get_signed_payload(
            api_method, self.token, **extra_params
        )
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        fetch = urlfetch.fetch(api_method, payload=payload,
                               method=urlfetch.POST, headers=headers)
        self._check_status(fetch)
        return decode_json(fetch.content)

    # oauth workflow

    def get_request_token(self):
        """First OAuth leg: obtain an unauthorized request token."""
        token_info = self.get_data_from_signed_url(
            self.service_info['request_token_url'])
        return OAuthToken.from_string(token_info)

    def get_access_token(self, oauth_token):
        """Third OAuth leg: exchange an authorized request token for an access token."""
        token_info = self.get_data_from_signed_url(
            self.service_info['access_token_url'], oauth_token
        )
        return OAuthToken.from_string(token_info)

    def get_authorize_url(self, oauth_token):
        """Second OAuth leg: the URL the user visits to authorize the request token."""
        if self.oauth_callback:
            oauth_callback = {'oauth_callback': self.oauth_callback}
        else:
            oauth_callback = {}
        return self.get_signed_url(
            self.service_info['user_auth_url'], oauth_token, **oauth_callback
        )

    # request marshalling

    def get_data_from_signed_url(self, __url, __token=None, __meth='GET', **extra_params):
        """Fetch a signed URL and return the raw response body.

        Raises:
            ValueError: if the service returns a non-200 status.
        """
        fetch = urlfetch.fetch(self.get_signed_url(
            __url, __token, __meth, **extra_params
        ))
        self._check_status(fetch)
        return fetch.content

    def get_signed_url(self, __url, __token=None, __meth='GET', **extra_params):
        """Return ``__url`` with the signed OAuth parameters as its query string."""
        kwargs = self._signed_params(__url, __token, __meth, extra_params)
        return '%s?%s' % (__url, urllib.urlencode(kwargs))

    def get_signed_payload(self, __url, __token=None, __meth='POST', **extra_params):
        """Return the signed OAuth parameters URL-encoded for use as a POST body."""
        kwargs = self._signed_params(__url, __token, __meth, extra_params)
        return urllib.urlencode(kwargs)

    # internal helpers

    def _expand_url(self, api_method):
        """Prepend/append the service's default API prefix/suffix unless
        ``api_method`` is already an absolute URL."""
        if api_method.startswith('http://') or api_method.startswith('https://'):
            return api_method
        return '%s%s%s' % (
            self.service_info['default_api_prefix'], api_method,
            self.service_info['default_api_suffix']
        )

    def _check_status(self, fetch):
        """Raise ValueError for any non-200 response (message kept identical
        to the original duplicated checks)."""
        if fetch.status_code != 200:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )

    def _signed_params(self, url, token, meth, extra_params):
        """Build the OAuth parameter dict, including the HMAC-SHA1 signature.

        This is the common core that get_signed_url() and
        get_signed_payload() previously duplicated verbatim.
        """
        kwargs = {
            'oauth_consumer_key': self.service_info['consumer_key'],
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_timestamp': int(time.time()),
            'oauth_nonce': getrandbits(64),
        }
        kwargs.update(extra_params)
        if self.service_key is None:
            self.service_key = self.service_info['consumer_secret'] + '&'
        if token is not None:
            kwargs['oauth_token'] = token.key
            key = self.service_key + encode(token.secret)
        else:
            key = self.service_key
        # Signature base string: METHOD&url&sorted-encoded-params.
        message = '&'.join(map(encode, [
            meth.upper(), url, '&'.join(
                '%s=%s' % (encode(k), encode(kwargs[k])) for k in sorted(kwargs)
            )
        ]))
        # base64 digest, trailing newline stripped.
        kwargs['oauth_signature'] = hmac(
            key, message, sha1
        ).digest().encode('base64')[:-1]
        return kwargs
# ------------------------------------------------------------------------------
# utility functions
# ------------------------------------------------------------------------------
def encode(text):
    """Percent-encode ``text`` for OAuth signing: coerce to str and escape
    every reserved character (empty safe set)."""
    return urllib.quote(str(text), safe='')
def _encodify(s):
    # Coerce any value to a UTF-8 encoded byte string (Python 2 only:
    # uses the ``unicode`` builtin).
    # NOTE(review): this looks like it was meant to be the ``_unicodify``
    # helper referenced by the json fallback near the top of the file;
    # as written, that branch would raise NameError — confirm intent.
    return unicode(s).encode('utf-8')
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging
from google.appengine.api import memcache
from google.appengine.datastore import entity_pb
from google.appengine.ext import db
import counters
import feeds
import models
import utils
import webapp2
# use underscores since usernames are guaranteed to not have them
# still a problem with journal names?
# Memcache key format strings; the %s slots are filled in by the helpers below.
C_ACTIVITIES = 'activities_%s_%s_%s'
C_ACTIVITIES_FOLLOWER = 'activities_follower_%s'
C_ACTIVITIES_FOLLOWER_DATA = 'activities_follower_data_%s'
C_ACTIVITIES_FOLLOWER_KEYS = 'activities_follower_keys_%s'
C_BLOG_COUNT = 'blog_count'
C_BLOG_ENTRIES_KEYS = 'blog_entries_keys'
C_BLOG_ENTRIES_KEYS_PAGE = 'blog_entries_keys_page_%s'
C_BLOG_ENTRIES_PAGE = 'blog_entries_page_%s'
C_BLOG_TOP = 'blog_top'
C_ENTRIES_KEYS = 'entries_keys_%s'
C_ENTRIES_KEYS_PAGE = 'entries_keys_page_%s_%s'
C_ENTRIES_PAGE = 'entries_page_%s_%s_%s'
C_ENTRY = 'entry_%s_%s_%s'
C_ENTRY_KEY = 'entry_key_%s_%s_%s'
C_ENTRY_RENDER = 'entry_render_%s_%s_%s'
C_FEED = 'feed_%s_%s'
C_FOLLOWERS = 'followers_%s'
C_FOLLOWING = 'following_%s'
C_JOURNAL = 'journal_%s_%s'
C_JOURNALS = 'journals_%s'
C_JOURNAL_KEY = 'journal_key_%s_%s'
C_JOURNAL_LIST = 'journals_list_%s'
C_KEY = 'key_%s'
C_STATS = 'stats'
# Store one value under the cache key ``c % args``.
# NOTE: intentionally named ``set`` (shadows the builtin within this module).
def set(value, c, *args):
	memcache.set(c %args, value)
# Store many key -> value pairs in one round trip.
def set_multi(mapping):
	memcache.set_multi(mapping)
# Cache packed entities under their datastore key (C_KEY namespace).
def set_keys(entities):
	memcache.set_multi(dict([(C_KEY %i.key(), pack(i)) for i in entities]))
# Remove the given cache keys.
def delete(keys):
	memcache.delete_multi(keys)
# Wipe the entire memcache.
def flush():
	memcache.flush_all()
def pack(models):
	"""Serialize a Model instance (or a list of them) to protobuf bytes.

	None passes through unchanged so cache misses stay transparent.
	"""
	if models is None:
		return None
	if isinstance(models, db.Model):
		# a single entity
		return db.model_to_protobuf(models).Encode()
	# a sequence of entities
	return [db.model_to_protobuf(m).Encode() for m in models]
def unpack(data):
	"""Inverse of pack(): decode protobuf bytes back into Model instances.

	None passes through unchanged; a single str decodes to one entity,
	anything else is treated as a list of serialized entities.
	"""
	if data is None:
		return None
	if isinstance(data, str):
		# a single serialized entity
		return db.model_from_protobuf(entity_pb.EntityProto(data))
	return [db.model_from_protobuf(entity_pb.EntityProto(blob)) for blob in data]
def get_by_key(key):
	"""Fetch a single entity by datastore key, memcache-first."""
	cache_key = C_KEY %key
	entity = unpack(memcache.get(cache_key))
	if entity is None:
		entity = db.get(key)
		memcache.add(cache_key, pack(entity))
	return entity
# idea: use async functions, although i'm not convinced it'd be faster
# fetches all keys; if kind is specified, converts the given key names to keys of that kind
def get_by_keys(keys, kind=None):
	"""Fetch many entities at once, memcache-first, preserving input order.

	If ``kind`` is given, ``keys`` are key names that get converted to
	datastore keys of that kind first.
	"""
	if kind:
		keys = [str(db.Key.from_path(kind, name)) for name in keys]
	client = memcache.Client()
	cached = client.get_multi(keys)
	data = [cached.get(k) for k in keys]
	if None in data:
		# fill the cache misses with a single datastore round trip
		missing = [i for i, item in enumerate(data) if item is None]
		fetch_keys = [keys[i] for i in missing]
		fetched = db.get(fetch_keys)
		set_multi(dict(zip(fetch_keys, fetched)))
		for slot, entity in zip(missing, fetched):
			data[slot] = entity
	return data
# Return all of a user's Journal entities (ancestor query), memcache-first.
def get_journals(user_key):
	n = C_JOURNALS %user_key
	data = unpack(memcache.get(n))
	if data is None:
		data = models.Journal.all().ancestor(user_key).fetch(models.Journal.MAX_JOURNALS)
		memcache.add(n, pack(data))
	return data
# returns a list of journal names
# (actually (url, name) tuples, suitable for building navigation links)
def get_journal_list(user_key):
	n = C_JOURNAL_LIST %user_key
	data = memcache.get(n)
	if data is None:
		journals = get_journals(user_key)
		data = [(i.url(), i.name) for i in journals]
		memcache.add(n, data)
	return data
# returns all entry keys sorted by descending date
def get_entries_keys(journal_key):
	n = C_ENTRIES_KEYS %journal_key
	data = memcache.get(n)
	if data is None:
		# todo: fix limit to 1000 most recent journal entries
		data = models.Entry.all(keys_only=True).ancestor(journal_key).order('-date').fetch(1000)
		memcache.add(n, data)
	return data
# returns entry keys of given page
# Pages are 1-based; slicing an out-of-range page yields [] and logs a warning.
def get_entries_keys_page(journal_key, page):
	n = C_ENTRIES_KEYS_PAGE %(journal_key, page)
	data = memcache.get(n)
	if data is None:
		entries = get_entries_keys(journal_key)
		data = entries[(page - 1) * models.Journal.ENTRIES_PER_PAGE:page * models.Journal.ENTRIES_PER_PAGE]
		memcache.add(n, data)
		if not data:
			logging.warning('Page %i requested from %s, but only %i entries, %i pages.', page, journal_key, len(entries), len(entries) / models.Journal.ENTRIES_PER_PAGE + 1)
	return data
# returns entries of given page
def get_entries_page(username, journal_name, page, journal_key):
	"""Return the rendered entries for one page of a journal, memcache-first.

	Pages are 1-based; out-of-range pages clamp to 1. The clamp happens
	BEFORE the cache key is built: previously a request for page 0 (or a
	negative page) was cached under its own key with page-1 content, and
	clear_entries_cache() only deletes keys for pages >= 1, so those alias
	entries could serve stale content after a new entry was posted.
	"""
	if page < 1:
		page = 1
	n = C_ENTRIES_PAGE %(username, journal_name, page)
	data = memcache.get(n)
	if data is None:
		entries = get_entries_keys_page(journal_key, page)
		data = [unicode(get_entry_render(username, journal_name, i.id())) for i in entries]
		memcache.add(n, data)
	return data
def get_entry_key(username, journal_name, entry_id):
	# Resolve (username, journal name, entry id) to the Entry's datastore
	# key, memcache-first. Returns None (and caches nothing) if the entry
	# does not exist.
	n = C_ENTRY_KEY %(username, journal_name, entry_id)
	data = memcache.get(n)
	if data is None:
		data = db.get(db.Key.from_path('Entry', long(entry_id), parent=get_journal_key(username, journal_name)))
		if data:
			data = data.key()
			memcache.add(n, data)
	return data
# called when a new entry is posted, and we must clear all the entry and page cache
def clear_entries_cache(journal_key):
	journal = get_by_key(journal_key)
	# also drop the user's cached journal list (entry counts changed)
	keys = [C_ENTRIES_KEYS %journal_key, C_JOURNALS %journal_key.parent()]
	# add one key per page for get_entries_page and get_entries_keys_page
	# (Python 2 integer division)
	for p in range(1, journal.entry_count / models.Journal.ENTRIES_PER_PAGE + 2):
		keys.extend([C_ENTRIES_PAGE %(journal.key().parent().name(), journal.name, p), C_ENTRIES_KEYS_PAGE %(journal_key, p)])
	memcache.delete_multi(keys)
def get_stats():
	# Site-wide counter totals as (counter name, count) pairs, memcache-first.
	n = C_STATS
	data = memcache.get(n)
	if data is None:
		data = [(i, counters.get_count(i)) for i in [
			counters.COUNTER_USERS,
			counters.COUNTER_JOURNALS,
			counters.COUNTER_ENTRIES,
			counters.COUNTER_CHARS,
			counters.COUNTER_WORDS,
			counters.COUNTER_SENTENCES,
		]]
		memcache.add(n, data)
	return data
def clear_journal_cache(user_key):
	# Invalidate both cached journal representations for a user.
	memcache.delete_multi([C_JOURNALS %user_key, C_JOURNAL_LIST %user_key])
def get_activities(username='', action='', object_key=''):
	# Recent Activity entities matching any combination of filters; cached
	# only briefly since the feed changes often.
	n = C_ACTIVITIES %(username, action, object_key)
	data = unpack(memcache.get(n))
	if data is None:
		data = models.Activity.all()
		if username:
			data = data.filter('user', username)
		if action:
			data = data.filter('action', action)
		if object_key:
			data = data.filter('object', object_key)
		data = data.order('-date').fetch(models.Activity.RESULTS)
		memcache.add(n, pack(data), 60) # cache for 1 minute
	return data
def get_activities_follower_keys(username):
	# Keys of the latest activities whose index lists ``username`` as a receiver.
	n = C_ACTIVITIES_FOLLOWER_KEYS %username
	data = memcache.get(n)
	if data is None:
		index_keys = models.ActivityIndex.all(keys_only=True).filter('receivers', username).order('-date').fetch(50)
		data = [str(i.parent()) for i in index_keys]
		memcache.add(n, data, 300) # cache for 5 minutes
	return data
def get_activities_follower_data(keys):
	# Batch-fetch the Activity entities for a fixed key list; the cache key
	# is derived from the keys themselves, so the cached value never goes stale.
	n = C_ACTIVITIES_FOLLOWER_DATA %'_'.join(keys)
	data = unpack(memcache.get(n))
	if data is None:
		data = db.get(keys)
		memcache.add(n, pack(data)) # no limit on this cache since this data never changes
	return data
def get_activities_follower(username):
	# The activity feed shown to ``username``.
	n = C_ACTIVITIES_FOLLOWER %username
	data = unpack(memcache.get(n))
	if data is None:
		keys = get_activities_follower_keys(username)
		# perhaps the keys didn't change, so keep a backup of that data
		data = get_activities_follower_data(keys)
		memcache.add(n, pack(data), 300) # cache for 5 minutes
	return data
def get_feed(feed, token):
	# Rendered feed output, cached for 10 minutes.
	n = C_FEED %(feed, token)
	data = memcache.get(n)
	if data is None:
		data = feeds.feed(feed, token)
		memcache.add(n, data, 600) # cache for 10 minutes
	return data
def get_user(username):
	# Users are keyed directly by username.
	user_key = db.Key.from_path('User', username)
	return get_by_key(user_key)
def get_followers(username):
	# Usernames following ``username`` (empty list if no index entity yet).
	n = C_FOLLOWERS %username
	data = memcache.get(n)
	if data is None:
		followers = models.UserFollowersIndex.get_by_key_name(username, parent=db.Key.from_path('User', username))
		if not followers:
			data = []
		else:
			data = followers.users
		memcache.add(n, data)
	return data
def get_following(username):
	# Usernames that ``username`` follows (empty list if no index entity yet).
	n = C_FOLLOWING %username
	data = memcache.get(n)
	if data is None:
		following = models.UserFollowingIndex.get_by_key_name(username, parent=db.Key.from_path('User', username))
		if not following:
			data = []
		else:
			data = following.users
		memcache.add(n, data)
	return data
def get_journal(username, journal_name):
	# Journal entity for (user, journal name), memcache-first.
	n = C_JOURNAL %(username, journal_name)
	data = unpack(memcache.get(n))
	if data is None:
		journal_key = get_journal_key(username, journal_name)
		if journal_key:
			data = db.get(journal_key)
			memcache.add(n, pack(data))
	return data
def get_journal_key(username, journal_name):
	# Datastore key of a journal, found by name inside the user's entity group.
	n = C_JOURNAL_KEY %(username, journal_name)
	data = memcache.get(n)
	if data is None:
		user_key = db.Key.from_path('User', username)
		data = models.Journal.all(keys_only=True).ancestor(user_key).filter('name', journal_name.decode('utf-8')).get()
		memcache.add(n, data)
	return data
def get_entry(username, journal_name, entry_id, entry_key=None):
	# Return (entry, content, blobs) for one journal entry, memcache-first.
	# The cached value is a tuple of packed protobufs, so both the cached
	# and the freshly fetched path go through the same unpack() below.
	n = C_ENTRY %(username, journal_name, entry_id)
	data = memcache.get(n)
	if data is None:
		if not entry_key:
			entry_key = get_entry_key(username, journal_name, entry_id)
		entry = get_by_key(entry_key)
		# try async queries here
		content = get_by_key(entry.content_key)
		if entry.blobs:
			blobs = pack(db.get(entry.blob_keys))
		else:
			blobs = []
		data = (pack(entry), pack(content), blobs)
		memcache.add(n, data)
	entry, content, blobs = data
	entry = unpack(entry)
	content = unpack(content)
	blobs = unpack(blobs)
	return entry, content, blobs
def get_entry_render(username, journal_name, entry_id):
	# Rendered HTML for a single entry, memcache-first.
	n = C_ENTRY_RENDER %(username, journal_name, entry_id)
	data = memcache.get(n)
	if data is None:
		entry, content, blobs = get_entry(username, journal_name, entry_id)
		data = utils.render('entry-render.html', {
			'blobs': blobs,
			'content': content,
			'entry': entry,
			'entry_url': webapp2.uri_for('view-entry', username=username, journal_name=journal_name, entry_id=entry_id),
		})
		memcache.add(n, data)
	return data
def get_blog_entries_page(page):
	"""Return the blog entries for one page, memcache-first.

	Pages are 1-based; out-of-range pages clamp to 1 BEFORE the cache key
	is built. Previously page 0 / negative pages were cached under their
	own keys with page-1 content, and clear_blog_entries_cache() only
	deletes keys for pages >= 1, so those aliases could go stale after a
	new blog entry was posted.
	"""
	if page < 1:
		page = 1
	n = C_BLOG_ENTRIES_PAGE %page
	data = unpack(memcache.get(n))
	if data is None:
		entries = get_blog_entries_keys_page(page)
		data = [get_by_key(i) for i in entries]
		memcache.add(n, pack(data))
	return data
# returns all blog entry keys sorted by descending date
# (drafts are excluded)
def get_blog_entries_keys():
	n = C_BLOG_ENTRIES_KEYS
	data = memcache.get(n)
	if data is None:
		# todo: fix limit to 1000 most recent blog entries
		data = models.BlogEntry.all(keys_only=True).filter('draft', False).order('-date').fetch(1000)
		memcache.add(n, data)
	return data
# returns blog entry keys of given page
# (1-based; an out-of-range page yields [] and logs a warning)
def get_blog_entries_keys_page(page):
	n = C_BLOG_ENTRIES_KEYS_PAGE %page
	data = memcache.get(n)
	if data is None:
		entries = get_blog_entries_keys()
		data = entries[(page - 1) * models.BlogEntry.ENTRIES_PER_PAGE:page * models.BlogEntry.ENTRIES_PER_PAGE]
		memcache.add(n, data)
		if not data:
			logging.warning('Page %i requested from blog, but only %i entries, %i pages.', page, len(entries), len(entries) / models.BlogEntry.ENTRIES_PER_PAGE + 1)
	return data
# called when a new blog entry is posted, and we must clear all the entry and page cache
def clear_blog_entries_cache():
	keys = [C_BLOG_ENTRIES_KEYS, C_BLOG_COUNT, C_BLOG_TOP]
	# add one key per page for get_blog_entries_page and get_blog_entries_keys_page
	# (Python 2 integer division)
	for p in range(1, get_blog_count() / models.BlogEntry.ENTRIES_PER_PAGE + 2):
		keys.extend([C_BLOG_ENTRIES_PAGE %p, C_BLOG_ENTRIES_KEYS_PAGE %p])
	memcache.delete_multi(keys)
def get_blog_count():
	"""Return the total number of published blog entries, memcache-first.

	The count lives on a Config singleton entity. If that entity is
	missing, get_by_key_name() returns None and the ``.count`` attribute
	access raises AttributeError, which means "no entries yet". The
	original bare ``except:`` also swallowed datastore errors (and even
	KeyboardInterrupt); catch only the expected failure.
	"""
	n = C_BLOG_COUNT
	data = memcache.get(n)
	if data is None:
		try:
			data = models.Config.get_by_key_name('blog_count').count
		except AttributeError:
			data = 0
		memcache.add(n, data)
	return data
def get_blog_top():
	# Rendered "top 25 blog entries" fragment, memcache-first.
	n = C_BLOG_TOP
	data = memcache.get(n)
	if data is None:
		keys = get_blog_entries_keys()[:25]
		blogentries = db.get(keys)
		data = utils.render('blog-top.html', {'top': blogentries})
		memcache.add(n, data)
	return data
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import with_statement
import base64
import datetime
import logging
import re
import os
from django.utils import html
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from webapp2_extras import sessions
import django
import json
import webapp2
import cache
import counters
import facebook
import filters
import models
import settings
import twitter
import utils
class BaseHandler(webapp2.RequestHandler):
	"""Base class for page handlers: session handling, flash messages,
	and template rendering with the common context variables."""

	def render(self, _template, context=None):
		"""Render ``_template`` with ``context`` plus session-derived extras.

		``context`` previously defaulted to ``{}``; since this method
		mutates it, the single shared default dict accumulated keys across
		calls. Default to None and build a fresh dict per call instead.
		"""
		if context is None:
			context = {}
		context['session'] = self.session
		context['user'] = self.session.get('user')
		context['messages'] = self.get_messages()
		# highlight the nav item matching the template's base name
		context['active'] = _template.partition('.')[0]
		for k in ['login_source']:
			if k in self.session:
				context[k] = self.session[k]
		if settings.GOOGLE_ANALYTICS:
			context['google_analytics'] = settings.GOOGLE_ANALYTICS
		rv = utils.render(_template, context)
		self.response.write(rv)

	def dispatch(self):
		# Wrap normal dispatch so session changes are always persisted,
		# even if the handler raises.
		self.session_store = sessions.get_store(request=self.request)
		try:
			webapp2.RequestHandler.dispatch(self)
		finally:
			self.session_store.save_sessions(self.response)

	@webapp2.cached_property
	def session(self):
		return self.session_store.get_session(backend='datastore')

	# This should be called anytime the session data needs to be updated.
	# session['var'] = var should never be used, except in this function
	def populate_user_session(self, user=None):
		if 'user' not in self.session and not user:
			return
		elif not user:
			user = cache.get_user(self.session['user']['name'])
		self.session['user'] = {
			'admin': users.is_current_user_admin(),
			'avatar': user.gravatar(),
			'email': user.email,
			'key': str(user.key()),
			'name': user.name,
			'token': user.token,
		}
		self.session['journals'] = cache.get_journal_list(db.Key(self.session['user']['key']))

	MESSAGE_KEY = '_flash_message'

	def add_message(self, level, message):
		self.session.add_flash(message, level, BaseHandler.MESSAGE_KEY)

	def get_messages(self):
		return self.session.get_flashes(BaseHandler.MESSAGE_KEY)

	def process_credentials(self, name, email, source, uid):
		"""Look up the user by external account id; start registration if unknown.

		Returns (user_or_None, registered_bool).
		"""
		user = models.User.all().filter('%s_id' %source, uid).get()
		if not user:
			registered = False
			self.session['register'] = {'name': name, 'email': email, 'source': source, 'uid': uid}
		else:
			registered = True
			self.populate_user_session(user)
			self.session['login_source'] = source
			user.put() # to update last_active
		return user, registered

	def logout(self):
		for k in ['user', 'journals']:
			if k in self.session:
				del self.session[k]
class BaseUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
	# Blobstore upload handlers do not go through BaseHandler.dispatch(),
	# so the session store is created lazily and changes must be persisted
	# explicitly via store() after mutating the session.
	session_store = None
	def add_message(self, level, message):
		self.session.add_flash(message, level, BaseHandler.MESSAGE_KEY)
		self.store()
	def store(self):
		# persist session changes onto the response
		self.session_store.save_sessions(self.response)
	@webapp2.cached_property
	def session(self):
		if not self.session_store:
			self.session_store = sessions.get_store(request=self.request)
		return self.session_store.get_session(backend='datastore')
class MainPage(BaseHandler):
	def get(self):
		# Logged-in users get their dashboard; everyone else the landing page.
		if 'user' in self.session:
			following = cache.get_by_keys(cache.get_following(self.session['user']['name']), 'User')
			followers = cache.get_by_keys(cache.get_followers(self.session['user']['name']), 'User')
			journals = cache.get_journals(db.Key(self.session['user']['key']))
			self.render('index-user.html', {
				'activities': cache.get_activities_follower(self.session['user']['name']),
				'journals': journals,
				'thisuser': True,
				'token': self.session['user']['token'],
				'following': following,
				'followers': followers,
			})
		else:
			self.render('index.html')
class FacebookCallback(BaseHandler):
	# OAuth redirect target: exchange the ``code`` for an access token,
	# stash it in the session, then bounce back to the local handler that
	# started the flow (with callback='callback' so it knows to resume).
	def get(self):
		if 'code' in self.request.GET and 'local_redirect' in self.request.GET:
			local_redirect = self.request.get('local_redirect')
			access_dict = facebook.access_dict(self.request.get('code'), {'local_redirect': local_redirect})
			if access_dict:
				self.session['access_token'] = access_dict['access_token']
				self.redirect(webapp2.uri_for(local_redirect, callback='callback'))
				return
		self.redirect(webapp2.uri_for('main'))
class GoogleLogin(BaseHandler):
	def get(self):
		# Google users arrive already authenticated by App Engine.
		current_user = users.get_current_user()
		user, registered = self.process_credentials(current_user.nickname(), current_user.email(), models.USER_SOURCE_GOOGLE, current_user.user_id())
		if not registered:
			self.redirect(webapp2.uri_for('register'))
		else:
			self.redirect(webapp2.uri_for('main'))
class FacebookLogin(BaseHandler):
	def get(self):
		# Second pass (``callback`` present): an access token is in the
		# session; fetch the profile, then log in or start registration.
		if 'callback' in self.request.GET:
			user_data = facebook.graph_request(self.session['access_token'])
			if user_data is not False and 'username' in user_data and 'email' in user_data:
				user, registered = self.process_credentials(user_data['username'], user_data['email'], models.USER_SOURCE_FACEBOOK, user_data['id'])
				if not registered:
					self.redirect(webapp2.uri_for('register'))
					return
		else:
			# First pass: send the user to Facebook's OAuth dialog.
			self.redirect(facebook.oauth_url({'local_redirect': 'login-facebook'}, {'scope': 'email'}))
			return
		self.redirect(webapp2.uri_for('main'))
class Register(BaseHandler):
	"""Complete account creation after an external (Google/Facebook) login."""

	# lowercase alphanumerics and dashes, must not start with a dash
	USERNAME_RE = re.compile("^[a-z0-9][a-z0-9-]+$")

	def get(self):
		return self.post()

	def post(self):
		# Only meaningful mid-registration: session['register'] was set by
		# process_credentials() for a previously unseen external account.
		if 'register' in self.session:
			errors = {}
			if 'submit' in self.request.POST:
				username = self.request.get('username')
				lusername = username.lower()
				email = self.request.get('email')
				lusers = models.User.all(keys_only=True).filter('lname', lusername).get()
				if not Register.USERNAME_RE.match(lusername):
					errors['username'] = 'Username may only contain alphanumeric characters or dashes and cannot begin with a dash.'
				elif lusername in RESERVED_NAMES or lusers:
					errors['username'] = 'Username is already taken.'
				else:
					source = self.session['register']['source']
					uid = self.session['register']['uid']
					if not email:
						email = None
					user = models.User.get_or_insert(username,
						name=username,
						lname=lusername,
						email=email,
						facebook_id=uid if source == models.USER_SOURCE_FACEBOOK else None,
						google_id=uid if source == models.USER_SOURCE_GOOGLE else None,
						token=base64.urlsafe_b64encode(os.urandom(30))[:32],
					)
					# get_or_insert returns the pre-existing user on a name
					# race; detect that by checking the external id round-trip.
					if getattr(user, '%s_id' %source) != uid:
						errors['username'] = 'Username is already taken.'
					else:
						del self.session['register']
						self.populate_user_session(user)
						counters.increment(counters.COUNTER_USERS)
						# fix: message previously misspelled the site name ('jounalr')
						self.add_message('success', '%s, you have been registered at journalr.' %user)
						self.redirect(webapp2.uri_for('new-journal'))
						return
			else:
				username = ''
				email = self.session['register']['email']
			self.render('register.html', {'username': username, 'email': email, 'errors': errors})
		else:
			self.redirect(webapp2.uri_for('main'))
class Logout(BaseHandler):
	def get(self):
		# Clear our own session, then go home.
		self.logout()
		self.redirect(webapp2.uri_for('main'))
class GoogleSwitch(BaseHandler):
	def get(self):
		# Log out of both the app and Google, then restart Google login so
		# the user can pick a different Google account.
		self.logout()
		self.redirect(users.create_logout_url(webapp2.uri_for('login-google')))
class AccountHandler(BaseHandler):
	"""Account settings page: profile email plus social/backup integrations."""
	def get(self):
		# Handles four optional query-string actions (callback, disable,
		# enable, deauthorize) before rendering the settings page. The
		# template context bundles per-integration link/label state.
		if 'user' not in self.session:
			self.add_message('error', 'You must log in to access your account.')
			self.redirect(webapp2.uri_for('main'))
			return
		u = cache.get_user(self.session['user']['name'])
		changed = False
		if 'callback' in self.request.GET:
			# returning from the Facebook OAuth dialog
			if 'access_token' in self.session:
				user_data = facebook.graph_request(self.session['access_token'])
				if u.facebook_id and user_data['id'] != u.facebook_id:
					self.add_message('error', 'This account has already been attached to a facebook account.')
				else:
					u.facebook_id = user_data['id']
					u.facebook_enable = True
					u.facebook_token = self.session['access_token']
					changed = True
					self.add_message('success', 'Facebook integration enabled.')
		elif 'disable' in self.request.GET:
			disable = self.request.get('disable')
			if disable in models.USER_SOCIAL_NETWORKS or disable in models.USER_BACKUP_NETWORKS:
				setattr(u, '%s_enable' %disable, False)
				self.add_message('success', '%s posting disabled.' %disable.title())
				changed = True
		elif 'enable' in self.request.GET:
			enable = self.request.get('enable')
			if enable in models.USER_SOCIAL_NETWORKS or enable in models.USER_BACKUP_NETWORKS:
				setattr(u, '%s_enable' %enable, True)
				self.add_message('success', '%s posting enabled.' %enable.title())
				changed = True
		elif 'deauthorize' in self.request.GET:
			# drop stored credentials for the named network
			deauthorize = self.request.get('deauthorize')
			changed = True
			if deauthorize == models.USER_SOURCE_FACEBOOK:
				u.facebook_token = None
				u.facebook_enable = False
				self.add_message('success', 'Facebook posting deauthorized.')
			elif deauthorize == models.USER_SOURCE_TWITTER:
				u.twitter_key = None
				u.twitter_secret = None
				u.twitter_enable = None
				self.add_message('success', 'Twitter posting deauthorized.')
			elif deauthorize == models.USER_BACKUP_DROPBOX:
				u.dropbox_token = None
				u.dropbox_enable = None
				self.add_message('success', 'Dropbox backup deauthorized.')
			elif deauthorize == models.USER_BACKUP_GOOGLE_DOCS:
				utils.google_revoke(u.google_docs_token)
				u.google_docs_token = None
				u.google_docs_enable = None
				self.add_message('success', 'Google Docs backup deauthorized.')
		if changed:
			u.put()
			cache.set_keys([u])
		# Each integration entry: authorize/deauthorize link, enable/disable
		# link (disabled until authorized), and a status label.
		self.render('account.html', {
			'u': u,
			'backup':
			{
				'dropbox': {
					'auth_text': 'authorize' if not u.dropbox_token else 'deauthorize',
					'auth_url': webapp2.uri_for('dropbox', action='authorize') if not u.dropbox_token else webapp2.uri_for('account', deauthorize='dropbox'),
					'enable_class': 'disabled' if not u.dropbox_token else '',
					'enable_text': 'enable' if not u.dropbox_enable or not u.dropbox_token else 'disable',
					'enable_url': '#' if not u.dropbox_token else webapp2.uri_for('account', enable='dropbox') if not u.dropbox_enable else webapp2.uri_for('account', disable='dropbox'),
					'label_class': 'warning' if not u.dropbox_token else 'success' if u.dropbox_enable else 'important',
					'label_text': 'not authorized' if not u.dropbox_token else 'enabled' if u.dropbox_enable else 'disabled',
				},
				'google docs': {
					'auth_text': 'authorize' if not u.google_docs_token else 'deauthorize',
					'auth_url': webapp2.uri_for('google', action='authorize') if not u.google_docs_token else webapp2.uri_for('account', deauthorize='google_docs'),
					'enable_class': 'disabled' if not u.google_docs_token else '',
					'enable_text': 'enable' if not u.google_docs_enable or not u.google_docs_token else 'disable',
					'enable_url': '#' if not u.google_docs_token else webapp2.uri_for('account', enable='google_docs') if not u.google_docs_enable else webapp2.uri_for('account', disable='google_docs'),
					'label_class': 'warning' if not u.google_docs_token else 'success' if u.google_docs_enable else 'important',
					'label_text': 'not authorized' if not u.google_docs_token else 'enabled' if u.google_docs_enable else 'disabled',
				},
			},
			'social': {
				'facebook': {
					'auth_text': 'authorize' if not u.facebook_token else 'deauthorize',
					'auth_url': facebook.oauth_url({'local_redirect': 'account'}, {'scope': 'publish_stream,offline_access'}) if not u.facebook_token else webapp2.uri_for('account', deauthorize='facebook'),
					'enable_class': 'disabled' if not u.facebook_token else '',
					'enable_text': 'enable' if not u.facebook_enable or not u.facebook_token else 'disable',
					'enable_url': '#' if not u.facebook_token else webapp2.uri_for('account', enable='facebook') if not u.facebook_enable else webapp2.uri_for('account', disable='facebook'),
					'label_class': 'warning' if not u.facebook_token else 'success' if u.facebook_enable else 'important',
					'label_text': 'not authorized' if not u.facebook_token else 'enabled' if u.facebook_enable else 'disabled',
				},
				'twitter': {
					'auth_text': 'authorize' if not u.twitter_key else 'deauthorize',
					'auth_url': webapp2.uri_for('twitter', action='login') if not u.twitter_key else webapp2.uri_for('account', deauthorize='twitter'),
					'enable_class': 'disabled' if not u.twitter_key else '',
					'enable_text': 'enable' if not u.twitter_enable or not u.twitter_key else 'disable',
					'enable_url': '#' if not u.twitter_key else webapp2.uri_for('account', enable='twitter') if not u.twitter_enable else webapp2.uri_for('account', disable='twitter'),
					'label_class': 'warning' if not u.twitter_key else 'success' if u.twitter_enable else 'important',
					'label_text': 'not authorized' if not u.twitter_key else 'enabled' if u.twitter_enable else 'disabled',
				},
			},
		})
	def post(self):
		# Save the settings form (email and/or social toggles); only write
		# to the datastore when something actually changed.
		changed = False
		u = cache.get_user(self.session['user']['name'])
		if 'settings' in self.request.POST:
			if 'email' in self.request.POST:
				email = self.request.get('email')
				if not email:
					email = None
				self.add_message('success', 'Email address updated.')
				if self.session['user']['email'] != email:
					u.email = email
					changed = True
		if 'social' in self.request.POST:
			self.add_message('success', 'Social media settings saved.')
			facebook_enable = 'facebook' in self.request.POST and self.request.get('facebook') == 'on'
			if u.facebook_enable != facebook_enable:
				u.facebook_enable = facebook_enable
				changed = True
		if changed:
			u.put()
			cache.set_keys([u])
		self.populate_user_session()
		self.redirect(webapp2.uri_for('account'))
class NewJournal(BaseHandler):
	"""Create a new journal for the logged-in user."""
	def get(self):
		# Blank creation form.
		self.render('new-journal.html')
	def post(self):
		"""Validate the name, then create the journal in a transaction."""
		name = self.request.get('name')
		if len(self.session['journals']) >= models.Journal.MAX_JOURNALS:
			self.add_message('error', 'Only %i journals allowed.' %models.Journal.MAX_JOURNALS)
		elif not name:
			self.add_message('error', 'Your journal needs a name.')
		else:
			journal = models.Journal(parent=db.Key(self.session['user']['key']), name=name)
			# session['journals'] holds (url, name) pairs; reject duplicates.
			for journal_url, journal_name in self.session['journals']:
				if journal.name == journal_name:
					self.add_message('error', 'You already have a journal called %s.' %name)
					break
			else:
				# No duplicate found: save the journal and bump the user's
				# journal_count atomically in one entity-group transaction.
				def txn(user_key, journal):
					user = db.get(user_key)
					user.journal_count += 1
					db.put([user, journal])
					return user, journal
				user, journal = db.run_in_transaction(txn, self.session['user']['key'], journal)
				cache.clear_journal_cache(user.key())
				cache.set(cache.pack(user), cache.C_KEY, user.key())
				self.populate_user_session()
				counters.increment(counters.COUNTER_JOURNALS)
				models.Activity.create(user, models.ACTIVITY_NEW_JOURNAL, journal.key())
				self.add_message('success', 'Created your journal %s.' %name)
				self.redirect(webapp2.uri_for('new-entry', username=self.session['user']['name'], journal_name=journal.name))
				# return here so the error-path re-render below is skipped
				return
		# Reached only on validation failure: show the form again.
		self.render('new-journal.html')
class ViewJournal(BaseHandler):
	"""Render one page of a journal's entries (owner only)."""
	def get(self, username, journal_name):
		"""Show page `?page=N` (default 1) of `username`'s journal.

		404s when the journal does not exist, the page is out of range,
		the page parameter is not an integer (previously a 500), or the
		viewer is not the journal's owner. The original code also had a
		dead second `if not journal` check after a guard that already
		returned; it has been removed.
		"""
		try:
			page = int(self.request.get('page', 1))
		except ValueError:
			self.error(404)
			return
		journal = cache.get_journal(username, journal_name)
		if not journal or page < 1 or page > journal.pages or username != self.session['user']['name']:
			self.error(404)
			return
		self.render('view-journal.html', {
			'username': username,
			'journal': journal,
			'entries': cache.get_entries_page(username, journal_name, page, journal.key()),
			'page': page,
			'pagelist': utils.page_list(page, journal.pages),
		})
class AboutHandler(BaseHandler):
	"""Static 'about' page."""
	def get(self):
		template = 'about.html'
		self.render(template)
class StatsHandler(BaseHandler):
	"""Site-wide statistics page."""
	def get(self):
		context = {'stats': cache.get_stats()}
		self.render('stats.html', context)
class ActivityHandler(BaseHandler):
	"""Recent site activity feed page."""
	def get(self):
		context = {'activities': cache.get_activities()}
		self.render('activity.html', context)
class FeedsHandler(BaseHandler):
	"""Serve a cached feed document; 404 for unknown feed/token."""
	def get(self, feed):
		xml = cache.get_feed(feed, self.request.get('token'))
		if xml:
			self.response.out.write(xml)
		else:
			self.error(404)
class UserHandler(BaseHandler):
	"""Public profile page for a user: journals, activity, follow lists."""
	def get(self, username):
		u = cache.get_user(username)
		if not u:
			self.error(404)
			return
		# Name of the logged-in viewer, or None for anonymous visitors.
		viewer = self.session['user']['name'] if 'user' in self.session else None
		self.render('user.html', {
			'u': u,
			'journals': cache.get_journals(u.key()),
			'activities': cache.get_activities(username=username),
			'following': cache.get_following(username),
			'followers': cache.get_followers(username),
			# anonymous viewers follow nobody and are never "this user"
			'is_following': viewer is not None and username in cache.get_following(viewer),
			'thisuser': viewer == u.name,
		})
class FollowHandler(BaseHandler):
	"""Follow or unfollow `username` on behalf of the logged-in user.

	Maintains two index entities in a cross-group transaction:
	UserFollowingIndex under the follower and UserFollowersIndex under
	the followed user.
	"""
	def get(self, username):
		user = cache.get_user(username)
		if not user or 'user' not in self.session:
			self.error(404)
			return
		thisuser = self.session['user']['name']
		# NOTE(review): webapp2's redirect() only sets the response; it does
		# not abort the handler, so the follow/unfollow work below still runs.
		self.redirect(webapp2.uri_for('user', username=username))
		# don't allow users to follow themselves
		if thisuser == username:
			return
		# ?unfollow present -> remove the relationship, otherwise add it.
		if 'unfollow' in self.request.GET:
			op = 'del'
			unop = 'add'
		else:
			op = 'add'
			unop = 'del'
		# unop is assigned but never used below — candidate for removal.
		xg_on = db.create_transaction_options(xg=True)
		def txn(thisuser, otheruser, op):
			# thisuser/otheruser are db.Keys; .name() is the key name (username).
			tu, ou = db.get([thisuser, otheruser])
			if not tu:
				tu = models.UserFollowingIndex(key=thisuser)
			if not ou:
				ou = models.UserFollowersIndex(key=otheruser)
			changed = []
			if op == 'add':
				if thisuser.name() not in ou.users:
					ou.users.append(thisuser.name())
					changed.append(ou)
				if otheruser.name() not in tu.users:
					tu.users.append(otheruser.name())
					changed.append(tu)
			elif op == 'del':
				if thisuser.name() in ou.users:
					ou.users.remove(thisuser.name())
					changed.append(ou)
				if otheruser.name() in tu.users:
					tu.users.remove(otheruser.name())
					changed.append(tu)
			db.put(changed)
			return tu, ou
		# Both index entities are keyed by the username they belong to.
		followers_key = db.Key.from_path('User', username, 'UserFollowersIndex', username)
		following_key = db.Key.from_path('User', thisuser, 'UserFollowingIndex', thisuser)
		following, followers = db.run_in_transaction_options(xg_on, txn, following_key, followers_key, op)
		if op == 'add':
			self.add_message('success', 'You are now following %s.' %username)
			models.Activity.create(cache.get_by_key(self.session['user']['key']), models.ACTIVITY_FOLLOWING, user)
		elif op == 'del':
			self.add_message('success', 'You are no longer following %s.' %username)
		# Refresh the cached follow lists for both sides.
		cache.set_multi({
			cache.C_FOLLOWERS %username: followers.users,
			cache.C_FOLLOWING %thisuser: following.users,
		})
class NewEntryHandler(BaseHandler):
	"""Create an empty entry in a journal and jump to its edit view."""
	def get(self, username, journal_name):
		# Only the journal owner may add entries.
		if username != self.session['user']['name']:
			self.error(404)
			return
		journal_key = cache.get_journal_key(username, journal_name)
		if not journal_key:
			self.error(404)
			return
		def txn(user_key, journal_key, entry, content):
			# Save entry + content and bump both counters atomically.
			user, journal = db.get([user_key, journal_key])
			journal.entry_count += 1
			user.entry_count += 1
			db.put([user, journal, entry, content])
			return user, journal
		# Pre-allocate ids so the Entry/EntryContent keys are known before
		# the transaction (content stores its parent entry, entry stores the
		# content id).
		handmade_key = db.Key.from_path('Entry', 1, parent=journal_key)
		entry_id = db.allocate_ids(handmade_key, 1)[0]
		entry_key = db.Key.from_path('Entry', entry_id, parent=journal_key)
		handmade_key = db.Key.from_path('EntryContent', 1, parent=entry_key)
		content_id = db.allocate_ids(handmade_key, 1)[0]
		content_key = db.Key.from_path('EntryContent', content_id, parent=entry_key)
		content = models.EntryContent(key=content_key)
		entry = models.Entry(key=entry_key, content=content_id)
		user, journal = db.run_in_transaction(txn, self.session['user']['key'], journal_key, entry, content)
		# move this to new entry saving for first time
		models.Activity.create(user, models.ACTIVITY_NEW_ENTRY, entry.key())
		counters.increment(counters.COUNTER_ENTRIES)
		cache.clear_entries_cache(journal.key())
		cache.set_keys([user, journal, entry, content])
		cache.set(cache.pack(journal), cache.C_JOURNAL, username, journal_name)
		# Queue social announcements for any enabled, authorized networks.
		if user.facebook_token and user.facebook_enable:
			taskqueue.add(queue_name='retry-limit', url=webapp2.uri_for('social-post'), params={'entry_key': entry_key, 'network': models.USER_SOURCE_FACEBOOK, 'username': user.name})
		if user.twitter_key and user.twitter_enable:
			taskqueue.add(queue_name='retry-limit', url=webapp2.uri_for('social-post'), params={'entry_key': entry_key, 'network': models.USER_SOURCE_TWITTER, 'username': user.name})
		self.redirect(webapp2.uri_for('view-entry', username=username, journal_name=journal_name, entry_id=entry_id))
class ViewEntryHandler(BaseHandler):
	"""Display a single entry; with ?pdf, serve a cached PDF rendition."""
	def get(self, username, journal_name, entry_id):
		journal_name = journal_name.decode('utf-8')
		# Entries are private to their owner.
		if self.session['user']['name'] != username:
			self.error(404) # should probably be change to 401 or 403
			return
		entry, content, blobs = cache.get_entry(username, journal_name, entry_id)
		if not entry:
			self.error(404)
			return
		user = cache.get_user(username)
		if 'pdf' in self.request.GET:
			# PDF renditions are cached as a Blob child keyed 'pdf'.
			pdf_blob = models.Blob.get_by_key_name('pdf', parent=entry)
			error = None
			# either no cached entry, or it's outdated
			if not pdf_blob or pdf_blob.date < entry.last_edited:
				if pdf_blob:
					pdf_blob.blob.delete()
				file_name = files.blobstore.create(mime_type='application/pdf')
				subject = content.subject if content.subject else filters.jdate(entry.date)
				with files.open(file_name, 'a') as f:
					error = utils.convert_html(f, subject, [(entry, content, blobs)])
				files.finalize(file_name)
				pdf_blob = models.Blob(
					key_name='pdf',
					parent=entry,
					blob=files.blobstore.get_blob_key(file_name),
					type=models.BLOB_TYPE_PDF,
					name='%s - %s - %s' %(username, utils.deunicode(journal_name), subject),
					date=entry.last_edited,
				)
				if error:
					# Conversion failed: discard the blob and fall through to
					# the normal HTML render with an error flash.
					pdf_blob.blob.delete()
					self.add_message('error', 'Error while converting to PDF: %s' %error)
				else:
					pdf_blob.put()
			if not error:
				self.redirect(pdf_blob.get_url(name=True))
				return
		self.render('entry.html', {
			'blobs': blobs,
			'content': content,
			'entry': entry,
			'journal_name': journal_name,
			'render': cache.get_entry_render(username, journal_name, entry_id),
			'username': username,
			'upload_url': webapp2.uri_for('upload-url', username=username, journal_name=journal_name, entry_id=entry_id),
			'can_upload': user.can_upload(),
			'markup_options': utils.render_options(models.CONTENT_TYPE_CHOICES, content.markup),
		})
class GetUploadURL(BaseHandler):
	"""Return a blobstore upload URL for an entry, or '' when not allowed."""
	def get(self, username, journal_name, entry_id):
		user = cache.get_by_key(self.session['user']['key'])
		# Guard: quota available and the requester owns the journal.
		if not user.can_upload() or user.name != username:
			self.response.out.write('')
			return
		upload_path = webapp2.uri_for('upload-file',
			username=username,
			journal_name=journal_name,
			entry_id=entry_id
		)
		url = blobstore.create_upload_url(upload_path, max_bytes_per_blob=models.Blob.MAXSIZE)
		self.response.out.write(url)
class SaveEntryHandler(BaseHandler):
	"""Save edits to an entry, or delete it entirely.

	Both paths run a datastore transaction that keeps the denormalized
	counters (chars/words/sentences, entry counts, first/last entry
	dates) on the Journal and User entities in sync with the entry.
	"""
	def post(self):
		username = self.request.get('username')
		journal_name = self.request.get('journal_name')
		entry_id = long(self.request.get('entry_id'))
		delete = self.request.get('delete')
		if username != self.session['user']['name']:
			self.error(404)
			return
		# Default redirect target (back to the entry). NOTE(review):
		# webapp2's redirect() does not abort the handler; the delete path
		# below overrides this with a redirect to the journal.
		self.redirect(webapp2.uri_for('view-entry', username=username, journal_name=journal_name, entry_id=entry_id))
		entry, content, blobs = cache.get_entry(username, journal_name, entry_id)
		if delete == 'delete':
			journal_key = entry.key().parent()
			user_key = journal_key.parent()
			def txn(user_key, journal_key, entry_key, content_key, blobs):
				# Delete entry + content + blobs, subtract its stats from
				# journal and user, and fix the journal's first/last dates.
				entry = db.get(entry_key)
				delete = [entry_key, content_key]
				delete.extend([i.key() for i in blobs])
				db.delete_async(delete)
				user, journal = db.get([user_key, journal_key])
				journal.entry_count -= 1
				user.entry_count -= 1
				journal.chars -= entry.chars
				journal.words -= entry.words
				journal.sentences -= entry.sentences
				user.chars -= entry.chars
				user.words -= entry.words
				user.sentences -= entry.sentences
				for i in blobs:
					user.used_data -= i.size
				user.count()
				db.put_async(user)
				# just deleted the last journal entry
				if journal.entry_count == 0:
					journal.last_entry = None
					journal.first_entry = None
				# only 1 left (but there are 2 in the datastore still)
				else:
					# find last entry
					entries = models.Entry.all().ancestor(journal).order('-date').fetch(2)
					logging.info('%s last entries returned', len(entries))
					for e in entries:
						if e.key() != entry.key():
							journal.last_entry = e.date
							break
					else:
						logging.error('Did not find n last entry not %s', entry.key())
					# find first entry
					entries = models.Entry.all().ancestor(journal).order('date').fetch(2)
					logging.info('%s first entries returned', len(entries))
					for e in entries:
						if e.key() != entry.key():
							journal.first_entry = e.date
							break
					else:
						logging.error('Did not find n first entry not %s', entry.key())
				journal.count()
				db.put(journal)
				return user, journal
			user, journal = db.run_in_transaction(txn, user_key, journal_key, entry.key(), content.key(), blobs)
			# Remove the blobstore payloads and (redundantly, outside the
			# txn) the entry/content entities, then roll back site counters.
			blobstore.delete([i.get_key('blob') for i in blobs])
			db.delete([entry, content])
			counters.increment(counters.COUNTER_ENTRIES, -1)
			counters.increment(counters.COUNTER_CHARS, -entry.chars)
			counters.increment(counters.COUNTER_SENTENCES, -entry.sentences)
			counters.increment(counters.COUNTER_WORDS, -entry.words)
			cache.clear_entries_cache(journal_key)
			cache.set_keys([user, journal])
			cache.set(cache.pack(journal), cache.C_JOURNAL, username, journal_name)
			self.add_message('success', 'Entry deleted.')
			self.redirect(webapp2.uri_for('view-journal', username=username, journal_name=journal_name))
		else:
			subject = self.request.get('subject').strip()
			tags = self.request.get('tags').strip()
			text = self.request.get('text').strip()
			markup = self.request.get('markup')
			# ids of blobs the client wants to keep attached
			blob_list = self.request.get_all('blob')
			date = self.request.get('date').strip()
			time = self.request.get('time').strip()
			if not time:
				time = '12:00 AM'
			try:
				newdate = datetime.datetime.strptime('%s %s' %(date, time), '%m/%d/%Y %I:%M %p')
			except:
				self.add_message('error', 'Couldn\'t understand that date: %s %s' %(date, time))
				newdate = entry.date
			if tags:
				tags = [i.strip() for i in self.request.get('tags').split(',')]
			else:
				tags = []
			def txn(entry_key, content_key, rm_blobs, subject, tags, text, markup, rendered, chars, words, sentences, date):
				# Apply the edit: new content, stat deltas on journal/user,
				# removed blobs, and refreshed first/last entry dates.
				db.delete_async(rm_blobs)
				user, journal, entry = db.get([entry_key.parent().parent(), entry_key.parent(), entry_key])
				# deltas between the old and new text statistics
				dchars = -entry.chars + chars
				dwords = -entry.words + words
				dsentences = -entry.sentences + sentences
				journal.chars += dchars
				journal.words += dwords
				journal.sentences += dsentences
				user.chars += dchars
				user.words += dwords
				user.sentences += dsentences
				entry.chars = chars
				entry.words = words
				entry.sentences = sentences
				entry.date = date
				user.set_dates()
				user.count()
				content = models.EntryContent(key=content_key)
				content.subject = subject
				content.tags = tags
				content.text = text
				content.markup = markup
				content.rendered = rendered
				for i in rm_blobs:
					user.used_data -= i.size
					entry.blobs.remove(str(i.key().id()))
				db.put_async([user, entry, content])
				# just added the first journal entry
				if journal.entry_count == 1:
					journal.last_entry = date
					journal.first_entry = date
				else:
					# find last entry
					entries = models.Entry.all().ancestor(journal).order('-date').fetch(2)
					logging.info('%s last entries returned', len(entries))
					for e in entries:
						if e.key() != entry.key():
							if date > e.date:
								journal.last_entry = date
							else:
								journal.last_entry = e.date
							break
					else:
						logging.error('Did not find n last entry not %s', entry.key())
					# find first entry
					entries = models.Entry.all().ancestor(journal).order('date').fetch(2)
					logging.info('%s first entries returned', len(entries))
					for e in entries:
						if e.key() != entry.key():
							if date < e.date:
								journal.first_entry = date
							else:
								journal.first_entry = e.date
							break
					else:
						logging.error('Did not find n first entry not %s', entry.key())
				journal.count()
				db.put(journal)
				return user, journal, entry, content, dchars, dwords, dsentences
			# Blobs no longer referenced by the form get deleted.
			rm_blobs = []
			for b in blobs:
				bid = str(b.key().id())
				if bid not in blob_list:
					b.delete()
					rm_blobs.append(b)
			for b in rm_blobs:
				blobs.remove(b)
			rendered = utils.markup(text, markup)
			if text:
				# Statistics are computed on the tag-stripped rendering.
				nohtml = html.strip_tags(rendered)
				chars = len(nohtml)
				words = len(entry.WORD_RE.findall(nohtml))
				sentences = len(entry.SENTENCE_RE.split(nohtml))
			else:
				chars = 0
				words = 0
				sentences = 0
			user, journal, entry, content, dchars, dwords, dsentences = db.run_in_transaction(txn, entry.key(), content.key(), rm_blobs, subject, tags, text, markup, rendered, chars, words, sentences, newdate)
			models.Activity.create(cache.get_user(username), models.ACTIVITY_SAVE_ENTRY, entry.key())
			counters.increment(counters.COUNTER_CHARS, dchars)
			counters.increment(counters.COUNTER_SENTENCES, dsentences)
			counters.increment(counters.COUNTER_WORDS, dwords)
			entry_render = utils.render('entry-render.html', {
				'blobs': blobs,
				'content': content,
				'entry': entry,
				'entry_url': webapp2.uri_for('view-entry', username=username, journal_name=journal_name, entry_id=entry_id),
			})
			cache.set(entry_render, cache.C_ENTRY_RENDER, username, journal_name, entry_id)
			cache.set_keys([user, journal])
			cache.set_multi({
				cache.C_KEY %user.key(): cache.pack(user),
				cache.C_ENTRY_RENDER %(username, journal_name, entry_id): entry_render,
				cache.C_ENTRY %(username, journal_name, entry_id): (cache.pack(entry), cache.pack(content), cache.pack(blobs)),
			})
			# Queue off-site backups for any enabled, authorized services.
			if user.dropbox_enable and user.dropbox_token:
				taskqueue.add(queue_name='retry-limit', url=webapp2.uri_for('backup'), params={'entry_key': entry.key(), 'network': models.USER_BACKUP_DROPBOX, 'journal_name': journal_name, 'username': username})
			if user.google_docs_enable and user.google_docs_token:
				taskqueue.add(queue_name='retry-limit', url=webapp2.uri_for('backup'), params={'entry_key': entry.key(), 'network': models.USER_BACKUP_GOOGLE_DOCS, 'journal_name': journal_name, 'username': username})
			self.add_message('success', 'Your entry has been saved.')
			cache.clear_entries_cache(entry.key().parent())
			cache.set((cache.pack(entry), cache.pack(content), cache.pack(blobs)), cache.C_ENTRY, username, journal_name, entry_id)
class UploadHandler(BaseUploadHandler):
	"""Blobstore upload callback: attach an uploaded image to an entry."""
	def post(self, username, journal_name, entry_id):
		# Only the entry owner may attach files.
		if username != self.session['user']['name']:
			self.error(404)
			return
		entry_key = cache.get_entry_key(username, journal_name, entry_id)
		uploads = self.get_uploads()
		# Only a single image upload is accepted; anything else keeps
		# blob_type at -1 and is rejected below.
		blob_type = -1
		if len(uploads) == 1:
			blob = uploads[0]
			if blob.content_type.startswith('image/'):
				blob_type = models.BLOB_TYPE_IMAGE
		if not entry_key or self.session['user']['name'] != username or blob_type == -1:
			# Reject: delete the orphaned blobstore payloads.
			for upload in uploads:
				upload.delete()
			return
		def txn(user_key, entry_key, blob):
			# Record the blob on the entry and charge the user's quota.
			user, entry = db.get([user_key, entry_key])
			user.used_data += blob.size
			entry.blobs.append(str(blob.key().id()))
			db.put([user, entry, blob])
			return user, entry
		# Pre-allocate an id so the Blob key is known before the txn.
		handmade_key = db.Key.from_path('Blob', 1, parent=entry_key)
		blob_id = db.allocate_ids(handmade_key, 1)[0]
		blob_key = db.Key.from_path('Blob', blob_id, parent=entry_key)
		new_blob = models.Blob(key=blob_key, blob=blob, type=blob_type, name=blob.filename, size=blob.size)
		new_blob.get_url()
		user, entry = db.run_in_transaction(txn, entry_key.parent().parent(), entry_key, new_blob)
		# Invalidate every cached view that embeds this entry.
		cache.delete([
			cache.C_KEY %user.key(),
			cache.C_KEY %entry.key(),
			cache.C_ENTRY %(username, journal_name, entry_id),
			cache.C_ENTRY_RENDER %(username, journal_name, entry_id),
		])
		cache.clear_entries_cache(entry.key().parent())
		self.redirect(webapp2.uri_for('upload-success', blob_id=blob_id, name=new_blob.name, size=new_blob.size, url=new_blob.get_url()))
class UploadSuccess(BaseHandler):
	"""Echo upload metadata back to the client as a JSON object."""
	def get(self):
		fields = ('blob_id', 'name', 'size', 'url')
		payload = {field: self.request.get(field) for field in fields}
		self.response.out.write(json.dumps(payload))
class FlushMemcache(BaseHandler):
	"""Admin endpoint: wipe the entire memcache."""
	def get(self):
		cache.flush()
		context = {'msg': 'memcache flushed'}
		self.render('admin.html', context)
class NewBlogHandler(BaseHandler):
	"""Admin endpoint: create an empty blog entry and open its editor."""
	def get(self):
		session_user = self.session['user']
		entry = models.BlogEntry(user=session_user['name'], avatar=session_user['avatar'])
		entry.put()
		self.redirect(webapp2.uri_for('edit-blog', blog_id=entry.key().id()))
class EditBlogHandler(BaseHandler):
	"""Admin editor for a blog entry: edit, publish/unpublish, delete."""
	def get(self, blog_id):
		b = models.BlogEntry.get_by_id(long(blog_id))
		if not b:
			self.error(404)
			return
		self.render('edit-blog.html', {
			'b': b,
			'markup_options': utils.render_options(models.RENDER_TYPE_CHOICES, b.markup),
		})
	def post(self, blog_id):
		"""Save or delete the entry, keeping the published-post count in sync."""
		b = models.BlogEntry.get_by_id(long(blog_id))
		delete = self.request.get('delete')
		if not b:
			self.error(404)
			return
		if delete == 'Delete entry':
			b.delete()
			# Published posts are counted in the 'blog_count' Config row;
			# only decrement when deleting a non-draft.
			if not b.draft:
				def txn():
					c = models.Config.get_by_key_name('blog_count')
					c.count -= 1
					c.put()
				db.run_in_transaction(txn)
				cache.clear_blog_entries_cache()
			self.add_message('success', 'Blog entry deleted.')
			self.redirect(webapp2.uri_for('blog-drafts'))
			return
		title = self.request.get('title').strip()
		if not title:
			# Keep the old title; the rest of the form is still saved.
			self.add_message('error', 'Must specify a title.')
		else:
			b.title = title
		b.text = self.request.get('text').strip()
		b.markup = self.request.get('markup')
		b.slug = '%s-%s' %(blog_id, utils.slugify(b.title))
		draft = self.request.get('draft') == 'on'
		# new post
		if not draft and b.draft:
			blog_count = 1
		# was post, now draft
		elif draft and not b.draft:
			blog_count = -1
		else:
			blog_count = 0
		if blog_count:
			def txn(config_key, blog_count):
				c = db.get(config_key)
				c.count += blog_count
				c.put()
			c = models.Config.get_or_insert('blog_count', count=0)
			db.run_in_transaction(txn, c.key(), blog_count)
			cache.clear_blog_entries_cache()
		b.draft = draft
		date = self.request.get('date').strip()
		time = self.request.get('time').strip()
		try:
			b.date = datetime.datetime.strptime('%s %s' %(date, time), '%m/%d/%Y %I:%M %p')
		except:
			# Unparseable date: keep the entry's existing date.
			self.add_message('error', 'Couldn\'t understand that date: %s %s' %(date, time))
		b.rendered = utils.markup(b.text, b.markup)
		b.put()
		self.add_message('success', 'Blog entry saved.')
		self.redirect(webapp2.uri_for('edit-blog', blog_id=blog_id))
class BlogHandler(BaseHandler):
	"""Paginated listing of published blog entries."""
	def get(self):
		"""Render blog page ?page=N (default 1).

		Bug fix: the page count now uses ceiling division, so a final
		partial page is reachable (floor division previously made e.g.
		entry 11 of 11 unreachable at 10 entries per page).
		"""
		page = int(self.request.get('page', 1))
		entries = cache.get_blog_entries_page(page)
		per_page = models.BlogEntry.ENTRIES_PER_PAGE
		# ceiling division: a partial final page still counts as a page
		pages = (cache.get_blog_count() + per_page - 1) / per_page
		if pages < 1:
			pages = 1
		if page < 1 or page > pages:
			self.error(404)
			return
		self.render('blog.html', {
			'entries': entries,
			'page': page,
			'pages': pages,
			'pagelist': utils.page_list(page, pages),
			'top': cache.get_blog_top(),
		})
class BlogEntryHandler(BaseHandler):
	"""Display a single blog entry addressed by an '<id>-<slug>' path."""
	def get(self, entry):
		"""Render the entry whose numeric id prefixes the slug.

		Bug fix: a malformed id or a missing entry now returns 404;
		previously a bad id raised a 500 and a missing entry was passed
		to the template as None.
		"""
		try:
			blog_id = long(entry.partition('-')[0])
		except ValueError:
			self.error(404)
			return
		blog_entry = models.BlogEntry.get_by_id(blog_id)
		if not blog_entry:
			self.error(404)
			return
		self.render('blog-entry.html', {
			'entry': blog_entry,
			'top': cache.get_blog_top(),
		})
class BlogDraftsHandler(BaseHandler):
	"""Admin listing of unpublished (draft) blog entries."""
	def get(self):
		drafts = models.BlogEntry.all().filter('draft', True).order('-date').fetch(500)
		context = {'entries': drafts}
		self.render('blog-drafts.html', context)
class MarkupHandler(BaseHandler):
	"""Static help page describing the supported markup formats."""
	def get(self):
		template = 'markup.html'
		self.render(template)
class SecurityHandler(BaseHandler):
	"""Static security-information page."""
	def get(self):
		template = 'security.html'
		self.render(template)
class UpdateUsersHandler(BaseHandler):
	"""Admin batch-migration tool: walk all users with a cursor, applying a
	(fill-in-the-blank) per-user update inside a transaction, 10 at a time.
	"""
	def get(self):
		q = models.User.all(keys_only=True)
		cursor = self.request.get('cursor')
		if cursor:
			q.with_cursor(cursor)
		def txn(user_key):
			u = db.get(user_key)
			# custom update code here
			u.put()
			return u
		LIMIT = 10
		ukeys = q.fetch(LIMIT)
		for u in ukeys:
			user = db.run_in_transaction(txn, u)
			self.response.out.write('<br>updated %s: %s' %(user.name, user.lname))
		# A full batch implies there may be more; link to the next page.
		if len(ukeys) == LIMIT:
			self.response.out.write('<br><a href="%s">next</a>' %webapp2.uri_for('update-users', cursor=q.cursor()))
		else:
			self.response.out.write('<br>done')
class BlobHandler(blobstore_handlers.BlobstoreDownloadHandler):
	"""Stream a blobstore blob; ?name=True serves it as a download."""
	def get(self, key):
		info = blobstore.BlobInfo.get(key)
		name = self.request.get('name')
		# the literal string 'True' means "use the blob's own filename"
		if name == 'True':
			name = True
		if info:
			self.send_blob(info, save_as=name)
		else:
			self.error(404)
# from https://github.com/ryanwi/twitteroauth/blob/master/source/main.py
class TwitterHandler(BaseHandler):
	"""Twitter OAuth flow: 'login' starts it, 'callback' completes it."""
	def get(self, action):
		if 'user' not in self.session:
			self.redirect(webapp2.uri_for('main'))
			return
		self._client = twitter.oauth_client(self)
		if action == 'login':
			self.login()
		elif action == 'callback':
			self.callback()
	def login(self):
		"""Get a request token, stash it in the session, send the user to Twitter."""
		# get a request token
		raw_request_token = self._client.get_request_token()
		self.session['twitter_token'] = raw_request_token.key
		self.session['twitter_secret'] = raw_request_token.secret
		# get the authorize url and redirect to twitter
		authorize_url = self._client.get_authorize_url(raw_request_token)
		self.redirect(authorize_url)
	def callback(self):
		"""Exchange the session's request token for an access token and store it."""
		if 'denied' in self.request.GET:
			self.redirect(webapp2.uri_for('account'))
			# Bug fix: webapp2's redirect() does not abort the handler, so
			# without this return the code below would still try to exchange
			# a token the user just refused.
			return
		# get an access token for the authorized user
		oauth_token = twitter.oauth_token(self.session['twitter_token'], self.session['twitter_secret'])
		raw_access_token = self._client.get_access_token(oauth_token)
		# get the screen_name
		self._client = twitter.oauth_client(self, raw_access_token)
		screen_name = self._client.get('/account/verify_credentials')['screen_name']
		# store access token on the user, enabling twitter posting
		def txn(user_key, screen_name, key, secret):
			u = db.get(user_key)
			u.twitter_id = screen_name
			u.twitter_key = key
			u.twitter_secret = secret
			u.twitter_enable = True
			u.put()
			return u
		user = db.run_in_transaction(txn, self.session['user']['key'], screen_name, raw_access_token.key, raw_access_token.secret)
		cache.set_keys([user])
		self.redirect(webapp2.uri_for('account'))
class SocialPost(BaseHandler):
	"""Task-queue worker: announce a new entry on the user's social networks."""
	def post(self):
		entry_key = db.Key(self.request.get('entry_key'))
		network = self.request.get('network')
		username = self.request.get('username')
		MESSAGE = 'Wrote a new entry on journalr.'
		NAME = 'my journalr account'
		link = utils.absolute_uri('user', username=username)
		user = cache.get_by_key(entry_key.parent().parent())
		if network == models.USER_SOURCE_FACEBOOK and user.facebook_token and user.facebook_enable:
			facebook.graph_request(user.facebook_token, method='POST', path='/feed', payload_dict={
				'message': MESSAGE,
				'link': link,
				'name': NAME,
			})
		if network == models.USER_SOURCE_TWITTER and user.twitter_id and user.twitter_key and user.twitter_secret:
			oauth_token = twitter.oauth_token(user.twitter_key, user.twitter_secret)
			client = twitter.oauth_client(None, oauth_token)
			client.post('/statuses/update', status='%s %s' %(MESSAGE, link))
class FollowingHandler(BaseHandler):
	"""Page listing who `username` follows and who follows them."""
	def get(self, username):
		context = {
			'u': cache.get_user(username),
			'following': cache.get_by_keys(cache.get_following(username), 'User'),
			'followers': cache.get_by_keys(cache.get_followers(username), 'User'),
		}
		self.render('following.html', context)
class DownloadJournalHandler(BaseHandler):
	"""Export a date range of a journal as a PDF (owner only).

	The rendered PDF is cached as a Blob keyed by the date range and
	regenerated when the journal has been modified since.
	"""
	def get(self, username, journal_name):
		if username != self.session['user']['name']:
			self.error(404)
			return
		journal_key = cache.get_journal_key(username, journal_name)
		if not journal_key:
			self.error(404)
			return
		journal = cache.get_by_key(journal_key)
		DATE_FORMAT = '%m/%d/%Y'
		errors = []
		error = None
		# Parse the optional ?from/?to bounds; a present-but-invalid value
		# is a form error, an absent one falls back to the journal's range.
		try:
			from_date = datetime.datetime.strptime(self.request.get('from'), DATE_FORMAT)
		except ValueError:
			if 'from' in self.request.GET:
				errors.append('from')
			from_date = journal.first_entry
		try:
			to_date = datetime.datetime.strptime(self.request.get('to'), DATE_FORMAT)
		except ValueError:
			if 'to' in self.request.GET:
				errors.append('to')
			to_date = journal.last_entry
		if not errors and 'format' in self.request.GET and from_date and to_date:
			key_name = 'pdf-%s-%s' %(from_date, to_date)
			key = db.Key.from_path('Blob', key_name, parent=journal_key)
			pdf_blob = db.get(key)
			# either no cached entry, or it's outdated
			if not pdf_blob or pdf_blob.date < journal.last_modified:
				if pdf_blob:
					pdf_blob.blob.delete()
				file_name = files.blobstore.create(mime_type='application/pdf')
				title = '%s: %s to %s' %(journal.name, from_date.strftime(DATE_FORMAT), to_date.strftime(DATE_FORMAT))
				entries = []
				# to_date is inclusive: filter date < to_date + 1 day.
				for entry_key in models.Entry.all(keys_only=True).ancestor(journal).filter('date >=', from_date).filter('date <', to_date + datetime.timedelta(1)).order('date'):
					entries.append(cache.get_entry(username, journal_name, entry_key.id(), entry_key))
				with files.open(file_name, 'a') as f:
					error = utils.convert_html(f, title, entries)
				files.finalize(file_name)
				pdf_blob = models.Blob(
					key=key,
					blob=files.blobstore.get_blob_key(file_name),
					type=models.BLOB_TYPE_PDF,
					name='%s - %s - %s to %s' %(username, utils.deunicode(journal_name.decode('utf-8')), from_date.strftime(DATE_FORMAT), to_date.strftime(DATE_FORMAT)),
					date=journal.last_modified,
				)
				if error:
					# Conversion failed: discard the blob, fall through to
					# the form with an error flash.
					pdf_blob.blob.delete()
					self.add_message('error', 'Error while converting to PDF: %s' %error)
				else:
					pdf_blob.put()
			if not error:
				self.redirect(pdf_blob.get_url(name=True))
				return
		self.render('download-journal.html', {
			'journal': journal,
			'username': username,
			'errors': errors,
			'from': self.request.get('from', from_date.strftime(DATE_FORMAT)),
			'to': self.request.get('to', to_date.strftime(DATE_FORMAT)),
		})
class DropboxCallback(BaseHandler):
	"""Dropbox OAuth flow: ?action=authorize starts it; the plain callback
	exchanges the session's request token for an access token."""
	def get(self):
		if 'user' not in self.session:
			return
		if self.request.get('action') == 'authorize':
			# Start the flow: remember the request token, send to Dropbox.
			token, url = utils.dropbox_url()
			self.session['dropbox_token'] = token
			self.redirect(url)
			return
		if 'dropbox_token' not in self.session:
			return
		def txn(user_key, dropbox_token, dropbox_uid):
			u = db.get(user_key)
			u.dropbox_token = dropbox_token
			u.dropbox_id = dropbox_uid
			u.dropbox_enable = True
			u.put()
			return u
		try:
			access_token = utils.dropbox_token(self.session['dropbox_token'])
			u = db.run_in_transaction(txn, self.session['user']['key'], str(access_token), self.request.get('uid'))
			cache.set_keys([u])
			self.add_message('success', 'Dropbox authorized.')
		except Exception, e:
			# Best-effort: surface a flash message instead of a 500.
			self.add_message('error', 'An error occurred with Dropbox. Try again.')
			logging.error('Dropbox error: %s', e)
		self.redirect(webapp2.uri_for('account'))
class BackupHandler(BaseHandler):
	"""Task-queue worker: back an entry up to Dropbox or Google Docs as HTML."""
	def post(self):
		entry_key = db.Key(self.request.get('entry_key'))
		network = self.request.get('network')
		username = self.request.get('username')
		journal_name = self.request.get('journal_name')
		user = cache.get_user(username)
		entry, content, blobs = cache.get_entry(username, journal_name, entry_key.id(), entry_key)
		# Remote path: <journal>/<entry id>.html ('/' in the journal name
		# would create nested folders, so it's replaced).
		path = '%s/%s.html' %(journal_name.replace('/', '_'), entry_key.id())
		rendered = utils.render('pdf.html', {'entries': [(entry, content, [])]})
		rendered = rendered.encode('utf-8')
		if network == models.USER_BACKUP_DROPBOX:
			try:
				put = utils.dropbox_put(user.dropbox_token, path, rendered, entry.dropbox_rev)
			except: # maybe a parent_rev problem? try again without
				try:
					put = utils.dropbox_put(user.dropbox_token, path, rendered) # no parent rev
				except Exception, e:
					logging.error('Dropbox put error: %s', e)
					return
			# Remember the new revision so the next backup updates in place.
			def txn(entry_key, rev):
				e = db.get(entry_key)
				e.dropbox_rev = rev
				e.put()
				return e
			entry = db.run_in_transaction(txn, entry_key, put['rev'])
		elif network == models.USER_BACKUP_GOOGLE_DOCS:
			try:
				doc_id = utils.google_upload(user.google_docs_token, utils.deunicode(path), rendered, entry.google_docs_id)
				# Only write when the document id actually changed.
				if doc_id and doc_id != entry.google_docs_id:
					def txn(entry_key, doc_id):
						e = db.get(entry_key)
						e.google_docs_id = doc_id
						e.put()
						return e
					entry = db.run_in_transaction(txn, entry_key, doc_id)
			except Exception, e:
				logging.error('Google Docs upload error: %s', e)
class GoogleSiteVerification(BaseHandler):
	"""Serve the Google Webmaster Tools site-verification response body."""
	def get(self):
		body = 'google-site-verification: %s.html' %settings.GOOGLE_SITE_VERIFICATION
		self.response.out.write(body)
class GoogleCallback(BaseHandler):
	"""Google Docs AuthSub flow: ?action=authorize starts it; the callback
	(?token=...) upgrades the single-use token to a session token."""
	def get(self):
		if 'user' not in self.session:
			return
		if self.request.get('action') == 'authorize':
			self.redirect(str(utils.google_url()))
			return
		if 'token' in self.request.GET:
			def txn(user_key, token):
				u = db.get(user_key)
				u.google_docs_token = token
				u.google_docs_enable = True
				u.put()
				return u
			try:
				session_token = utils.google_session_token(self.request.get('token'))
				user = db.run_in_transaction(txn, self.session['user']['key'], session_token.get_token_string())
				cache.set_keys([user])
				self.add_message('success', 'Google Docs authorized.')
			except Exception, e:
				# Best-effort: surface a flash message instead of a 500.
				self.add_message('error', 'An error occurred with Google Docs. Try again.')
				logging.error('Google Docs error: %s', e)
		self.redirect(webapp2.uri_for('account'))
# Session / cookie lifetime.
SECS_PER_WEEK = 60 * 60 * 24 * 7
# webapp2 session configuration: signed-cookie sessions expiring after a week.
config = {
    'webapp2_extras.sessions': {
        'secret_key': settings.COOKIE_KEY,
        'session_max_age': SECS_PER_WEEK,
        'cookie_args': {'max_age': SECS_PER_WEEK},
    },
}
# Route table.  Order matters: the catch-all user routes at the bottom match
# one- and two-segment URLs, so every fixed route must come first (and its
# first path segment must appear in RESERVED_NAMES, checked below).
app = webapp2.WSGIApplication([
    webapp2.Route(r'/', handler=MainPage, name='main'),
    webapp2.Route(r'/about', handler=AboutHandler, name='about'),
    webapp2.Route(r'/account', handler=AccountHandler, name='account'),
    webapp2.Route(r'/activity', handler=ActivityHandler, name='activity'),
    webapp2.Route(r'/admin/blog/<blog_id>', handler=EditBlogHandler, name='edit-blog'),
    webapp2.Route(r'/admin/drafts', handler=BlogDraftsHandler, name='blog-drafts'),
    webapp2.Route(r'/admin/flush', handler=FlushMemcache, name='flush-memcache'),
    webapp2.Route(r'/admin/new/blog', handler=NewBlogHandler, name='new-blog'),
    webapp2.Route(r'/admin/update/users', handler=UpdateUsersHandler, name='update-users'),
    webapp2.Route(r'/blob/<key>', handler=BlobHandler, name='blob'),
    webapp2.Route(r'/blog', handler=BlogHandler, name='blog'),
    webapp2.Route(r'/blog/<entry>', handler=BlogEntryHandler, name='blog-entry'),
    webapp2.Route(r'/dropbox', handler=DropboxCallback, name='dropbox'),
    webapp2.Route(r'/facebook', handler=FacebookCallback, name='facebook'),
    webapp2.Route(r'/google', handler=GoogleCallback, name='google'),
    webapp2.Route(r'/feeds/<feed>', handler=FeedsHandler, name='feeds'),
    webapp2.Route(r'/follow/<username>', handler=FollowHandler, name='follow'),
    webapp2.Route(r'/following/<username>', handler=FollowingHandler, name='following'),
    webapp2.Route(r'/login/facebook', handler=FacebookLogin, name='login-facebook'),
    webapp2.Route(r'/login/google', handler=GoogleLogin, name='login-google'),
    webapp2.Route(r'/logout', handler=Logout, name='logout'),
    webapp2.Route(r'/logout/google', handler=GoogleSwitch, name='logout-google'),
    webapp2.Route(r'/markup', handler=MarkupHandler, name='markup'),
    webapp2.Route(r'/new/journal', handler=NewJournal, name='new-journal'),
    webapp2.Route(r'/register', handler=Register, name='register'),
    webapp2.Route(r'/save', handler=SaveEntryHandler, name='entry-save'),
    webapp2.Route(r'/security', handler=SecurityHandler, name='security'),
    webapp2.Route(r'/stats', handler=StatsHandler, name='stats'),
    webapp2.Route(r'/twitter/<action>', handler=TwitterHandler, name='twitter'),
    webapp2.Route(r'/upload/file/<username>/<journal_name>/<entry_id>', handler=UploadHandler, name='upload-file'),
    webapp2.Route(r'/upload/success', handler=UploadSuccess, name='upload-success'),
    webapp2.Route(r'/upload/url/<username>/<journal_name>/<entry_id>', handler=GetUploadURL, name='upload-url'),
    # taskqueue
    webapp2.Route(r'/tasks/social_post', handler=SocialPost, name='social-post'),
    webapp2.Route(r'/tasks/backup', handler=BackupHandler, name='backup'),
    # google site verification
    webapp2.Route(r'/%s.html' %settings.GOOGLE_SITE_VERIFICATION, handler=GoogleSiteVerification),
    # this section must be last, since the regexes below will match one and two -level URLs
    webapp2.Route(r'/<username>', handler=UserHandler, name='user'),
    webapp2.Route(r'/<username>/<journal_name>', handler=ViewJournal, name='view-journal'),
    webapp2.Route(r'/<username>/<journal_name>/<entry_id:\d+>', handler=ViewEntryHandler, name='view-entry'),
    webapp2.Route(r'/<username>/<journal_name>/download', handler=DownloadJournalHandler, name='download-journal'),
    webapp2.Route(r'/<username>/<journal_name>/new', handler=NewEntryHandler, name='new-entry'),
], debug=True, config=config)
# First URL path segments that can never be registered as usernames, so the
# catch-all /<username> routes are never shadowed.  The import-time check
# below verifies the route table stays inside this set.
RESERVED_NAMES = set([
    '',
    '<username>',
    'about',
    'account',
    'activity',
    'admin',
    'backup',
    'blob',
    'blog',
    'contact',
    'docs',
    'dropbox',
    'entry',
    'facebook',
    'features',
    'feeds',
    'file',
    'follow',
    'followers',
    'following',
    'google',
    'googledocs',
    'googleplus',
    'help',
    'journal',
    'journaler',
    'journalr',
    'journals',
    'login',
    'logout',
    'markup',
    'new',
    'news',
    'oauth',
    'openid',
    'privacy',
    'register',
    'save',
    'security',
    'site',
    'stats',
    'tasks',
    'terms',
    'twitter',
    'upload',
    'user',
    'users',
])
# assert that all routes are listed in RESERVED_NAMES
# Runs at import time: extract the first path segment of every named route
# and refuse to start if it could collide with a future username.
for i in app.router.build_routes.values():
    name = i.template.partition('/')[2].partition('/')[0]
    if name not in RESERVED_NAMES:
        import sys
        logging.critical('%s not in RESERVED_NAMES', name)
        print '%s not in RESERVED_NAMES' %name
        sys.exit(1)
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
import logging
import webapp2
def url(ob, name=''):
    """Template filter: build the URI for route *ob*.

    *name* is interpreted per route: a feed name, a username, a journal
    entity, a blog entry slug, or a blog id.  Routes taking no argument
    fall through to a plain lookup.
    """
    if ob == 'feeds':
        return webapp2.uri_for(ob, feed=name)
    if ob == 'user-feeds':
        return webapp2.uri_for('feeds', feed='user-%s' %name)
    if ob in ('user', 'follow', 'following'):
        # All three routes take the bare username.
        return webapp2.uri_for(ob, username=name)
    if ob in ['new-entry', 'download-journal']:
        # *name* is a journal entity; its parent key holds the username.
        return webapp2.uri_for(ob, username=name.key().parent().name(), journal_name=name.name)
    if ob == 'blog-entry':
        return webapp2.uri_for(ob, entry=name)
    if ob == 'edit-blog':
        return webapp2.uri_for(ob, blog_id=name)
    return webapp2.uri_for(ob)
def user_journal_url(username, journal_name):
    """URL of a journal page, given its owner and its name."""
    route_args = {'username': username, 'journal_name': journal_name}
    return webapp2.uri_for('view-journal', **route_args)
def journal_url(journal, page=1):
    """Delegate URL construction to the journal object itself."""
    built = journal.url(page)
    return built
def journal_prev(ob, page):
    """Previous-page URL for a journal (page numbers are passed as strings)."""
    previous = page - 1
    return journal_url(ob, str(previous))
def journal_next(ob, page):
    """Next-page URL for a journal (page numbers are passed as strings)."""
    following = page + 1
    return journal_url(ob, str(following))
def blog_url(page=1):
    """URL of a blog index page."""
    return webapp2.uri_for('blog', **{'page': page})
def blog_prev(page):
    """URL of the blog page before *page*."""
    previous = page - 1
    return blog_url(previous)
def blog_next(page):
    """URL of the blog page after *page*."""
    following = page + 1
    return blog_url(following)
# Long entry-date formats, with and without a time-of-day component.
JDATE_FMT = '%A, %b %d, %Y %I:%M %p'
JDATE_NOTIME_FMT = '%A, %b %d, %Y'

def jdate(date):
    """Format an entry datetime, omitting the time when it is exactly midnight."""
    midnight = not (date.hour or date.minute or date.second)
    return date.strftime(JDATE_NOTIME_FMT if midnight else JDATE_FMT)
# Short "Month DD, YYYY" date format.
SDATE_FMT = '%B %d, %Y'

def sdate(date):
    """Format a date as e.g. "January 02, 2020"."""
    formatted = date.strftime(SDATE_FMT)
    return formatted
def entry_subject(sub, date):
    """Return the entry subject, falling back to its formatted date when empty."""
    return sub if sub else date.strftime(JDATE_FMT)
def timesince(value, default='just now'):
    """Human-readable elapsed time since *value* (a naive UTC datetime).

    Returns e.g. '3 hours ago', or *default* when less than one second
    has passed.
    """
    now = datetime.datetime.utcnow()
    diff = now - value
    # Floor division ('//') is explicit here: plain '/' floors ints only
    # under Python 2; under Python 3 it yields floats, and a non-zero float
    # like 0.5 would wrongly pass the truthiness test below.
    periods = (
        (diff.days // 365, 'year', 'years'),
        (diff.days // 30, 'month', 'months'),
        (diff.days // 7, 'week', 'weeks'),
        (diff.days, 'day', 'days'),
        (diff.seconds // 3600, 'hour', 'hours'),
        (diff.seconds // 60, 'minute', 'minutes'),
        (diff.seconds, 'second', 'seconds'),
    )
    # The first non-zero period (largest unit) wins.
    for period, singular, plural in periods:
        if period:
            return '%d %s ago' % (period, singular if period == 1 else plural)
    return default
def floatformat(value):
    """Render a number with exactly one decimal place."""
    return '%.1f' % value
def pluralize(value, ext='s'):
    """Return the plural suffix *ext* unless *value* is exactly 1."""
    if value == 1:
        return ''
    return ext
def date(value, fmt):
    """Template filter: format *value* with strftime using *fmt*."""
    formatted = value.strftime(fmt)
    return formatted
# filesizeformat in jinja 2.6 is broken, use this from their current github
def filesizeformat(value, binary=False):
    """Format *value* (bytes) as a human-readable size (13 kB, 4.1 MB, ...).

    Decimal prefixes (kB, MB, ...) by default; pass ``binary=True`` for
    1024-based prefixes (KiB, MiB, ...).
    """
    size = float(value)
    base = 1024 if binary else 1000
    if binary:
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if size == 1:
        return '1 Byte'
    if size < base:
        return '%d Bytes' % size
    for exp, prefix in enumerate(prefixes):
        unit = base ** (exp + 2)
        if size < unit:
            return '%.1f %s' % ((base * size / unit), prefix)
    # Beyond the table: fall through using the largest prefix.
    return '%.1f %s' % ((base * size / unit), prefix)
# Name -> function map of every template filter this module exports, built
# by looking each public name up in the module globals.
filters = dict(
    (filter_name, globals()[filter_name]) for filter_name in [
        'blog_next',
        'blog_prev',
        'blog_url',
        'date',
        'entry_subject',
        'filesizeformat',
        'floatformat',
        'jdate',
        'journal_next',
        'journal_prev',
        'journal_url',
        'pluralize',
        'sdate',
        'timesince',
        'url',
        'user_journal_url',
    ]
)
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
import logging
import os
from google.appengine.ext import db
import cache
import utils
import webapp2
def feed(feed, token):
    """Render the Atom XML body for the named feed.

    Known feeds: 'activity' (site-wide), 'blog', and 'user-<username>'.
    For a user feed, a matching private *token* yields the personalized
    feed of everyone that user follows; otherwise the user's own public
    activity is served.  Returns '' for unknown feed names.
    """
    def activity_items(activities):
        # All three activity-based feeds build items the same way (the code
        # was previously duplicated): the "<user> <action>" text doubles as
        # both title and content, and there is no per-item link.
        items = []
        for a in activities:
            text = '%s %s' %(a.user, a.get_action())
            items.append(mk_item(text, None, text, a.key().id(), a.date))
        return items

    if feed == 'activity':
        title = 'journalr user activity'
        link = webapp2.uri_for('activity')
        subtitle = 'Recent activity by journalr users'
        items = activity_items(cache.get_activities())
    elif feed == 'blog':
        title = 'journalr blog'
        link = webapp2.uri_for('blog')
        subtitle = 'Recent journalr blog posts'
        items = []
        for i in cache.get_blog_entries_page(1):
            items.append(mk_item(
                i.title,
                i.url,
                i.rendered,
                i.key().id(),
                i.date
            ))
    elif feed.startswith('user-'):
        username = feed.partition('-')[2]
        user_key = db.Key.from_path('User', username)
        user = cache.get_by_key(user_key)
        if user.token == token:
            # Correct private token: personalized feed of followed users.
            title = '%s\'s journalr feed' %username
            link = webapp2.uri_for('user', username=username)
            subtitle = 'Recent activity by users followed by %s' %username
            items = activity_items(cache.get_activities_follower(username))
        else:
            # Missing/bad token: the user's own public activity.
            title = '%s activity feed' %username
            link = webapp2.uri_for('user', username=username)
            subtitle = 'Recent activity by %s' %username
            items = activity_items(cache.get_activities(username=username))
    else:
        return ''
    d = {
        'title': title,
        'link': mk_link(link),
        'subtitle': subtitle,
        'updated': datetime.datetime.utcnow(),
        'items': items,
        'host': os.environ['HTTP_HOST'],
        'journal_url': mk_link(webapp2.uri_for('main')),
        'self_link': mk_link(webapp2.uri_for('feeds', feed=feed)),
    }
    return utils.render('atom.xml', d)
def mk_link(link):
    """Absolutize *link* against the current request host; empty stays ''."""
    if not link:
        return ''
    return 'http://' + os.environ['HTTP_HOST'] + link
def mk_item(title, link, desc, uid, date):
    """Build one Atom entry dict for the feed template."""
    item = dict(title=title, content=desc, id=uid, date=date)
    item['link'] = mk_link(link)
    return item
| Python |
#!/usr/bin/python
#
# library for accessing a web service (API) with the OAuth protocol
# (trying to make web service and web app server independent, not there yet)
#
# This library is a derivative of the tweetapp framework by tav@espians.com available at:
# http://github.com/tav/tweetapp/tree/master
#
# Other credits include:
# The "official" OAuth python library: http://oauth.googlecode.com/svn/code/python/
# The fftogo application: http://github.com/bgolub/fftogo/tree/master
# The FriendFeed python library: http://code.google.com/p/friendfeed-api/
#
""""OAuth library for making RESTful API calls using the OAuth protocol"""
import cgi
import logging
import urllib
import time
from hashlib import sha1
from hmac import new as hmac
from random import getrandbits
from google.appengine.api import urlfetch
# We require a JSON parsing library. These seem to be the most popular.
# Each decoder takes utf-8 encoded bytes and returns Python objects.
try:
    import cjson
    decode_json = lambda s: cjson.decode(s.decode("utf-8"), True)
except ImportError:
    try:
        # Django includes simplejson
        from django.utils import simplejson
        decode_json = lambda s: simplejson.loads(s.decode("utf-8"))
    except ImportError:
        import json
        # The stdlib json module has no read() (that was the old
        # python-json API), and _unicodify was never defined here --
        # use loads() on the decoded bytes instead.
        decode_json = lambda s: json.loads(s.decode("utf-8"))
# ------------------------------------------------------------------------------
# oauth client
# ------------------------------------------------------------------------------
class OAuthToken(object):
    """A request or access token (key + secret) representing an end user."""
    # key = the token; secret = the token secret
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def to_string(self):
        """Serialize as a url-encoded query string."""
        params = {'oauth_token': self.key, 'oauth_token_secret': self.secret}
        return urllib.urlencode(params)

    @staticmethod
    def from_string(s):
        """Parse a token from something like:
        oauth_token_secret=digg&oauth_token=digg
        """
        parsed = cgi.parse_qs(s, keep_blank_values=False)
        return OAuthToken(parsed['oauth_token'][0], parsed['oauth_token_secret'][0])

    def __str__(self):
        return self.to_string()
class OAuthClient(object):
    """OAuth client.

    Signs and issues GET/POST requests against the service described by
    *service_info* (consumer key/secret and endpoint URLs), optionally on
    behalf of *token* (an OAuthToken).
    """
    def __init__(self, webapp_api, service_info, token=None):
        # webapp_api is accepted for API compatibility but not stored.
        self.service_info = service_info
        # HMAC key, built lazily: "<consumer_secret>&[<token_secret>]".
        self.service_key = None
        self.oauth_callback = service_info['oauth_callback']
        self.token = token
    # public methods
    def get(self, api_method, **extra_params):
        """Signed GET to *api_method* (a full URL or a bare method name);
        returns the decoded JSON response.  Raises ValueError on non-200."""
        # Bare method names get the service's prefix/suffix wrapped around them.
        if not (api_method.startswith('http://') or api_method.startswith('https://')):
            api_method = '%s%s%s' % (
                self.service_info['default_api_prefix'], api_method,
                self.service_info['default_api_suffix']
            )
        fetch = urlfetch.fetch(self.get_signed_url(
            api_method, self.token, **extra_params
        ))
        if fetch.status_code != 200:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        return decode_json(fetch.content)
    def post(self, api_method, **extra_params):
        """Signed POST to *api_method*; same conventions as get()."""
        if not (api_method.startswith('http://') or api_method.startswith('https://')):
            api_method = '%s%s%s' % (
                self.service_info['default_api_prefix'], api_method,
                self.service_info['default_api_suffix']
            )
        # For POST the signed parameters travel in the form-encoded body.
        payload = self.get_signed_payload(
            api_method, self.token, **extra_params
        )
        headers = {}
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        fetch = urlfetch.fetch(api_method, payload=payload, method=urlfetch.POST, headers=headers)
        if fetch.status_code != 200:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        return decode_json(fetch.content)
    # oauth workflow
    def get_request_token(self):
        """Step 1: fetch an unauthorized request token."""
        token_info = self.get_data_from_signed_url(self.service_info['request_token_url'])
        token = OAuthToken.from_string(token_info)
        return token
    def get_access_token(self, oauth_token):
        """Step 3: exchange an authorized request token for an access token."""
        token_info = self.get_data_from_signed_url(
            self.service_info['access_token_url'], oauth_token
        )
        token = OAuthToken.from_string(token_info)
        return token
    def get_authorize_url(self, oauth_token):
        """Step 2: URL to which the end user is sent to authorize the token."""
        if self.oauth_callback:
            oauth_callback = {'oauth_callback': self.oauth_callback}
        else:
            oauth_callback = {}
        return self.get_signed_url(
            self.service_info['user_auth_url'], oauth_token, **oauth_callback
        )
    # request marshalling
    def get_data_from_signed_url(self, __url, __token=None, __meth='GET', **extra_params):
        """Fetch a signed URL and return the raw response body.
        Raises ValueError on any non-200 status."""
        signed_url = self.get_signed_url(__url, __token, __meth, **extra_params)
        fetch = urlfetch.fetch(signed_url)
        if fetch.status_code != 200:
            raise ValueError(
                "Error calling... Got return status: %i [%r]" %
                (fetch.status_code, fetch.content)
            )
        data = fetch.content
        #logging.debug(data)
        return data
    def get_signed_url(self, __url, __token=None, __meth='GET', **extra_params):
        """Return __url with the signed OAuth parameters as a query string."""
        service_info = self.service_info
        kwargs = {
            'oauth_consumer_key': service_info['consumer_key'],
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_timestamp': int(time.time()),
            'oauth_nonce': getrandbits(64),
        }
        kwargs.update(extra_params)
        if self.service_key is None:
            self.service_key = self.service_info['consumer_secret']+'&'
        if __token is not None:
            kwargs['oauth_token'] = __token.key
            key = self.service_key + encode(__token.secret)
        else:
            key = self.service_key
        # OAuth signature base string: METHOD&url&sorted-encoded-params.
        message = '&'.join(map(encode, [
            __meth.upper(), __url, '&'.join(
                '%s=%s' % (encode(k), encode(kwargs[k])) for k in sorted(kwargs)
            )
        ]))
        # HMAC-SHA1, base64 without the trailing newline.
        kwargs['oauth_signature'] = hmac(
            key, message, sha1
        ).digest().encode('base64')[:-1]
        return '%s?%s' % (__url, urllib.urlencode(kwargs))
    def get_signed_payload(self, __url, __token=None, __meth='POST', **extra_params):
        """Like get_signed_url, but returns the form-encoded POST body only.
        NOTE: intentionally mirrors get_signed_url's signing logic."""
        service_info = self.service_info
        kwargs = {
            'oauth_consumer_key': service_info['consumer_key'],
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_timestamp': int(time.time()),
            'oauth_nonce': getrandbits(64),
        }
        kwargs.update(extra_params)
        if self.service_key is None:
            self.service_key = self.service_info['consumer_secret']+'&'
        if __token is not None:
            kwargs['oauth_token'] = __token.key
            key = self.service_key + encode(__token.secret)
        else:
            key = self.service_key
        message = '&'.join(map(encode, [
            __meth.upper(), __url, '&'.join(
                '%s=%s' % (encode(k), encode(kwargs[k])) for k in sorted(kwargs)
            )
        ]))
        kwargs['oauth_signature'] = hmac(
            key, message, sha1
        ).digest().encode('base64')[:-1]
        return urllib.urlencode(kwargs)
# ------------------------------------------------------------------------------
# utility functions
# ------------------------------------------------------------------------------
def encode(text):
    """Percent-encode *text* for OAuth signing; nothing is left unescaped."""
    return urllib.quote(str(text), '')
def _encodify(s):
    # Coerce any value to a utf-8 byte string (Python 2 only: uses unicode()).
    return unicode(s).encode('utf-8')
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import json
import logging
import os
import urllib
from google.appengine.api import urlfetch
import settings
import webapp2
# Facebook OAuth endpoints: login dialog, code->token exchange, and the
# Graph API root for the authenticated user.
OAUTH_URL = 'https://www.facebook.com/dialog/oauth'
TOKEN_ENDPOINT = 'https://graph.facebook.com/oauth/access_token'
GRAPH_URL = 'https://graph.facebook.com/me'
def redirect_uri(payload_dict=None):
    """Absolute OAuth redirect URI for our Facebook callback handler.

    *payload_dict*, if given, is url-encoded into the query string so
    state survives the OAuth round trip.  The default was changed from a
    shared mutable {} to None (same behavior, avoids the mutable-default
    pitfall).
    """
    url = webapp2.uri_for('facebook')
    payload = urllib.urlencode(payload_dict or {})
    return 'http://%s%s?%s' %(os.environ['HTTP_HOST'], url, payload)
def oauth_url(redirect_dict=None, payload_dict=None):
    """Facebook OAuth dialog URL carrying our client id and redirect URI.

    *redirect_dict* is state embedded in the redirect URI; *payload_dict*
    adds extra dialog parameters (e.g. scope).  Mutable-default {} args
    replaced with None (behavior unchanged for all existing callers).
    """
    oauth_dict = {
        'client_id': settings.FACEBOOK_KEY,
        'redirect_uri': redirect_uri(redirect_dict or {}),
    }
    oauth_dict.update(payload_dict or {})
    payload = urllib.urlencode(oauth_dict)
    return '%s?%s' %(OAUTH_URL, payload)
def access_dict(code, redirect_dict=None):
    """Exchange an OAuth *code* for Facebook's access-token response.

    Returns the parsed key/value dict on success, False on any error
    (non-200 status or unparseable body) -- callers treat this as
    best-effort.  Mutable-default {} replaced with None.
    """
    payload = urllib.urlencode({
        'client_id': settings.FACEBOOK_KEY,
        'redirect_uri': redirect_uri(redirect_dict or {}),
        'client_secret': settings.FACEBOOK_SECRET,
        'code': code,
    })
    result = urlfetch.fetch(TOKEN_ENDPOINT + '?' + payload)
    if result.status_code != 200:
        logging.error('facebook bad status code: %s, %s', result.status_code, result.content)
        return False
    try:
        # Response body is form-encoded: "k=v" pairs joined by '&'.
        return dict([i.split('=') for i in result.content.split('&')])
    except Exception:
        # Malformed body: log and fail soft, as before.
        logging.error('facebook bad content: %s', result.content)
        return False
def graph_request(access_token, method=urlfetch.GET, path='', payload_dict=None):
    """Call the Facebook Graph API on behalf of *access_token*.

    Returns the decoded JSON response, or False on a non-200 status.

    Bug fix: the old default ``payload_dict={}`` was mutated in place
    (``payload_dict['access_token'] = ...``), so one caller's token leaked
    into every later call sharing the default dict.  We now copy.
    """
    params = dict(payload_dict or {})
    params['access_token'] = access_token
    payload = urllib.urlencode(params)
    url = GRAPH_URL + path
    if method in ['GET', urlfetch.GET]:
        # GET sends the parameters in the query string, not the body.
        url += '?' + payload
        payload = None
    result = urlfetch.fetch(
        url=url,
        payload=payload,
        method=method,
    )
    if result.status_code == 200:
        return json.loads(result.content)
    logging.error('facebook graph request error: %s, %s', result.status_code, result.content)
    return False
| Python |
# blobstore utf-8 issue patched from comment #21 at: http://code.google.com/p/googleappengine/issues/detail?id=2749
import base64
import quopri
from webob import multidict
def from_fieldstorage(cls, fs):
    """
    Create a dict from a cgi.FieldStorage instance

    Patched version (see the issue link above): decodes each non-file
    multipart field to utf-8, honoring its Content-Transfer-Encoding and
    per-part charset, before adding it to the MultiDict.
    """
    obj = cls()
    if fs.list:
        # fs.list can be None when there's nothing to parse
        for field in fs.list:
            if field.filename:
                # File uploads are stored as-is (the FieldStorage itself).
                obj.add(field.name, field)
            else:
                # first, set a common charset to utf-8.
                common_charset = 'utf-8'
                # second, check Content-Transfer-Encoding and decode
                # the value appropriately
                field_value = field.value
                transfer_encoding = field.headers.get(
                    'Content-Transfer-Encoding', None)
                if transfer_encoding == 'base64':
                    field_value = base64.b64decode(field_value)
                if transfer_encoding == 'quoted-printable':
                    field_value = quopri.decodestring(field_value)
                if field.type_options.has_key('charset') and \
                    field.type_options['charset'] != common_charset:
                    # decode with a charset specified in each
                    # multipart, and then encode it again with a
                    # charset specified in top level FieldStorage
                    field_value = field_value.decode(
                        field.type_options['charset']).encode(common_charset)
                # TODO: Should we take care of field.name here?
                obj.add(field.name, field_value)
    return obj
# Monkey-patch webob so multipart form fields are decoded to utf-8
# (see the blobstore issue link at the top of this file).
multidict.MultiDict.from_fieldstorage = classmethod(from_fieldstorage)
def webapp_add_wsgi_middleware(app):
    """App Engine hook: wrap every WSGI app with appstats recording."""
    from google.appengine.ext.appstats import recording
    return recording.appstats_wsgi_middleware(app)
| Python |
import os
import sys

# Make bundled third-party packages in ./lib importable before anything else.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
# see issue772: http://code.google.com/p/googleappengine/issues/detail?id=772
ultimate_sys_path = None

def fix_sys_path():
    """Snapshot sys.path on first call; restore that snapshot on later calls.

    Works around App Engine issue 772, where the runtime mutates sys.path
    between requests.
    """
    global ultimate_sys_path
    if ultimate_sys_path is not None:
        sys.path[:] = ultimate_sys_path
    else:
        ultimate_sys_path = list(sys.path)
| Python |
# Copyright (c) 2011 Matt Jibson <matt.jibson@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging
import oauth
import settings
import utils
import webapp2
URL = 'https://api.twitter.com'
# Service description consumed by oauth.OAuthClient: credentials, the three
# OAuth endpoints, and how bare API method names are expanded.
OAUTH_APP_SETTINGS = {
    'consumer_key': settings.TWITTER_KEY,
    'consumer_secret': settings.TWITTER_SECRET,
    'request_token_url': URL + '/oauth/request_token',
    'access_token_url': URL + '/oauth/access_token',
    'user_auth_url': URL + '/oauth/authorize',
    'default_api_prefix': URL,
    'default_api_suffix': '.json',
    'oauth_callback': None, # set later, after webapp2 is configured
}
def oauth_client(app, *args):
    """Build an OAuthClient configured for Twitter.

    The callback URL needs webapp2's router, so it is computed on first
    use rather than at import time.
    """
    service = OAUTH_APP_SETTINGS
    if not service['oauth_callback']:
        service['oauth_callback'] = utils.absolute_uri('twitter', action='callback')
    return oauth.OAuthClient(app, service, *args)
def oauth_token(*args):
    # Thin factory so callers need not import the oauth module directly.
    return oauth.OAuthToken(*args)
| Python |
# adapted from http://code.google.com/appengine/articles/sharding_counters.html
import random
from google.appengine.api import memcache
from google.appengine.ext import db
# Well-known sharded-counter names used for site-wide statistics.
COUNTER_CHARS = 'characters'
COUNTER_ENTRIES = 'entries'
COUNTER_JOURNALS = 'journals'
COUNTER_SENTENCES = 'sentences'
COUNTER_USERS = 'users'
COUNTER_WORDS = 'words'
class GeneralCounterShardConfig(db.Model):
    """Tracks the number of shards for each named counter."""
    # Counter name; also used as this entity's key_name.
    name = db.StringProperty(required=True)
    # How many shard entities increments are spread across.
    num_shards = db.IntegerProperty(indexed=False, required=True, default=20)
class GeneralCounterShard(db.Model):
    """Shards for each named counter"""
    # Counter this shard belongs to (key_name is name + shard index).
    name = db.StringProperty(required=True)
    # This shard's partial count; the counter total is the sum over shards.
    count = db.IntegerProperty(indexed=False, required=True, default=0)
def get_count(name):
    """Retrieve the value for a given sharded counter.

    Parameters:
        name - The name of the counter
    """
    total = memcache.get(name)
    if total is not None:
        return total
    # Cache miss: sum every shard, then cache the total briefly.
    total = 0
    for shard in GeneralCounterShard.all().filter('name = ', name):
        total += shard.count
    memcache.add(name, total, 60)
    return total
def increment(name, incr=1):
    """Increment the value for a given sharded counter.

    Parameters:
        name - The name of the counter
        incr - Amount to add (default 1)
    """
    config = GeneralCounterShardConfig.get_or_insert(name, name=name)
    def txn():
        # A random shard spreads concurrent increments across entity groups.
        index = random.randint(0, config.num_shards - 1)
        shard_name = name + str(index)
        counter = GeneralCounterShard.get_by_key_name(shard_name)
        if counter is None:
            counter = GeneralCounterShard(key_name=shard_name, name=name)
        counter.count += incr
        counter.put()
    db.run_in_transaction(txn)
    # does nothing if the key does not exist
    # Bug fix: keep the cached total in step with the datastore -- the old
    # memcache.incr(name) always added 1 even when incr > 1.
    memcache.incr(name, delta=incr)
def increase_shards(name, num):
    """Increase the number of shards for a given sharded counter.
    Will never decrease the number of shards.

    Parameters:
        name - The name of the counter
        num - How many shards to use
    """
    config = GeneralCounterShardConfig.get_or_insert(name, name=name)
    def txn():
        # Guard: never shrink the shard count.
        if config.num_shards >= num:
            return
        config.num_shards = num
        config.put()
    db.run_in_transaction(txn)
| Python |
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
# Protocol-level defaults for OAuth 1.0.
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
    """Generic exception class."""
    def __init__(self, message='OAuth error occured.'):
        # NOTE: RuntimeError.__init__ is not called, so str(e) is '' --
        # callers must read e.message instead.
        self.message = message
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Percent-encode a URL component, including any '/'; only '~' is kept."""
    return urllib.quote(s, safe='~')
def _utf8_str(s):
    """Convert unicode to utf-8; anything else goes through str()."""
    return s.encode("utf-8") if isinstance(s, unicode) else str(s)
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate a pseudorandom numeric string of *length* digits."""
    digits = [str(random.randint(0, 9)) for i in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate a pseudorandom numeric verifier string of *length* digits."""
    digits = [str(random.randint(0, 9)) for i in range(length)]
    return ''.join(digits)
class OAuthConsumer(object):
    """The Consumer's identity: its key and the secret it shares with the
    Service Provider."""
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key, self.secret = key, secret
class OAuthToken(object):
    """OAuthToken is a data type that represents an End User via either an access
    or request token.

    key -- the token
    secret -- the token secret

    Also carries the OAuth 1.0a callback / verifier fields when present.
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def set_callback(self, callback):
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # Generate one when the caller supplies none.
        self.verifier = verifier if verifier is not None else generate_verifier()

    def get_callback_url(self):
        """Callback URL with oauth_verifier appended to its query string."""
        if not (self.callback and self.verifier):
            return self.callback
        parts = urlparse.urlparse(self.callback)
        scheme, netloc, path, params, query, fragment = parts[:6]
        if query:
            query = '%s&oauth_verifier=%s' % (query, self.verifier)
        else:
            query = 'oauth_verifier=%s' % self.verifier
        return urlparse.urlunparse((scheme, netloc, path, params,
            query, fragment))

    def to_string(self):
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """ Returns a token from something like:
            oauth_token_secret=xxx&oauth_token=xxx
        """
        params = cgi.parse_qs(s, keep_blank_values=False)
        token = OAuthToken(params['oauth_token'][0], params['oauth_token_secret'][0])
        # 1.0 responses carry no callback confirmation.
        confirmed = params.get('oauth_callback_confirmed')
        if confirmed:
            token.callback_confirmed = confirmed[0]
        return token

    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None  # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        """Set a single (OAuth or extra) request parameter."""
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        """Return a parameter's value; raise OAuthError when it is absent."""
        try:
            return self.parameters[parameter]
        except KeyError:
            # Narrowed from a bare except: only a missing key is expected here.
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # Ignore oauth parameters.
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters.
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])

    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        # Work on a copy: the original aliased self.parameters and deleted
        # 'oauth_signature' from it as a hidden side effect.
        params = dict(self.parameters)
        # Exclude the signature if it exists.
        params.pop('oauth_signature', None)
        # Escape key values before sorting.
        key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
            for k, v in params.items()]
        # Sort lexicographically, first after key, then after value.
        key_values.sort()
        # Combine key value pairs into a string.
        return '&'.join(['%s=%s' % (k, v) for k, v in key_values])

    def get_normalized_http_method(self):
        """Uppercases the http method."""
        return self.http_method.upper()

    def get_normalized_http_url(self):
        """Parses the URL and rebuilds it to be scheme://host/path."""
        parts = urlparse.urlparse(self.http_url)
        scheme, netloc, path = parts[:3]
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        return '%s://%s%s' % (scheme, netloc, path)

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of build_signature."""
        # Set the signature method.
        self.set_parameter('oauth_signature_method',
            signature_method.get_name())
        # Set the signature.
        self.set_parameter('oauth_signature',
            self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        """Calls the build signature method within the signature method."""
        return signature_method.build_signature(self, consumer, token)

    def from_request(http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources (header, body, URL query)."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except Exception:
                    # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                    # are not swallowed.
                    raise OAuthError('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4]  # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)

    def from_consumer_and_token(oauth_consumer, token=None,
            callback=None, verifier=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a request, filling in the standard oauth_* defaults."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        # Caller-supplied parameters win over the defaults.
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.callback:
                parameters['oauth_callback'] = token.callback
            # 1.0a support for verifier.
            if verifier:
                parameters['oauth_verifier'] = verifier
        elif callback:
            # 1.0a support for callback in the request token request.
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)

    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build the authorization request for a request token."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)

    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)

    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store."""
    timestamp_threshold = 300  # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, data_store):
        self.data_store = data_store

    def get_data_store(self):
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register a signature method under its OAuth name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None  # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token

    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            verifier = None  # 1.0 request, no verifier.
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token

    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)

    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except OAuthError:
            # Narrowed from a bare except; a missing parameter means 1.0.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except OAuthError:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        """Look up the consumer named by oauth_consumer_key."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _get_verifier(self, oauth_request):
        return oauth_request.get_parameter('oauth_verifier')

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError on failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except OAuthError:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # The original recomputed the signature here and discarded the
        # result; that dead statement has been removed.

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """Abstract worker that executes OAuth requests.

    Concrete clients supply the transport by overriding the three
    fetch/access hooks; this base class only holds the credentials.
    """

    # Class-level defaults, replaced per instance in __init__.
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        """Remember the consumer/token pair this client acts for."""
        self.consumer, self.token = oauth_consumer, oauth_token

    def get_consumer(self):
        """Accessor for the stored consumer."""
        return self.consumer

    def get_token(self):
        """Accessor for the stored token."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """-> OAuthToken.  Transport must be supplied by a subclass."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """-> OAuthToken.  Transport must be supplied by a subclass."""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """-> Some protected resource.  Transport must be supplied by a subclass."""
        raise NotImplementedError
class OAuthDataStore(object):
    """A database abstraction used to lookup consumers and tokens.

    Every method here is abstract; a concrete store overrides all of
    them against its own persistence layer.
    """

    def lookup_consumer(self, key):
        """-> OAuthConsumer."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """-> OAuthToken."""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """-> OAuthToken."""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """-> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """A strategy class that implements a signature method.

    Subclasses provide get_name / build_signature_base_string /
    build_signature; check_signature is defined here in terms of
    build_signature.
    """

    def get_name(self):
        """-> str."""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """-> str key, str raw."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """-> str."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """True when the locally computed signature matches the given one."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signing, base64-encoded, per OAuth Core 1.0 section 9.2."""

    def get_name(self):
        """-> str."""
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        # Base string: method & url & normalized parameters, each escaped.
        parts = [
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        ]
        raw = '&'.join(parts)
        # Key: consumer secret '&' token secret; the token part is empty
        # for two-legged requests with no token.
        if token:
            key = '%s&%s' % (escape(consumer.secret), escape(token.secret))
        else:
            key = '%s&' % escape(consumer.secret)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        # HMAC object.
        try:
            import hashlib  # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha  # Deprecated
            hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64 (strip the trailing newline).
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signing: the signature is just the escaped secrets."""

    def get_name(self):
        """-> str."""
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Concatenate the escaped consumer and (optional) token secrets."""
        pieces = [escape(consumer.secret), '&']
        if token:
            pieces.append(escape(token.secret))
        sig = ''.join(pieces)
        # PLAINTEXT uses the same value as both the key and the base string.
        return sig, sig

    def build_signature(self, oauth_request, consumer, token):
        """The signature is the key itself."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        return key
#!/usr/bin/env python
"""
PyTextile
A Humane Web Text Generator
"""
__version__ = '2.1.3'
__date__ = '2009/02/07'
__copyright__ = """
Copyright (c) 2009, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <dean@textism.com>
All rights reserved.
Thanks to Carlo Zottmann <carlo@g-blog.net> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from urlparse import urlparse
import sgmllib
def _normalize_newlines(string):
import re
out = re.sub(r'\r\n', '\n', string)
out = re.sub(r'\n{3,}', '\n\n', out)
out = re.sub(r'\n\s*\n', '\n\n', out)
out = re.sub(r'"$', '" ', out)
return out
# PyTextile can optionally sanitize the generated XHTML,
# which is good for weblog comments. This code is from
# Mark Pilgrim's feedparser.
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that re-serializes everything it parses, verbatim.

    Each handler appends the reconstructed markup fragment to
    self.pieces; output() joins them into the final string.  Subclasses
    (the sanitizer below) override handlers to filter the stream.
    NOTE(review): depends on the Python 2-only sgmllib module and the
    unichr builtin.
    """
    # Void elements: always emitted self-closed, never given an end tag.
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
        'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # Output accumulator, joined by output().
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        # Lowercase attribute names, decode numeric character references
        # in values, and lowercase rel/type values.
        attrs = [(k.lower(), sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v).strip()) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")]
        strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
        if tag in self.elements_no_end_tag:
            self.pieces.append("<%(tag)s%(strattrs)s />" % locals())
        else:
            self.pieces.append("<%(tag)s%(strattrs)s>" % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be "pre"
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for "&#160;", ref will be "160"
        # Reconstruct the original character reference.
        self.pieces.append("&#%(ref)s;" % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for "&copy;", ref will be "copy"
        # Reconstruct the original entity reference.
        self.pieces.append("&%(ref)s;" % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append("<!--%(text)s-->" % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append("<?%(text)s>" % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append("<!%(text)s>" % locals())

    def output(self):
        """Return processed HTML as a single string"""
        return "".join(self.pieces)
class _HTMLSanitizer(_BaseHTMLProcessor):
    """HTML sanitizer: keeps only whitelisted elements and attributes.

    Non-whitelisted tags are dropped; text inside <script>/<applet> is
    suppressed entirely (tracked via unacceptablestack).  Used by
    Textile.textile() when sanitize=True.
    """
    # Whitelist of elements allowed through the sanitizer.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
        'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
        'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
        'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
        'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
        'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
        'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
        'thead', 'tr', 'tt', 'u', 'ul', 'var']

    # Whitelist of attributes allowed on the accepted elements.
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
        'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
        'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
        'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
        'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
        'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
        'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
        'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
        'usemap', 'valign', 'value', 'vspace', 'width']

    # Elements whose entire content (not just the tags) must be dropped.
    unacceptable_elements_with_end_tag = ['script', 'applet']

    # These are for MathML.
    mathml_elements = ['math', 'mi', 'mn', 'mo', 'mrow', 'msup']
    mathml_attributes = ['mode', 'xmlns']

    acceptable_elements = acceptable_elements + mathml_elements
    acceptable_attributes = acceptable_attributes + mathml_attributes

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # Depth of currently-open unacceptable elements; text is dropped
        # while this is non-zero.
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        # Drop non-whitelisted tags; for script/applet also start
        # suppressing their text content.
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # Processing instructions are never emitted.
        pass

    def handle_decl(self, text):
        # DOCTYPE declarations are never emitted.
        pass

    def handle_data(self, text):
        # Plain text passes through unless inside a suppressed element.
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
# PyTextile can optionally validate the generated
# XHTML code using either mxTidy or uTidyLib.
# Probe for a tidy implementation at import time; _tidy ends up bound to
# a callable (text -> validated XHTML body) or None when neither library
# is installed.
try:
    # This is mxTidy.
    from mx.Tidy import Tidy

    def _tidy1(text):
        """mxTidy's XHTML validator.

        This function is a wrapper to mxTidy's validator.
        """
        nerrors, nwarnings, text, errortext = Tidy.tidy(text, output_xhtml=1, numeric_entities=1, wrap=0)
        # _in_tag extracts the <body> contents -- defined elsewhere in
        # this module (not visible here).
        return _in_tag(text, 'body')

    _tidy = _tidy1
except ImportError:
    try:
        # This is uTidyLib.
        import tidy

        def _tidy2(text):
            """uTidyLib's XHTML validator.

            This function is a wrapper to uTidyLib's validator.
            """
            text = tidy.parseString(text, output_xhtml=1, add_xml_decl=0, indent=0, tidy_mark=0)
            return _in_tag(str(text), 'body')

        _tidy = _tidy2
    except ImportError:
        # No validator available; textile() skips validation.
        _tidy = None
class Textile(object):
    """Textile-to-XHTML converter; one instance holds one conversion's state."""
    # Regex fragments for the Textile attribute shorthand:
    #   hlgn -- horizontal alignment (<, >, <>, =, indents via parens)
    #   vlgn -- vertical alignment (-, ^, ~)
    #   clas -- (class#id), lnge -- [language], styl -- {style}
    #   cspn -- \n column span, rspn -- /n row span
    hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'
    vlgn = r'[\-^~]'
    clas = r'(?:\([^)]+\))'
    lnge = r'(?:\[[^\]]+\])'
    styl = r'(?:\{[^}]+\})'
    cspn = r'(?:\\\d+)'
    rspn = r'(?:\/\d+)'
    # Composites: a = any alignment, s = any span, c = class/style/lang/align.
    a = r'(?:%s|%s)*' % (hlgn, vlgn)
    s = r'(?:%s|%s)*' % (cspn, rspn)
    c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])
    # Punctuation class used by the glyph rules.
    pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'
    # urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
    urlch = '[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'
    url_schemes = ('http','https','ftp','mailto')
    # Block signatures recognized by block(): blockquote, block code,
    # notextile, pre, headings, footnotes, paragraph.
    btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', 'fn\d+', 'p')
    noimage = False
    hu = ''
    # Typographic entity substitutions applied by glyphs().
    glyph_defaults = (
        ('txt_quote_single_open', '&#8216;'),
        ('txt_quote_single_close', '&#8217;'),
        ('txt_quote_double_open', '&#8220;'),
        ('txt_quote_double_close', '&#8221;'),
        ('txt_apostrophe', '&#8217;'),
        ('txt_prime', '&#8242;'),
        ('txt_prime_double', '&#8243;'),
        ('txt_ellipsis', '&#8230;'),
        ('txt_emdash', '&#8212;'),
        ('txt_endash', '&#8211;'),
        ('txt_dimension', '&#215;'),
        ('txt_trademark', '&#8482;'),
        ('txt_registered', '&#174;'),
        ('txt_copyright', '&#169;'),
    )
def __init__(self, restricted=False, lite=False):
    """Set up per-conversion state.

    restricted -- when True, pba() suppresses style/class/id attributes
        (intended for untrusted input).
    lite -- when True, textile() skips block-level processing.
    """
    self.restricted = restricted
    self.lite = lite
    self.fn = {}        # footnote number -> generated anchor id (see footnoteID)
    self.urlrefs = {}   # named link references -- presumably filled by getRefs (not visible here)
    self.shelf = {}     # protected snippets, restored later by retrieve()
    self.rel = ''       # rel="..." fragment, set by textile() when rel is given
def textile(self, text, rel=None, encoding='utf8', output='utf8', validate=False, sanitize=False, head_offset='ignored'):
    """Convert Textile markup to XHTML.

    head_offset is accepted only for API compatibility and ignored.

    >>> import textile
    >>> textile.textile('some textile')
    '\\t<p>some textile</p>'
    """
    text = _normalize_newlines(text)
    if rel:
        self.rel = ' rel="%s"' % rel
    # Collect named link references before block processing.
    text = self.getRefs(text)
    if not self.lite:
        text = self.block(text)
    # Restore snippets shelved during processing.
    text = self.retrieve(text)
    # Convert to desired output.
    if isinstance(text, str):
        text = unicode(text, encoding)
    text = text.encode(output, 'xmlcharrefreplace')
    # Sanitize?
    if sanitize:
        p = _HTMLSanitizer()
        p.feed(text)
        text = p.output()
    # Validate output.
    if _tidy and validate:
        text = _tidy(text)
    return text
def pba(self, input, element=None):
    r"""Parse Textile attribute shorthand into an XHTML attribute string.

    input -- the shorthand: alignment marks, (class#id), {style},
        [lang], \n column span, /n row span.
    element -- 'td' or 'tr' enables span/vertical-align handling.

    >>> t = Textile()
    >>> t.pba(r'\3')
    ''
    >>> t.pba(r'\\3', element='td')
    ' colspan="3"'
    >>> t.pba(r'/4', element='td')
    ' rowspan="4"'
    >>> t.pba(r'\\3/4', element='td')
    ' colspan="3" rowspan="4"'
    >>> t.vAlign('^')
    'top'
    >>> t.pba('^', element='td')
    ' style="vertical-align:top;"'
    >>> t.pba('{line-height:18px}')
    ' style="line-height:18px;"'
    >>> t.pba('(foo-bar)')
    ' class="foo-bar"'
    >>> t.pba('(#myid)')
    ' id="myid"'
    >>> t.pba('(foo-bar#myid)')
    ' class="foo-bar" id="myid"'
    >>> t.pba('((((')
    ' style="padding-left:4em;"'
    >>> t.pba(')))')
    ' style="padding-right:3em;"'
    >>> t.pba('[fr]')
    ' lang="fr"'
    """
    style = []
    aclass = ''
    lang = ''
    colspan = ''
    rowspan = ''
    id = ''
    atts = ''
    if not input: return ''
    matched = input
    if element == 'td':
        # \n and /n spans apply only to table cells.
        m = re.search(r'\\(\d+)', matched)
        if m:
            colspan = m.group(1)
        m = re.search(r'/(\d+)', matched)
        if m:
            rowspan = m.group(1)
    if element == 'td' or element == 'tr':
        m = re.search(r'(%s)' % self.vlgn, matched)
        if m: style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
    # {style}; consume the matched shorthand so later rules don't re-see it.
    m = re.search(r'\{([^}]*)\}', matched)
    if m:
        style.append(m.group(1).rstrip(';') + ';')
        matched = matched.replace(m.group(0), '')
    # [language]
    m = re.search(r'\[([^\]]+)\]', matched, re.U)
    if m:
        lang = m.group(1)
        matched = matched.replace(m.group(0), '')
    # (class#id)
    m = re.search(r'\(([^()]+)\)', matched, re.U)
    if m:
        aclass = m.group(1)
        matched = matched.replace(m.group(0), '')
    # Remaining ( / ) runs are left/right padding, 1em per character.
    m = re.search(r'([(]+)', matched)
    if m:
        style.append("padding-left:%sem;" % len(m.group(1)))
        matched = matched.replace(m.group(0), '')
    m = re.search(r'([)]+)', matched)
    if m:
        style.append("padding-right:%sem;" % len(m.group(1)))
        matched = matched.replace(m.group(0), '')
    m = re.search(r'(%s)' % self.hlgn, matched)
    if m:
        style.append("text-align:%s;" % self.hAlign(m.group(1)))
    # Split "class#id" into its two halves.
    m = re.search(r'^(.*)#(.*)$', aclass)
    if m:
        id = m.group(2)
        aclass = m.group(1)
    if self.restricted:
        # Restricted mode: only the language attribute survives.
        # BUG FIX: the original returned the literal ' lang="%s"'
        # without interpolating the value.
        if lang: return ' lang="%s"' % lang
        else: return ''
    result = []
    if style: result.append(' style="%s"' % "".join(style))
    if aclass: result.append(' class="%s"' % aclass)
    if lang: result.append(' lang="%s"' % lang)
    if id: result.append(' id="%s"' % id)
    if colspan: result.append(' colspan="%s"' % colspan)
    if rowspan: result.append(' rowspan="%s"' % rowspan)
    return ''.join(result)
def hasRawText(self, text):
    """
    checks whether the text has text not already enclosed by a block tag

    >>> t = Textile()
    >>> t.hasRawText('<p>foo bar biz baz</p>')
    False
    >>> t.hasRawText(' why yes, yes it does')
    True
    """
    # Strip out complete block-tag-wrapped spans, then self-closing
    # hr/br tags; anything left over counts as raw text.
    block_pat = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S)
    leftover = block_pat.sub('', text.strip()).strip()
    leftover = re.compile(r'<(hr|br)[^>]*?/>').sub('', leftover)
    return leftover != ''
def table(self, text):
    r"""Render textile table markup via fTable.

    >>> t = Textile()
    >>> t.table('|one|two|three|\n|a|b|c|')
    '\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
    """
    # Pad so a table at the very end of the text still matches \n\n.
    padded = text + "\n\n"
    tbl_re = r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {
        's': self.s, 'a': self.a, 'c': self.c}
    return re.compile(tbl_re, re.S | re.M | re.U).sub(self.fTable, padded)
def fTable(self, match):
    """Regex callback for table(): render one textile table to XHTML.

    Group 1 is the optional "table...." attribute prefix, group 2 the
    pipe-delimited rows.
    """
    tatts = self.pba(match.group(1), 'table')
    rows = []
    for row in [ x for x in match.group(2).split('\n') if x]:
        # Optional per-row attribute prefix ("(cls). " etc.).
        rmtch = re.search(r'^(%s%s\. )(.*)' % (self.a, self.c), row.lstrip())
        if rmtch:
            ratts = self.pba(rmtch.group(1), 'tr')
            row = rmtch.group(2)
        else: ratts = ''
        cells = []
        for cell in row.split('|'):
            ctyp = 'd'
            # A leading underscore marks a header cell (<th>).
            if re.search(r'^_', cell): ctyp = "h"
            # Optional per-cell attribute prefix (span/align/class).
            cmtch = re.search(r'^(_?%s%s%s\. )(.*)' % (self.s, self.a, self.c), cell)
            if cmtch:
                catts = self.pba(cmtch.group(1), 'td')
                cell = cmtch.group(2)
            else: catts = ''
            # Apply span and paragraph formatting inside the cell.
            cell = self.graf(self.span(cell))
            if cell.strip() != '':
                cells.append('\t\t\t<t%s%s>%s</t%s>' % (ctyp, catts, cell, ctyp))
        rows.append("\t\t<tr%s>\n%s\n\t\t</tr>" % (ratts, '\n'.join(cells)))
        cells = []
        catts = None
    return "\t<table%s>\n%s\n\t</table>\n\n" % (tatts, '\n'.join(rows))
def lists(self, text):
    """Render runs of #/* list lines via fList.

    >>> t = Textile()
    >>> t.lists("* one\\n* two\\n* three")
    '\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
    """
    list_re = r'^([#*]+%s .*)$(?![^#*])' % self.c
    return re.compile(list_re, re.U | re.M | re.S).sub(self.fList, text)
def fList(self, match):
    """Regex callback for lists(): convert one run of #/* lines to ol/ul.

    Nesting depth is tracked by the length of the #/* prefix; `lists`
    is a stack of currently-open level prefixes.
    """
    text = match.group(0).split("\n")
    result = []
    lists = []
    for i, line in enumerate(text):
        # Peek at the next line to decide what must be closed.
        try:
            nextline = text[i+1]
        except IndexError:
            nextline = ''
        m = re.search(r"^([#*]+)(%s%s) (.*)$" % (self.a, self.c), line, re.S)
        if m:
            tl, atts, content = m.groups()
            nl = ''
            nm = re.search(r'^([#*]+)\s.*', nextline)
            if nm:
                nl = nm.group(1)
            if tl not in lists:
                # New (deeper) level: open a fresh <ol>/<ul> with its first item.
                lists.append(tl)
                atts = self.pba(atts)
                line = "\t<%sl%s>\n\t\t<li>%s" % (self.lT(tl), atts, self.graf(content))
            else:
                line = "\t\t<li>" + self.graf(content)
            # Close the item unless the next line nests deeper.
            if len(nl) <= len(tl): line = line + "</li>"
            # Close every open list deeper than the next line's level.
            for k in reversed(lists):
                if len(k) > len(nl):
                    line = line + "\n\t</%sl>" % self.lT(k)
                    if len(k) > 1:
                        line = line + "</li>"
                    lists.remove(k)
        result.append(line)
    return "\n".join(result)
def lT(self, input):
    """Return 'o' for ordered (#) list markers, 'u' for unordered (*)."""
    return 'o' if re.search(r'^#+', input) else 'u'
def doPBr(self, in_):
    """Apply doBr() to the body of every <p>...</p> block in *in_*."""
    para_pat = re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S)
    return para_pat.sub(self.doBr, in_)
def doBr(self, match):
    """Regex callback: turn in-paragraph newlines into <br /> tags.

    Newlines followed by list/table markers or whitespace are left alone.
    """
    tag, attrs, body, close = match.group(1), match.group(2), match.group(3), match.group(4)
    body = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', body)
    return '<%s%s>%s%s' % (tag, attrs, body, close)
def block(self, text):
    """Split text on blank lines and render each chunk as a block.

    Tracks "extended" blocks (signature ending in ".."): their closing
    tag `c1` is withheld until the next explicit block starts, so that
    following anonymous paragraphs are folded into the open block.

    >>> t = Textile()
    >>> t.block('h1. foobar baby')
    '\\t<h1>foobar baby</h1>'
    """
    tre = '|'.join(self.btag)
    text = text.split('\n\n')
    # Current block state, carried across chunks for extended blocks.
    tag = 'p'
    atts = cite = graf = ext = ''
    out = []
    anon = False
    for line in text:
        # "tag(atts).(.)(:cite) content" block signature.
        pattern = r'^(%s)(%s%s)\.(\.?)(?::(\S+))? (.*)$' % (tre, self.a, self.c)
        match = re.search(pattern, line, re.S)
        if match:
            if ext:
                # Close the previously open extended block first.
                out.append(out.pop() + c1)
            tag,atts,ext,cite,graf = match.groups()
            o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, graf)
            # leave off c1 if this block is extended, we'll close it at the start of the next block
            if ext:
                line = "%s%s%s%s" % (o1, o2, content, c2)
            else:
                line = "%s%s%s%s%s" % (o1, o2, content, c2, c1)
        else:
            # Anonymous chunk: either a continuation of an extended block
            # or a plain paragraph.
            anon = True
            if ext or not re.search(r'^\s', line):
                o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, line)
                # skip $o1/$c1 because this is part of a continuing extended block
                if tag == 'p' and not self.hasRawText(content):
                    line = content
                else:
                    line = "%s%s%s" % (o2, content, c2)
            else:
                line = self.graf(line)
        line = self.doPBr(line)
        line = re.sub(r'<br>', '<br />', line)
        if ext and anon:
            # Fold the continuation into the still-open extended block.
            out.append(out.pop() + "\n" + line)
        else:
            out.append(line)
        if not ext:
            # Reset state once the block is closed.
            tag = 'p'
            atts = ''
            cite = ''
            graf = ''
    if ext:
        # Close an extended block left open at end of input.
        out.append(out.pop() + c1)
    return '\n\n'.join(out)
def fBlock(self, tag, atts, ext, cite, content):
    """Render one block; return (o1, o2, content, c2, c1).

    o1/c1 are the outer open/close tags, o2/c2 the inner ones; block()
    decides which pieces to emit depending on extended-block state.

    >>> t = Textile()
    >>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
    ('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
    >>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
    ('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
    >>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
    ('<pre>', '<code>', ..., '</code>', '</pre>')
    >>> t.fBlock("h1", "", None, "", "foobar")
    ('', '\\t<h1>', 'foobar', '</h1>', '')
    """
    atts = self.pba(atts)
    o1 = o2 = c2 = c1 = ''
    # fnN blocks become paragraphs with a footnote id and superscript.
    m = re.search(r'fn(\d+)', tag)
    if m:
        tag = 'p'
        if m.group(1) in self.fn:
            fnid = self.fn[m.group(1)]
        else:
            fnid = m.group(1)
        atts = atts + ' id="fn%s"' % fnid
        if atts.find('class=') < 0:
            atts = atts + ' class="footnote"'
        content = ('<sup>%s</sup>' % m.group(1)) + content
    if tag == 'bq':
        cite = self.checkRefs(cite)
        if cite:
            cite = ' cite="%s"' % cite
        else:
            cite = ''
        o1 = "\t<blockquote%s%s>\n" % (cite, atts)
        o2 = "\t\t<p%s>" % atts
        c2 = "</p>"
        c1 = "\n\t</blockquote>"
    elif tag == 'bc':
        # Block code: escape the content and shelve it so later passes
        # don't touch it.
        o1 = "<pre%s>" % atts
        o2 = "<code%s>" % atts
        c2 = "</code>"
        c1 = "</pre>"
        content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
    elif tag == 'notextile':
        # Pass through untouched.
        content = self.shelve(content)
        o1 = o2 = ''
        c1 = c2 = ''
    elif tag == 'pre':
        content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
        o1 = "<pre%s>" % atts
        o2 = c2 = ''
        c1 = '</pre>'
    else:
        # Headings, paragraphs, etc.: single wrapping tag.
        o2 = "\t<%s%s>" % (tag, atts)
        c2 = "</%s>" % tag
        content = self.graf(content)
    return o1, o2, content, c2, c1
def footnoteRef(self, text):
    """Replace [N] references with superscripted footnote links.

    >>> t = Textile()
    >>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
    'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
    """
    # [digits] optionally followed by one whitespace char.
    return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
def footnoteID(self, match):
    """Regex callback: emit the <sup><a> reference for footnote [N].

    The first reference to a number mints a unique anchor id, reused by
    fBlock() when the matching fnN. block is rendered.
    """
    num, trailing = match.groups()
    if num not in self.fn:
        self.fn[num] = str(uuid.uuid4())
    anchor = self.fn[num]
    return '<sup class="footnote"><a href="#fn%s">%s</a></sup>%s' % (
        anchor, num, trailing or '')
def glyphs(self, text):
    """Typographic substitutions: smart quotes, dashes, ellipses,
    dimension/trademark/copyright signs, acronym and caps markup.

    Text inside HTML tags is left untouched; only the segments between
    tags are rewritten.

    >>> t = Textile()
    >>> t.glyphs("apostrophe's")
    'apostrophe’s'
    >>> t.glyphs("back in '88")
    'back in ’88'
    >>> t.glyphs('foo ...')
    'foo …'
    >>> t.glyphs('--')
    '—'
    >>> t.glyphs('FooBar[tm]')
    'FooBar™'
    >>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
    '<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
    """
    # fix: hackish. Pad a trailing double quote so the "double closing"
    # pattern below can see a following space. BUG FIX: the original used
    # r'"\z' -- \z is PCRE's end-of-string anchor (this code was ported
    # from PHP Textile); in Python that escape is invalid (an error since
    # 3.6, a literal "z" before that). Python spells the anchor \Z.
    text = re.sub(r'"\Z', '\" ', text)
    glyph_search = (
        re.compile(r"(\w)\'(\w)"), # apostrophe's
        re.compile(r'(\s)\'(\d+\w?)\b(?!\')'), # back in '88
        re.compile(r'(\S)\'(?=\s|'+self.pnct+'|<|$)'), # single closing
        # NOTE(review): r'\'/' matches a quote followed by a slash; this
        # looks like a leaked PHP pattern delimiter -- a bare opening
        # quote would be r'\''. Confirm against upstream before changing.
        re.compile(r'\'/'), # single opening
        re.compile(r'(\S)\"(?=\s|'+self.pnct+'|<|$)'), # double closing
        re.compile(r'"'), # double opening
        re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), # 3+ uppercase acronym
        re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), # 3+ uppercase
        re.compile(r'\b(\s{0,1})?\.{3}'), # ellipsis
        re.compile(r'(\s?)--(\s?)'), # em dash
        re.compile(r'\s-(?:\s|$)'), # en dash
        re.compile(r'(\d+)( ?)x( ?)(?=\d+)'), # dimension sign
        re.compile(r'\b ?[([]TM[])]', re.I), # trademark
        re.compile(r'\b ?[([]R[])]', re.I), # registered
        re.compile(r'\b ?[([]C[])]', re.I), # copyright
    )
    glyph_replace = [x % dict(self.glyph_defaults) for x in (
        r'\1%(txt_apostrophe)s\2', # apostrophe's
        r'\1%(txt_apostrophe)s\2', # back in '88
        r'\1%(txt_quote_single_close)s', # single closing
        r'%(txt_quote_single_open)s', # single opening
        r'\1%(txt_quote_double_close)s', # double closing
        r'%(txt_quote_double_open)s', # double opening
        r'<acronym title="\2">\1</acronym>', # 3+ uppercase acronym
        r'<span class="caps">\1</span>', # 3+ uppercase
        r'\1%(txt_ellipsis)s', # ellipsis
        r'\1%(txt_emdash)s\2', # em dash
        r' %(txt_endash)s ', # en dash
        r'\1\2%(txt_dimension)s\3', # dimension sign
        r'%(txt_trademark)s', # trademark
        r'%(txt_registered)s', # registered
        r'%(txt_copyright)s', # copyright
    )]
    result = []
    # Split on tags so substitutions never touch markup emitted earlier.
    for line in re.compile(r'(<.*?>)', re.U).split(text):
        if not re.search(r'<.*>', line):
            for s, r in zip(glyph_search, glyph_replace):
                line = s.sub(r, line)
        result.append(line)
    return ''.join(result)
def iAlign(self, input):
    """Map an image-alignment glyph to its HTML ``align`` value."""
    return {'<': 'left', '=': 'center', '>': 'right'}.get(input, '')
def vAlign(self, input):
    """Map a vertical-alignment glyph to its HTML ``valign`` value."""
    return {'^': 'top', '-': 'middle', '~': 'bottom'}.get(input, '')
def hAlign(self, input):
    """Map a horizontal-alignment glyph to its HTML ``align`` value."""
    return {'<': 'left', '=': 'center', '>': 'right', '<>': 'justify'}.get(input, '')
def getRefs(self, text):
    """Strip link-alias definitions like ``[name]http://url`` from *text*.

    Each definition is recorded via ``self.refs`` (which fills
    ``self.urlrefs``) and removed from the returned text.
    """
    alias = re.compile(
        r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http:\/\/|\/)\S+)(?=\s|$)', re.U)
    return alias.sub(self.refs, text)
def refs(self, match):
    """Record one ``[alias]url`` definition and delete it from the text."""
    alias, url = match.groups()
    self.urlrefs[alias] = url
    return ''
def checkRefs(self, url):
    """Resolve a link alias recorded by getRefs; unknown names pass through."""
    if url in self.urlrefs:
        return self.urlrefs[url]
    return url
def relURL(self, url):
    """Prefix bare relative URLs with ``self.hu``; in restricted mode,
    neuter URLs whose scheme is not whitelisted by returning ``'#'``."""
    parts = urlparse(url)
    scheme, netloc, path = parts[0], parts[1], parts[2]
    if (not scheme or scheme == 'http') and not netloc and re.search(r'^\w', path):
        url = self.hu + url
    if self.restricted and scheme and scheme not in self.url_schemes:
        return '#'
    return url
def shelve(self, text):
    """Stash *text* under a fresh opaque key so later passes cannot touch
    it; ``retrieve`` expands the keys back at the end."""
    key = str(uuid.uuid4())
    self.shelf[key] = text
    return key
def retrieve(self, text):
    """Expand every shelved key in *text*, repeating until a fixed point
    is reached (shelved fragments may themselves contain shelved keys).

    >>> t = Textile()
    >>> id = t.shelve("foobar")
    >>> t.retrieve(id)
    'foobar'
    """
    expanded = text
    while True:
        previous = expanded
        for key, value in self.shelf.items():
            expanded = expanded.replace(key, value)
        if expanded == previous:
            return expanded
def encode_html(self, text, quotes=True):
    """Escape HTML-special characters in *text* using numeric character
    references (&#38; &#60; &#62;, plus &#39; &#34; when *quotes* is true).

    BUG FIX: the replacement table had been corrupted by HTML-entity
    decoding -- it contained identity pairs like ('&', '&') and a bare
    ''' that opened an unterminated triple-quoted string (a syntax
    error). Restored the numeric references used by upstream pytextile.
    """
    # '&' must be replaced first so the ampersands introduced by the
    # other replacements are not escaped a second time.
    a = (
        ('&', '&#38;'),
        ('<', '&#60;'),
        ('>', '&#62;')
    )
    if quotes:
        a = a + (
            ("'", '&#39;'),
            ('"', '&#34;')
        )
    for k, v in a:
        text = text.replace(k, v)
    return text
def graf(self, text):
    """Apply the full inline conversion pipeline to one paragraph.

    Order matters: literal regions (notextile, code) are shelved first so
    the later passes cannot rewrite them; glyphs run last, on the final
    text. ``self.lite`` skips block-level passes (lists, tables) and
    ``self.noimage`` skips image markup.
    """
    if not self.lite:
        text = self.noTextile(text)
        text = self.code(text)
    text = self.links(text)
    if not self.noimage:
        text = self.image(text)
    if not self.lite:
        text = self.lists(text)
        text = self.table(text)
    text = self.span(text)
    text = self.footnoteRef(text)
    text = self.glyphs(text)
    # strip the trailing newline(s) introduced by the block passes
    return text.rstrip('\n')
def links(self, text):
    """
    >>> t = Textile()
    >>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
    'fooobar ... and hello world ...'
    """
    # Recognise "text(title)":url link markup; each match is rendered
    # (and shelved) by fLink so later passes cannot alter the markup.
    # self.c is the shared style/class/language attribute sub-pattern.
    punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
    pattern = r'''
        ([\s\[{(]|[%s])?       # $pre
        "                      # start
        (%s)                   # $atts
        ([^"]+?)               # $text
        \s?
        (?:\(([^)]+?)\)(?="))? # $title
        ":
        (\S+?)                 # $url
        (\/)?                  # $slash
        ([^\w\/;]*?)           # $post
        (?=<|\s|$)
    ''' % (re.escape(punct), self.c)
    text = re.compile(pattern, re.X).sub(self.fLink, text)
    return text
def fLink(self, match):
    """Render one matched link as a shelved ``<a>`` tag."""
    pre, atts, text, title, url, slash, post = match.groups()
    pre = pre or ''
    url = self.checkRefs(url)
    atts = self.pba(atts)
    if title:
        atts += ' title="%s"' % self.encode_html(title)
    if not self.noimage:
        text = self.image(text)
    text = self.span(text)
    text = self.glyphs(text)
    url = self.relURL(url)
    if slash:
        url += slash
    anchor = '<a href="%s"%s%s>%s</a>' % (self.encode_html(url), atts,
                                          self.rel, text)
    return pre + self.shelve(anchor) + post
def span(self, text):
    """
    >>> t = Textile()
    >>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
    'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
    """
    # Inline phrase markup: each qtag delimiter (*, **, ??, -, __, _, %,
    # +, ~, ^) is compiled into a verbose regex and handed to fSpan for
    # rendering. Longer delimiters (e.g. **) are tried before their
    # prefixes (*) so they are not split in half.
    qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
    pnct = ".,\"'?!;:"
    for qtag in qtags:
        pattern = re.compile(r"""
            (?:^|(?<=[\s>%(pnct)s])|([\]}]))
            (%(qtag)s)(?!%(qtag)s)
            (%(c)s)
            (?::(\S+))?
            ([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
            ([%(pnct)s]*)
            %(qtag)s
            (?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
        """ % {'qtag':qtag,'c':self.c,'pnct':pnct,'selfpnct':self.pnct}, re.X)
        text = pattern.sub(self.fSpan, text)
    return text
def fSpan(self, match):
    """Render one qtag match (from ``span``) as its HTML element.

    BUG FIX: the ``cite`` attribute was appended without a leading
    space, producing malformed markup such as ``<citecite="...">`` when
    ``pba`` returned an empty string (and ``class="x"cite="..."``
    otherwise). Also dropped a stray trailing semicolon.
    """
    _, tag, atts, cite, content, end, _ = match.groups()
    # delimiter -> element name
    qtags = {
        '*': 'strong',
        '**': 'b',
        '??': 'cite',
        '_' : 'em',
        '__': 'i',
        '-' : 'del',
        '%' : 'span',
        '+' : 'ins',
        '~' : 'sub',
        '^' : 'sup'
    }
    tag = qtags[tag]
    atts = self.pba(atts)
    if cite:
        atts = atts + ' cite="%s"' % cite
    out = "<%s%s>%s%s</%s>" % (tag, atts, content, end, tag)
    return out
def image(self, text):
    """
    >>> t = Textile()
    >>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
    '<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
    """
    # !src(title)!:href image markup; each match is rendered by fImage.
    # self.c is the shared style/class/language attribute sub-pattern.
    pattern = re.compile(r"""
        (?:[\[{])?         # pre
        \!                 # opening !
        (\<|\=|\>)??       # optional alignment atts
        (%s)               # optional style,class atts
        (?:\. )?           # optional dot-space
        ([^\s(!]+)         # presume this is the src
        \s?                # optional space
        (?:\(([^\)]+)\))?  # optional title
        \!                 # closing
        (?::(\S+))?        # optional href
        (?:[\]}]|(?=\s|$)) # lookahead: space or end of string
    """ % self.c, re.U|re.X)
    return pattern.sub(self.fImage, text)
def fImage(self, match):
    """Render one image match as an ``<img>`` tag, wrapped in ``<a>``
    when an href was given."""
    # groups example: (None, '', '/imgs/myphoto.jpg', None, None)
    align, atts, url, title, href = match.groups()
    atts = self.pba(atts)
    if align:
        atts += ' align="%s"' % self.iAlign(align)
    if title:
        atts += ' title="%s" alt="%s"' % (title, title)
    else:
        atts += ' alt=""'
    # TODO how to do this in python?
    # size = @getimagesize(url)
    # if (size) atts .= " size[3]"
    if href:
        href = self.checkRefs(href)
    url = self.relURL(self.checkRefs(url))
    pieces = []
    if href:
        pieces.append('<a href="%s">' % href)
    pieces.append('<img src="%s"%s />' % (url, atts))
    if href:
        pieces.append('</a>')
    return ''.join(pieces)
def code(self, text):
    """Shelve code spans/blocks so later passes leave them untouched."""
    for start, end, handler in (('<code>', '</code>', self.fCode),
                                ('@', '@', self.fCode),
                                ('<pre>', '</pre>', self.fPre)):
        text = self.doSpecial(text, start, end, handler)
    return text
def fCode(self, match):
    """Shelve one inline code span, HTML-escaping it unless restricted
    (restricted input was already escaped earlier in the pipeline)."""
    before, body, after = match.groups()
    after = after or ''
    if not self.restricted:
        # text needs to be escaped
        body = self.encode_html(body)
    return ''.join([before, self.shelve('<code>%s</code>' % body), after])
def fPre(self, match):
    """Shelve the contents of one <pre> block, HTML-escaping it unless
    restricted; the <pre> tags themselves stay in the text."""
    before, body, after = match.groups()
    after = after or ''
    if not self.restricted:
        # text needs to be escaped
        body = self.encode_html(body)
    return ''.join([before, '<pre>', self.shelve(body), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
    """Apply *method* to every ``start``...``end`` delimited region of
    *text* (defaults to ``self.fSpecial``). Delimiters are taken
    literally; regions must begin a line or follow whitespace/brackets."""
    handler = self.fSpecial if method is None else method
    marker = re.compile(
        r'(^|\s|[\[({>])%s(.*?)%s(\s|$|[\])}])?'
        % (re.escape(start), re.escape(end)),
        re.M | re.S)
    return marker.sub(handler, text)
def fSpecial(self, match):
    """Default doSpecial handler: shelve one special block (notextile or
    code) after HTML-escaping its contents."""
    before, body, after = match.groups()
    after = after or ''
    return ''.join([before, self.shelve(self.encode_html(body)), after])
def noTextile(self, text):
    """Shelve regions marked ``<notextile>...</notextile>`` or ``==...==``
    so they pass through the converter untouched."""
    for start, end in (('<notextile>', '</notextile>'), ('==', '==')):
        text = self.doSpecial(text, start, end, self.fTextile)
    return text
def fTextile(self, match):
    """Shelve one notextile region verbatim (no escaping)."""
    before, raw, after = match.groups()
    after = after or ''
    return ''.join([before, self.shelve(raw), after])
def textile(text, **args):
    """Convenience wrapper: convert *text* with a fresh Textile instance.

    Additional keyword parameters:
      encoding - input encoding (default: 'utf-8')
      output - output encoding (default: 'utf-8')
      validate - perform mxTidy or uTidyLib validation (default: False)
      sanitize - sanitize output good for weblog comments (default: False)
      head_offset - ignored
    """
    converter = Textile()
    return converter.textile(text, **args)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
    # CLI: with one filename argument, textile that file to stdout;
    # with no argument, run the doctest suite instead.
    import sys
    if len(sys.argv) == 2:
        f = open(sys.argv[1])
        text = ''.join(f.readlines())
        # NOTE(review): Python-2 print statement; the file handle is never
        # closed (acceptable for a one-shot CLI, but a `with` would be safer).
        print Textile().textile(text)
    else:
        _test()
| Python |
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
# Exception hierarchy: everything derives from RomanError so callers can
# catch all conversion failures with a single except clause.
class RomanError(Exception):
    """Base class for all roman-numeral conversion errors."""
class OutOfRangeError(RomanError):
    """Raised when the integer is outside the representable range 1..4999."""
class NotIntegerError(RomanError):
    """Raised when the input is not a whole number."""
class InvalidRomanNumeralError(RomanError):
    """Raised when a string is not a well-formed Roman numeral."""
#Define digit mapping: value-ordered so the greedy conversion loops work.
romanNumeralMap = (('M',  1000),
                   ('CM', 900),
                   ('D',  500),
                   ('CD', 400),
                   ('C',  100),
                   ('XC', 90),
                   ('L',  50),
                   ('XL', 40),
                   ('X',  10),
                   ('IX', 9),
                   ('V',  5),
                   ('IV', 4),
                   ('I',  1))

def toRoman(n):
    """Convert an integer (1..4999) to a Roman numeral string.

    Raises OutOfRangeError for values outside 1..4999 and
    NotIntegerError for non-integral numbers.

    BUG FIX: the original used the Python-2-only ``raise X, msg`` form,
    which is a syntax error on Python 3; ``raise X(msg)`` is equivalent
    and valid on both.
    """
    if not (0 < n < 5000):
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    result = ""
    # Greedy: subtract the largest numeral value that still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result

#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    """, re.VERBOSE)

def fromRoman(s):
    """Convert a Roman numeral string to an integer.

    Raises InvalidRomanNumeralError for blank or malformed input.
    (Same py3-compatible raise-form fix as toRoman.)
    """
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    result = 0
    index = 0
    # The pattern guarantees well-formedness, so a single greedy scan
    # over the value-ordered map accumulates the total.
    for numeral, integer in romanNumeralMap:
        while s[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
| Python |
# $Id: nodes.py 6011 2009-07-09 10:00:07Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):
    """Abstract base class of nodes in a document tree."""
    parent = None
    """Back-reference to the Node immediately containing this Node."""
    document = None
    """The `document` node at the root of the tree containing this Node."""
    source = None
    """Path or description of the input source which generated this Node."""
    line = None
    """The line number (1-based) of the beginning of this Node in `source`."""
    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty. A node is more
        than a simple container. Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.
        Use `len()` to check node length. Use `None` to represent a boolean
        false value.
        """
        return True
    if sys.version_info < (3,):
        # on 2.x, str(node) will be a byte string with Unicode
        # characters > 255 escaped; on 3.x this is no longer necessary
        def __str__(self):
            return unicode(self).encode('raw_unicode_escape')
    def asdom(self, dom=None):
        """Return a DOM **fragment** representation of this Node."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        return self._dom_node(domroot)
    def pformat(self, indent=' ', level=0):
        """
        Return an indented pseudo-XML representation, for test purposes.
        Override in subclasses.
        """
        raise NotImplementedError
    def copy(self):
        """Return a copy of self."""
        raise NotImplementedError
    def deepcopy(self):
        """Return a deep copy of self (also copying children)."""
        raise NotImplementedError
    def setup_child(self, child):
        # Hook `child` into this node's tree context: parent plus, when a
        # document is attached, the document reference and the current
        # source/line for error reporting.
        child.parent = self
        if self.document:
            child.document = self.document
            if child.source is None:
                child.source = self.document.current_source
            if child.line is None:
                child.line = self.document.current_line
    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling the
        `dispatch_visit()` method of `visitor` when entering each
        node. (The `walkabout()` method is similar, except it also
        calls the `dispatch_departure()` method before exiting each
        node.)
        This tree traversal supports limited in-place tree
        modifications. Replacing one node with one or more nodes is
        OK, as is removing an element. However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.
        Within ``visit`` methods (and ``depart`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).
        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` implementation for each `Node` subclass encountered.
        Return true if we should stop the traversal.
        """
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walk calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except (SkipChildren, SkipNode):
                return stop
            except SkipDeparture: # not applicable; ignore
                pass
            children = self.children
            try:
                # iterate over a copy so visitors may mutate self.children
                for child in children[:]:
                    if child.walk(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except StopTraversal:
            stop = 1
        return stop
    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which
        see), except also call the `dispatch_departure()` method
        before exiting each node.
        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` and ``depart`` implementation for each `Node`
        subclass encountered.
        Return true if we should stop the traversal.
        """
        call_depart = 1
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except SkipNode:
                return stop
            except SkipDeparture:
                # visit succeeded but the visitor asked to skip departure
                call_depart = 0
            children = self.children
            try:
                for child in children[:]:
                    if child.walkabout(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        except StopTraversal:
            stop = 1
        if call_depart:
            visitor.document.reporter.debug(
                'docutils.nodes.Node.walkabout calling dispatch_departure '
                'for %s' % self.__class__.__name__)
            visitor.dispatch_departure(self)
        return stop
    def _fast_traverse(self, cls):
        """Specialized traverse() that only supports instance checks."""
        result = []
        if isinstance(self, cls):
            result.append(self)
        for child in self.children:
            result.extend(child._fast_traverse(cls))
        return result
    def _all_traverse(self):
        """Specialized traverse() that doesn't check for a condition."""
        result = []
        result.append(self)
        for child in self.children:
            result.extend(child._all_traverse())
        return result
    def traverse(self, condition=None,
                 include_self=1, descend=1, siblings=0, ascend=0):
        """
        Return an iterable containing
        * self (if include_self is true)
        * all descendants in tree traversal order (if descend is true)
        * all siblings (if siblings is true) and their descendants (if
          also descend is true)
        * the siblings of the parent (if ascend is true) and their
          descendants (if also descend is true), and so on
        If `condition` is not None, the iterable contains only nodes
        for which ``condition(node)`` is true. If `condition` is a
        node class ``cls``, it is equivalent to a function consisting
        of ``return isinstance(node, cls)``.
        If ascend is true, assume siblings to be true as well.
        For example, given the following tree::
            <paragraph>
                <emphasis>      <--- emphasis.traverse() and
                    <strong>    <--- strong.traverse() are called.
                        Foo
                    Bar
                <reference name="Baz" refid="baz">
                    Baz
        Then list(emphasis.traverse()) equals ::
            [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]
        and list(strong.traverse(ascend=1)) equals ::
            [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
        """
        if ascend:
            siblings=1
        # Check for special argument combinations that allow using an
        # optimized version of traverse()
        if include_self and descend and not siblings:
            if condition is None:
                return self._all_traverse()
            elif isinstance(condition, (types.ClassType, type)):
                return self._fast_traverse(condition)
        # Check if `condition` is a class (check for TypeType for Python
        # implementations that use only new-style classes, like PyPy).
        if isinstance(condition, (types.ClassType, type)):
            node_class = condition
            def condition(node, node_class=node_class):
                return isinstance(node, node_class)
        r = []
        if include_self and (condition is None or condition(self)):
            r.append(self)
        if descend and len(self.children):
            for child in self:
                r.extend(child.traverse(
                    include_self=1, descend=1, siblings=0, ascend=0,
                    condition=condition))
        if siblings or ascend:
            # walk forward through following siblings, climbing one level
            # per iteration when ascend is requested
            node = self
            while node.parent:
                index = node.parent.index(node)
                for sibling in node.parent[index+1:]:
                    r.extend(sibling.traverse(include_self=1, descend=descend,
                                              siblings=0, ascend=0,
                                              condition=condition))
                if not ascend:
                    break
                else:
                    node = node.parent
        return r
    def next_node(self, condition=None,
                  include_self=0, descend=1, siblings=0, ascend=0):
        """
        Return the first node in the iterable returned by traverse(),
        or None if the iterable is empty.
        Parameter list is the same as of traverse. Note that
        include_self defaults to 0, though.
        """
        iterable = self.traverse(condition=condition,
                                 include_self=include_self, descend=descend,
                                 siblings=siblings, ascend=ascend)
        try:
            return iterable[0]
        except IndexError:
            return None
if sys.version_info < (3,):
    class reprunicode(unicode):
        """
        A class that removes the initial u from unicode's repr.
        """
        def __repr__(self):
            return unicode.__repr__(self)[1:]
else:
    # NOTE(review): `unicode` does not exist on Python 3; this branch
    # relies on 2to3 rewriting `unicode` to `str` -- confirm the build
    # still runs the converter.
    reprunicode = unicode
class Text(Node, reprunicode):
    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes. Initialize by passing a string to the constructor.
    Access the text itself with the `astext` method.
    """
    tagname = '#text'
    children = ()
    """Text nodes have no children, and cannot have children."""
    if sys.version_info > (3,):
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            if isinstance(data, bytes):
                raise TypeError('expecting str data, not bytes')
            return reprunicode.__new__(cls, data)
    else:
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            return reprunicode.__new__(cls, data)
    def __init__(self, data, rawsource=''):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""
    def __repr__(self):
        # truncate long text so node dumps stay readable
        data = reprunicode.__repr__(self)
        if len(data) > 70:
            data = reprunicode.__repr__(self[:64] + ' ...')
        return '<%s: %s>' % (self.tagname, data)
    def shortrepr(self):
        # like __repr__ but with a much tighter length budget
        data = reprunicode.__repr__(self)
        if len(data) > 20:
            data = reprunicode.__repr__(self[:16] + ' ...')
        return '<%s: %s>' % (self.tagname, data)
    def _dom_node(self, domroot):
        return domroot.createTextNode(unicode(self))
    def astext(self):
        return reprunicode(self)
    # Note about __unicode__: The implementation of __unicode__ here,
    # and the one raising NotImplemented in the superclass Node had
    # to be removed when changing Text to a subclass of unicode instead
    # of UserString, since there is no way to delegate the __unicode__
    # call to the superclass unicode:
    # unicode itself does not have __unicode__ method to delegate to
    # and calling unicode(self) or unicode.__new__ directly creates
    # an infinite loop
    def copy(self):
        return self.__class__(reprunicode(self), rawsource=self.rawsource)
    def deepcopy(self):
        # a Text node has no children, so a shallow copy is already deep
        return self.copy()
    def pformat(self, indent=' ', level=0):
        """Return an indented pseudo-XML rendering (one line per text line)."""
        result = []
        indent = indent * level
        for line in self.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)
    # rstrip and lstrip are used by substitution definitions where
    # they are expected to return a Text instance, this was formerly
    # taken care of by UserString. Note that then and now the
    # rawsource member is lost.
    def rstrip(self, chars=None):
        return self.__class__(reprunicode.rstrip(self, chars))
    def lstrip(self, chars=None):
        return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):
    """
    `Element` is the superclass to all specific elements.
    Elements contain attributes and child nodes. Elements emulate
    dictionaries for attributes, indexing by attribute name (a string). To
    set the attribute 'att' to 'value', do::
        element['att'] = 'value'
    There are two special attributes: 'ids' and 'names'. Both are
    lists of unique identifiers, and names serve as human interfaces
    to IDs. Names are case- and whitespace-normalized (see the
    fully_normalize_name() function), and IDs conform to the regular
    expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
    Elements also emulate lists for child nodes (element nodes and/or text
    nodes), indexing by integer. To get the first child node, use::
        element[0]
    Elements may be constructed using the ``+=`` operator. To add one new
    child node to element, do::
        element += node
    This is equivalent to ``element.append(node)``.
    To add a list of multiple child nodes at once, use the same ``+=``
    operator::
        element += [node1, node2]
    This is equivalent to ``element.extend([node1, node2])``.
    """
    list_attributes = ('ids', 'classes', 'names', 'dupnames', 'backrefs')
    """List attributes, automatically initialized to empty lists for
    all nodes."""
    tagname = None
    """The element generic identifier. If None, it is set as an instance
    attribute to the name of the class."""
    child_text_separator = '\n\n'
    """Separator for child nodes, used by `astext()` method."""
    def __init__(self, rawsource='', *children, **attributes):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""
        self.children = []
        """List of child nodes (elements and/or `Text`)."""
        self.extend(children) # maintain parent info
        self.attributes = {}
        """Dictionary of attribute {name: value}."""
        # Initialize list attributes.
        for att in self.list_attributes:
            self.attributes[att] = []
        for att, value in attributes.items():
            att = att.lower()
            if att in self.list_attributes:
                # mutable list; make a copy for this node
                self.attributes[att] = value[:]
            else:
                self.attributes[att] = value
        if self.tagname is None:
            self.tagname = self.__class__.__name__
    def _dom_node(self, domroot):
        # Serialize this element (attributes and children) as a DOM element.
        element = domroot.createElement(self.tagname)
        for attribute, value in self.attlist():
            if isinstance(value, list):
                value = ' '.join([serial_escape('%s' % v) for v in value])
            element.setAttribute(attribute, '%s' % value)
        for child in self.children:
            element.appendChild(child._dom_node(domroot))
        return element
    def __repr__(self):
        # show at most ~60 characters of child reprs
        data = ''
        for c in self.children:
            data += c.shortrepr()
            if len(data) > 60:
                data = data[:56] + ' ...'
                break
        if self['names']:
            return '<%s "%s": %s>' % (self.__class__.__name__,
                                      '; '.join(self['names']), data)
        else:
            return '<%s: %s>' % (self.__class__.__name__, data)
    def shortrepr(self):
        if self['names']:
            return '<%s "%s"...>' % (self.__class__.__name__,
                                     '; '.join(self['names']))
        else:
            return '<%s...>' % self.tagname
    def __unicode__(self):
        if self.children:
            return u'%s%s%s' % (self.starttag(),
                                ''.join([unicode(c) for c in self.children]),
                                self.endtag())
        else:
            return self.emptytag()
    if sys.version_info > (3,):
        # 2to3 doesn't convert __unicode__ to __str__
        __str__ = __unicode__
    def starttag(self):
        # Render the opening tag with all non-default attributes.
        parts = [self.tagname]
        for name, value in self.attlist():
            if value is None: # boolean attribute
                parts.append(name)
            elif isinstance(value, list):
                values = [serial_escape('%s' % v) for v in value]
                parts.append('%s="%s"' % (name, ' '.join(values)))
            else:
                parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)
    def endtag(self):
        return '</%s>' % self.tagname
    def emptytag(self):
        return u'<%s/>' % ' '.join([self.tagname] +
                                   ['%s="%s"' % (n, v)
                                    for n, v in self.attlist()])
    def __len__(self):
        return len(self.children)
    def __contains__(self, key):
        # support both membership test for children and attributes
        # (has_key is translated to "in" by 2to3)
        # NOTE(review): this method is superseded by the class-body
        # assignment ``__contains__ = hasattr`` further down, so only the
        # attribute-name test is actually in effect.
        if isinstance(key, basestring):
            return key in self.attributes
        return key in self.children
    def __getitem__(self, key):
        # string -> attribute, int/slice -> child(ren)
        if isinstance(key, basestring):
            return self.attributes[key]
        elif isinstance(key, int):
            return self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            return self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')
    def __setitem__(self, key, item):
        if isinstance(key, basestring):
            self.attributes[str(key)] = item
        elif isinstance(key, int):
            self.setup_child(item)
            self.children[key] = item
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            for node in item:
                self.setup_child(node)
            self.children[key.start:key.stop] = item
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')
    def __delitem__(self, key):
        if isinstance(key, basestring):
            del self.attributes[key]
        elif isinstance(key, int):
            del self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            del self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a simple '
                              'slice, or an attribute name string')
    def __add__(self, other):
        return self.children + other
    def __radd__(self, other):
        return other + self.children
    def __iadd__(self, other):
        """Append a node or a list of nodes to `self.children`."""
        if isinstance(other, Node):
            self.append(other)
        elif other is not None:
            self.extend(other)
        return self
    def astext(self):
        return self.child_text_separator.join(
            [child.astext() for child in self.children])
    def non_default_attributes(self):
        # attributes worth serializing: everything except empty list attrs
        atts = {}
        for key, value in self.attributes.items():
            if self.is_not_default(key):
                atts[key] = value
        return atts
    def attlist(self):
        # sorted (name, value) pairs for stable serialization
        attlist = self.non_default_attributes().items()
        attlist.sort()
        return attlist
    def get(self, key, failobj=None):
        return self.attributes.get(key, failobj)
    def hasattr(self, attr):
        return attr in self.attributes
    def delattr(self, attr):
        if attr in self.attributes:
            del self.attributes[attr]
    def setdefault(self, key, failobj=None):
        return self.attributes.setdefault(key, failobj)
    has_key = hasattr
    # support operator in
    __contains__ = hasattr
    def append(self, item):
        self.setup_child(item)
        self.children.append(item)
    def extend(self, item):
        for node in item:
            self.append(node)
    def insert(self, index, item):
        if isinstance(item, Node):
            self.setup_child(item)
            self.children.insert(index, item)
        elif item is not None:
            self[index:index] = item
    def pop(self, i=-1):
        return self.children.pop(i)
    def remove(self, item):
        self.children.remove(item)
    def index(self, item):
        return self.children.index(item)
    def is_not_default(self, key):
        # list attributes default to []; anything else is always "set"
        if self[key] == [] and key in self.list_attributes:
            return 0
        else:
            return 1
    def update_basic_atts(self, dict):
        """
        Update basic attributes ('ids', 'names', 'classes',
        'dupnames', but not 'source') from node or dictionary `dict`.
        """
        # NOTE(review): the parameter shadows the builtin `dict`; renaming
        # would change the keyword-argument interface, so it is kept.
        if isinstance(dict, Node):
            dict = dict.attributes
        for att in ('ids', 'classes', 'names', 'dupnames'):
            for value in dict.get(att, []):
                if not value in self[att]:
                    self[att].append(value)
    def clear(self):
        self.children = []
    def replace(self, old, new):
        """Replace one child `Node` with another child or children."""
        index = self.index(old)
        if isinstance(new, Node):
            self.setup_child(new)
            self[index] = new
        elif new is not None:
            self[index:index+1] = new
    def replace_self(self, new):
        """
        Replace `self` node with `new`, where `new` is a node or a
        list of nodes.
        """
        update = new
        if not isinstance(new, Node):
            # `new` is a list; update first child.
            try:
                update = new[0]
            except IndexError:
                update = None
        if isinstance(update, Element):
            update.update_basic_atts(self)
        else:
            # `update` is a Text node or `new` is an empty list.
            # Assert that we aren't losing any attributes.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                assert not self[att], \
                       'Losing "%s" attribute: %s' % (att, self[att])
        self.parent.replace(self, new)
    def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
        """
        Return the index of the first child whose class exactly matches.
        Parameters:
        - `childclass`: A `Node` subclass to search for, or a tuple of `Node`
          classes. If a tuple, any of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self[index], c):
                    return index
        return None
    def first_child_not_matching_class(self, childclass, start=0,
                                       end=sys.maxint):
        """
        Return the index of the first child whose class does *not* match.
        Parameters:
        - `childclass`: A `Node` subclass to skip, or a tuple of `Node`
          classes. If a tuple, none of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self.children[index], c):
                    break
            else:
                return index
        return None
    def pformat(self, indent=' ', level=0):
        return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
                       [child.pformat(indent, level+1)
                        for child in self.children])
    def copy(self):
        # shallow copy: attributes only, no children
        return self.__class__(**self.attributes)
    def deepcopy(self):
        copy = self.copy()
        copy.extend([child.deepcopy() for child in self.children])
        return copy
    def set_class(self, name):
        """Add a new class to the "classes" attribute."""
        warnings.warn('docutils.nodes.Element.set_class deprecated; '
                      "append to Element['classes'] list attribute directly",
                      DeprecationWarning, stacklevel=2)
        assert ' ' not in name
        self['classes'].append(name.lower())
    def note_referenced_by(self, name=None, id=None):
        """Note that this Element has been referenced by its name
        `name` or id `id`."""
        self.referenced = 1
        # Element.expect_referenced_by_* dictionaries map names or ids
        # to nodes whose ``referenced`` attribute is set to true as
        # soon as this node is referenced by the given name or id.
        # Needed for target propagation.
        by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
        by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
        if by_name:
            assert name is not None
            by_name.referenced = 1
        if by_id:
            assert id is not None
            by_id.referenced = 1
class TextElement(Element):

    """
    An element which directly contains text.

    Its children are all `Text` or `Inline` subclass nodes. You can
    check whether an element's context is inline simply by checking whether
    its immediate parent is a `TextElement` instance (including subclasses).
    This is handy for nodes like `image` that can appear both inline and as
    standalone body elements.

    If passing children to `__init__()`, make sure to set `text` to
    ``''`` or some other suitable value.
    """

    child_text_separator = ''
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        # A non-empty `text` is wrapped in a `Text` node and becomes the
        # first child; an empty string contributes no child at all.
        if text != '':
            children = (Text(text),) + children
        Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):

    """An element which directly contains preformatted text."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        # Mark the content as preformatted for XML serialization.
        self.attributes['xml:space'] = 'preserve'
# ========
# Mixins
# ========

# Mixin for nodes whose cross-references can be resolved; transforms
# flip `resolved` to true once resolution has happened.
class Resolvable:

    resolved = 0

# Mixin for nodes that collect back-references in a "backrefs" attribute.
class BackLinkable:

    def add_backref(self, refid):
        self['backrefs'].append(refid)

# ====================
# Element Categories
# ====================

# The classes below are empty "marker" categories: concrete node classes
# inherit from them so transforms and writers can classify nodes with
# simple isinstance() tests.

class Root: pass

class Titular: pass

class PreBibliographic:
    """Category of Node which may occur before Bibliographic Nodes."""

class Bibliographic: pass

class Decorative(PreBibliographic): pass

class Structural: pass

class Body: pass

class General(Body): pass

class Sequential(Body):
    """List-like elements."""

class Admonition(Body): pass

class Special(Body):
    """Special internal body elements."""

class Invisible(PreBibliographic):
    """Internal elements that don't appear in output."""

class Part: pass

class Inline: pass

class Referential(Resolvable): pass

class Targetable(Resolvable):

    referenced = 0

    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target.
    Required for MoinMoin/reST compatibility."""

class Labeled:
    """Contains a `label` as its first element."""
# ==============
# Root Element
# ==============
class document(Root, Structural, Element):

    """
    The document root element.

    Do not instantiate this class directly; use
    `docutils.utils.new_document()` instead.
    """

    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)

        self.current_source = None
        """Path to or description of the input source being processed."""

        self.current_line = None
        """Line number (1-based) of `current_source`."""

        self.settings = settings
        """Runtime settings data record."""

        self.reporter = reporter
        """System message generator."""

        self.indirect_targets = []
        """List of indirect target nodes."""

        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""

        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""

        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""

        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""

        self.nameids = {}
        """Mapping of names to unique id's."""

        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit."""

        self.ids = {}
        """Mapping of ids to nodes."""

        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""

        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""

        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""

        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""

        self.symbol_footnotes = []
        """List of symbol footnote nodes."""

        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""

        self.footnotes = []
        """List of manually-numbered footnote nodes."""

        self.citations = []
        """List of citation nodes."""

        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""

        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""

        self.id_start = 1
        """Initial ID number."""

        self.parse_messages = []
        """System messages generated while parsing."""

        self.transform_messages = []
        """System messages generated while applying transforms."""

        # Imported here, not at module level, to avoid a circular import
        # between docutils.nodes and docutils.transforms.
        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""

        self.decoration = None
        """Document's `decoration` node."""

        self.document = self

    def __getstate__(self):
        """
        Return dict with unpicklable references removed.
        """
        state = self.__dict__.copy()
        # The reporter (open streams) and transformer (bound callables)
        # cannot be pickled; drop them from the pickled state.
        state['reporter'] = None
        state['transformer'] = None
        return state

    def asdom(self, dom=None):
        """Return a DOM representation of this document."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        domroot.appendChild(self._dom_node(domroot))
        return domroot

    def set_id(self, node, msgnode=None):
        """Assign `node` a unique ID, register it in `self.ids`, and
        return the ID.  Reports duplicate IDs via the reporter."""
        for id in node['ids']:
            if id in self.ids and self.ids[id] is not node:
                msg = self.reporter.severe('Duplicate ID: "%s".' % id)
                if msgnode != None:
                    msgnode += msg
        if not node['ids']:
            # Derive an ID from the first name that yields an unused one;
            # otherwise fall back to an auto-numbered ID.
            for name in node['names']:
                id = self.settings.id_prefix + make_id(name)
                if id and id not in self.ids:
                    break
            else:
                id = ''
                while not id or id in self.ids:
                    id = (self.settings.id_prefix +
                          self.settings.auto_id_prefix + str(self.id_start))
                    self.id_start += 1
            node['ids'].append(id)
        self.ids[id] = node
        return id

    def set_name_id_map(self, node, id, msgnode=None, explicit=None):
        """
        `self.nameids` maps names to IDs, while `self.nametypes` maps names to
        booleans representing hyperlink type (True==explicit,
        False==implicit).  This method updates the mappings.

        The following state transition table shows how `self.nameids` ("ids")
        and `self.nametypes` ("types") change with new input (a call to this
        method), and what actions are performed ("implicit"-type system
        messages are INFO/1, and "explicit"-type system messages are ERROR/3):

        ==== ===== ======== ======== ======= ==== ===== =====
        Old State Input Action New State Notes
        ----------- -------- ----------------- ----------- -----
        ids types new type sys.msg. dupname ids types
        ==== ===== ======== ======== ======= ==== ===== =====
        - - explicit - - new True
        - - implicit - - new False
        None False explicit - - new True
        old False explicit implicit old new True
        None True explicit explicit new None True
        old True explicit explicit new,old None True [#]_
        None False implicit implicit new None False
        old False implicit implicit new,old None False
        None True implicit implicit new None True
        old True implicit implicit new old True
        ==== ===== ======== ======== ======= ==== ===== =====

        .. [#] Do not clear the name-to-id map or invalidate the old target if
           both old and new targets are external and refer to identical URIs.
           The new target is invalidated regardless.
        """
        for name in node['names']:
            if name in self.nameids:
                self.set_duplicate_name_id(node, id, name, msgnode, explicit)
            else:
                self.nameids[name] = id
                self.nametypes[name] = explicit

    def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
        """Resolve a duplicate name per the transition table documented on
        `set_name_id_map`; explicit targets win over implicit ones."""
        old_id = self.nameids[name]
        old_explicit = self.nametypes[name]
        self.nametypes[name] = old_explicit or explicit
        if explicit:
            if old_explicit:
                level = 2
                if old_id is not None:
                    old_node = self.ids[old_id]
                    if 'refuri' in node:
                        refuri = node['refuri']
                        if old_node['names'] \
                               and 'refuri' in old_node \
                               and old_node['refuri'] == refuri:
                            level = 1  # just inform if refuri's identical
                    if level > 1:
                        dupname(old_node, name)
                        self.nameids[name] = None
                msg = self.reporter.system_message(
                    level, 'Duplicate explicit target name: "%s".' % name,
                    backrefs=[id], base_node=node)
                if msgnode != None:
                    msgnode += msg
                dupname(node, name)
            else:
                # New explicit target displaces the old implicit one.
                self.nameids[name] = id
                if old_id is not None:
                    old_node = self.ids[old_id]
                    dupname(old_node, name)
        else:
            if old_id is not None and not old_explicit:
                # Two implicit targets: both are invalidated.
                self.nameids[name] = None
                old_node = self.ids[old_id]
                dupname(old_node, name)
            dupname(node, name)
        if not explicit or (not old_explicit and old_id is not None):
            msg = self.reporter.info(
                'Duplicate implicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            if msgnode != None:
                msgnode += msg

    def has_name(self, name):
        return name in self.nameids

    # "note" here is an imperative verb: "take note of".
    def note_implicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=None)

    def note_explicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=1)

    def note_refname(self, node):
        self.refnames.setdefault(node['refname'], []).append(node)

    def note_refid(self, node):
        self.refids.setdefault(node['refid'], []).append(node)

    def note_indirect_target(self, target):
        self.indirect_targets.append(target)
        if target['names']:
            self.note_refname(target)

    def note_anonymous_target(self, target):
        self.set_id(target)

    def note_autofootnote(self, footnote):
        self.set_id(footnote)
        self.autofootnotes.append(footnote)

    def note_autofootnote_ref(self, ref):
        self.set_id(ref)
        self.autofootnote_refs.append(ref)

    def note_symbol_footnote(self, footnote):
        self.set_id(footnote)
        self.symbol_footnotes.append(footnote)

    def note_symbol_footnote_ref(self, ref):
        self.set_id(ref)
        self.symbol_footnote_refs.append(ref)

    def note_footnote(self, footnote):
        self.set_id(footnote)
        self.footnotes.append(footnote)

    def note_footnote_ref(self, ref):
        self.set_id(ref)
        self.footnote_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_citation(self, citation):
        self.citations.append(citation)

    def note_citation_ref(self, ref):
        self.set_id(ref)
        self.citation_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_substitution_def(self, subdef, def_name, msgnode=None):
        name = whitespace_normalize_name(def_name)
        if name in self.substitution_defs:
            msg = self.reporter.error(
                'Duplicate substitution definition name: "%s".' % name,
                base_node=subdef)
            if msgnode != None:
                msgnode += msg
            oldnode = self.substitution_defs[name]
            dupname(oldnode, name)
        # keep only the last definition:
        self.substitution_defs[name] = subdef
        # case-insensitive mapping:
        self.substitution_names[fully_normalize_name(name)] = name

    def note_substitution_ref(self, subref, refname):
        subref['refname'] = whitespace_normalize_name(refname)

    def note_pending(self, pending, priority=None):
        self.transformer.add_pending(pending, priority)

    def note_parse_message(self, message):
        self.parse_messages.append(message)

    def note_transform_message(self, message):
        self.transform_messages.append(message)

    def note_source(self, source, offset):
        self.current_source = source
        # `offset` is 0-based; `current_line` is 1-based (None passes
        # through unchanged).
        if offset is None:
            self.current_line = offset
        else:
            self.current_line = offset + 1

    def copy(self):
        return self.__class__(self.settings, self.reporter,
                              **self.attributes)

    def get_decoration(self):
        """Return the document's `decoration` node, creating and inserting
        it (after any titular elements) on first use."""
        if not self.decoration:
            self.decoration = decoration()
            index = self.first_child_not_matching_class(Titular)
            if index is None:
                self.append(self.decoration)
            else:
                self.insert(index, self.decoration)
        return self.decoration
# ================
# Title Elements
# ================

class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
# An informal heading that doesn't open a new section.
class rubric(Titular, TextElement): pass

# ========================
# Bibliographic Elements
# ========================

# `docinfo` is the container; the rest are its field elements.
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass

# =====================
# Decorative Elements
# =====================
class decoration(Decorative, Element):

    """Container for the document's `header` and `footer` decorations."""

    def get_header(self):
        # Create an empty header on demand, as the first child.
        kids = self.children
        if not kids or not isinstance(kids[0], header):
            self.insert(0, header())
        return self.children[0]

    def get_footer(self):
        # Create an empty footer on demand, as the last child.
        kids = self.children
        if not kids or not isinstance(kids[-1], footer):
            self.append(footer())
        return self.children[-1]
# Page decoration containers, managed via `decoration.get_header()`
# and `decoration.get_footer()`.
class header(Decorative, Element): pass
class footer(Decorative, Element): pass

# =====================
# Structural Elements
# =====================

class section(Structural, Element): pass

class topic(Structural, Element):

    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures. A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.

    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document. Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """

class sidebar(Structural, Element):

    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material. A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it. Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.

    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document. Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """

class transition(Structural, Element): pass
# ===============
# Body Elements
# ===============

class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass

class option(Part, Element):

    # Option string and argument are juxtaposed with no separator.
    child_text_separator = ''

class option_argument(Part, TextElement):

    def astext(self):
        # Prefix the argument text with its delimiter ("=" or " ").
        return self.get('delimiter', ' ') + TextElement.astext(self)

class option_group(Part, Element):

    child_text_separator = ', '

class option_list(Sequential, Element): pass

class option_list_item(Part, Element):

    child_text_separator = ' '

class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass

class line(Part, TextElement):

    # Nesting depth of this line within its line_block.
    indent = None

class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):

    """
    System message element.

    Do not instantiate this class directly; use
    ``document.reporter.info/warning/error/severe()`` instead.
    """

    def __init__(self, message=None, *children, **attributes):
        if message:
            # The message text becomes the first child, as a paragraph.
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            # Dump the offending children for debugging, then re-raise.
            print 'system_message: children=%r' % (children,)
            raise

    def astext(self):
        """Return "source:line: (TYPE/LEVEL) text" for this message."""
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))
class pending(Special, Invisible, Element):

    """
    The "pending" element is used to encapsulate a pending operation: the
    operation (transform), the point at which to apply it, and any data it
    requires. Only the pending operation's location within the document is
    stored in the public document tree (by the "pending" object itself); the
    operation and its data are stored in the "pending" object's internal
    instance attributes.

    For example, say you want a table of contents in your reStructuredText
    document. The easiest way to specify where to put it is from within the
    document, with a directive::

        .. contents::

    But the "contents" directive can't do its work until the entire document
    has been parsed and possibly transformed to some extent. So the directive
    code leaves a placeholder behind that will trigger the second phase of its
    processing, something like this::

        <pending ...public attributes...> + internal attributes

    Use `document.note_pending()` so that the
    `docutils.transforms.Transformer` stage of processing can run all pending
    transforms.
    """

    def __init__(self, transform, details=None,
                 rawsource='', *children, **attributes):
        Element.__init__(self, rawsource, *children, **attributes)

        self.transform = transform
        """The `docutils.transforms.Transform` class implementing the pending
        operation."""

        self.details = details or {}
        """Detail data (dictionary) required by the pending operation."""

    def pformat(self, indent=' ', level=0):
        """Extend `Element.pformat` with a rendition of the internal
        (non-tree) attributes, for debugging."""
        internals = [
            '.. internal attributes:',
            ' .transform: %s.%s' % (self.transform.__module__,
                                    self.transform.__name__),
            ' .details:']
        details = self.details.items()
        details.sort()
        for key, value in details:
            if isinstance(value, Node):
                # Render node values as nested pseudo-XML.
                internals.append('%7s%s:' % ('', key))
                internals.extend(['%9s%s' % ('', line)
                                  for line in value.pformat().splitlines()])
            elif value and isinstance(value, list) \
                     and isinstance(value[0], Node):
                internals.append('%7s%s:' % ('', key))
                for v in value:
                    internals.extend(['%9s%s' % ('', line)
                                      for line in v.pformat().splitlines()])
            else:
                internals.append('%7s%s: %r' % ('', key, value))
        return (Element.pformat(self, indent, level)
                + ''.join([(' %s%s\n' % (indent * level, line))
                           for line in internals]))

    def copy(self):
        # Shallow copy must preserve the internal attributes as well.
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    pass

# =================
# Inline Elements
# =================

class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass

class image(General, Inline, Element):

    def astext(self):
        # An image has no text content; fall back to its alt text.
        return self.get('alt', '')

class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:

    """
    "Visitor" pattern [GoF95]_ abstract superclass implementation for
    document tree traversals.

    Each node class has corresponding methods, doing nothing by
    default; override individual methods for specific and useful
    behaviour. The `dispatch_visit()` method is called by
    `Node.walk()` upon entering a node. `Node.walkabout()` also calls
    the `dispatch_departure()` method before exiting a node.

    The dispatch methods call "``visit_`` + node class name" or
    "``depart_`` + node class name", resp.

    This is a base class for visitors whose ``visit_...`` & ``depart_...``
    methods should be implemented for *all* node types encountered (such as
    for `docutils.writers.Writer` subclasses). Unimplemented methods will
    raise exceptions.

    For sparse traversals, where only certain node types are of interest,
    subclass `SparseNodeVisitor` instead. When (mostly or entirely) uniform
    processing is desired, subclass `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    optional = ()
    """
    Tuple containing node class names (as strings).

    No exception will be raised if writers do not implement visit
    or departure functions for these node classes.

    Used to ensure transitional compatibility with existing 3rd-party writers.
    """

    def __init__(self, document):
        self.document = document

    def dispatch_visit(self, node):
        """
        Call self."``visit_`` + node class name" with `node` as
        parameter. If the ``visit_...`` method does not exist, call
        self.unknown_visit.
        """
        node_name = node.__class__.__name__
        # Fall back to unknown_visit when no specific handler exists.
        method = getattr(self, 'visit_' + node_name, self.unknown_visit)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
            % (method.__name__, node_name))
        return method(node)

    def dispatch_departure(self, node):
        """
        Call self."``depart_`` + node class name" with `node` as
        parameter. If the ``depart_...`` method does not exist, call
        self.unknown_departure.
        """
        node_name = node.__class__.__name__
        method = getattr(self, 'depart_' + node_name, self.unknown_departure)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
            % (method.__name__, node_name))
        return method(node)

    def unknown_visit(self, node):
        """
        Called when entering unknown `Node` types.

        Raise an exception unless overridden.
        """
        # Node types listed in `optional` are silently skipped unless the
        # strict_visitor setting forces an error.
        if (self.document.settings.strict_visitor
            or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s visiting unknown node type: %s'
                % (self.__class__, node.__class__.__name__))

    def unknown_departure(self, node):
        """
        Called before exiting unknown `Node` types.

        Raise exception unless overridden.
        """
        if (self.document.settings.strict_visitor
            or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s departing unknown node type: %s'
                % (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest. When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """

    # No-op visit_/depart_ methods for every concrete node class are
    # injected by `_add_node_class_names` below.
class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only. Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError
# The functions below are installed as methods on the visitor classes by
# `_add_node_class_names`, hence the `self` parameter.

def _call_default_visit(self, node):
    """Delegate to the visitor's `default_visit` method."""
    self.default_visit(node)

def _call_default_departure(self, node):
    """Delegate to the visitor's `default_departure` method."""
    self.default_departure(node)

def _nop(self, node):
    """Do nothing; used for `SparseNodeVisitor` stubs."""
    pass
def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    # For every concrete node class name, give GenericNodeVisitor a
    # default-dispatching visit_/depart_ pair and SparseNodeVisitor a
    # no-op pair.
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):

    """
    Make a complete copy of a tree or branch, including element attributes.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        # Stack of saved parents, one entry per level of descent; the
        # root "parent" is a plain list so get_tree_copy() can index it.
        self.parent_stack = []
        self.parent = []

    def get_tree_copy(self):
        """Return the root node of the copied tree."""
        return self.parent[0]

    def default_visit(self, node):
        """Copy the current node, and make it the new acting parent."""
        duplicate = node.copy()
        self.parent.append(duplicate)
        self.parent_stack.append(self.parent)
        self.parent = duplicate

    def default_departure(self, node):
        """Restore the previous acting parent."""
        self.parent = self.parent_stack.pop()
class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass

class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node. The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass

class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node. The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass

class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass

class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method. The current node's
    children and siblings are not affected.
    """

    pass

class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found. This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass

class StopTraversal(TreePruningException):

    """
    Stop the traversal altogether. The current node's ``depart_...`` method
    is not affected. The parent nodes' ``depart_...`` methods are also called
    as usual. No other nodes are visited. This is an alternative to
    NodeFound that does not cause exception handling to trickle up to the
    caller.
    """

    pass
def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers will conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``. For CSS compatibility, identifiers (the "class"
    and "id" attributes) should have no underscores, colons, or periods.
    Hyphens may be used.

    - The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:

          ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
          followed by any number of letters, digits ([0-9]), hyphens ("-"),
          underscores ("_"), colons (":"), and periods (".").

    - However the `CSS1 spec`_ defines identifiers based on the "name" token,
      a tighter interpretation ("flex" tokenizer notation; "latin1" and
      "escape" 8-bit characters have been replaced with entities)::

          unicode \\[0-9a-f]{1,4}
          latin1 [¡-ÿ]
          escape {unicode}|\\[ -~¡-ÿ]
          nmchar [-a-z0-9]|{latin1}|{escape}
          name {nmchar}+

    The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
    or periods ("."), therefore "class" and "id" attributes should not contain
    these characters. They should be replaced with hyphens ("-"). Combined
    with HTML's requirements (the first character must be a letter; no
    "unicode", "latin1", or "escape" characters), this results in the
    ``[a-z](-?[a-z0-9]+)*`` pattern.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    id = string.lower()
    # Python 2 only: decode byte strings (ASCII assumed) so translate()
    # can use the unicode-ordinal mapping tables below.
    if not isinstance(id, unicode):
        id = id.decode()
    # Replace ligatures/stroked letters with ASCII look-alikes first,
    # before the NFKD fallback discards them entirely.
    id = id.translate(_non_id_translate_digraphs)
    id = id.translate(_non_id_translate)
    # get rid of non-ascii characters
    id = unicodedata.normalize('NFKD', id).\
         encode('ASCII', 'ignore').decode('ASCII')
    # shrink runs of whitespace and replace by hyphen
    id = _non_id_chars.sub('-', ' '.join(id.split()))
    # Strip leading digits/hyphens and trailing hyphens.
    id = _non_id_at_ends.sub('', id)
    return str(id)
# Runs of characters not allowed in an identifier (see `make_id`).
_non_id_chars = re.compile('[^a-z0-9]+')
# Leading digits/hyphens and trailing hyphens, stripped by `make_id`.
_non_id_at_ends = re.compile('^[-0-9]+|-+$')

# Single "letter with stroke/hook/curl/..." characters mapped to ASCII
# look-alikes; keys are unicode ordinals (for unicode.translate).
_non_id_translate = {
    0x00f8: u'o',       # o with stroke
    0x0111: u'd',       # d with stroke
    0x0127: u'h',       # h with stroke
    0x0131: u'i',       # dotless i
    0x0142: u'l',       # l with stroke
    0x0167: u't',       # t with stroke
    0x0180: u'b',       # b with stroke
    0x0183: u'b',       # b with topbar
    0x0188: u'c',       # c with hook
    0x018c: u'd',       # d with topbar
    0x0192: u'f',       # f with hook
    0x0199: u'k',       # k with hook
    0x019a: u'l',       # l with bar
    0x019e: u'n',       # n with long right leg
    0x01a5: u'p',       # p with hook
    0x01ab: u't',       # t with palatal hook
    0x01ad: u't',       # t with hook
    0x01b4: u'y',       # y with hook
    0x01b6: u'z',       # z with stroke
    0x01e5: u'g',       # g with stroke
    0x0225: u'z',       # z with hook
    0x0234: u'l',       # l with curl
    0x0235: u'n',       # n with curl
    0x0236: u't',       # t with curl
    0x0237: u'j',       # dotless j
    0x023c: u'c',       # c with stroke
    0x023f: u's',       # s with swash tail
    0x0240: u'z',       # z with swash tail
    0x0247: u'e',       # e with stroke
    0x0249: u'j',       # j with stroke
    0x024b: u'q',       # q with hook tail
    0x024d: u'r',       # r with stroke
    0x024f: u'y',       # y with stroke
    }
# Ligatures and digraphs that expand to two ASCII characters.
_non_id_translate_digraphs = {
    0x00df: u'sz',      # ligature sz
    0x00e6: u'ae',      # ae
    0x0153: u'oe',      # ligature oe
    0x0238: u'db',      # db digraph
    0x0239: u'qp',      # qp digraph
    }
def dupname(node, name):
    """Record `name` as a duplicate on `node`: move it from the node's
    "names" attribute to its "dupnames" attribute."""
    duplicates = node['dupnames']
    active = node['names']
    duplicates.append(name)
    active.remove(name)
    # Pretend the node has been referenced so that no unnecessary
    # system_messages are generated for it later on.
    node.referenced = 1
def fully_normalize_name(name):
    """Return `name` lower-cased, with leading/trailing whitespace dropped
    and internal whitespace runs collapsed to single spaces."""
    words = name.lower().split()
    return ' '.join(words)
def whitespace_normalize_name(name):
    """Return `name` with leading/trailing whitespace dropped and internal
    whitespace runs collapsed to single spaces; case is preserved."""
    words = name.split()
    return ' '.join(words)
def serial_escape(value):
    """Escape string values that are elements of a list, for serialization.

    Backslashes are doubled *before* spaces are escaped, so the backslash
    introduced for a space is never itself re-escaped.
    """
    escaped = value.replace('\\', '\\\\')
    return escaped.replace(' ', '\\ ')
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
| Python |
# $Id: utils.py 6120 2009-09-10 11:02:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import b
class SystemMessage(ApplicationError):

    """Exception wrapping a docutils ``system_message`` element.

    Raised when a message's level reaches the reporter's halt level;
    the message text becomes the exception payload.
    """

    def __init__(self, system_message, level):
        Exception.__init__(self, system_message.astext())
        self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:

    """
    Info/warning/error reporter and ``system_message`` element generator.

    Five levels of system messages are defined, along with corresponding
    methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.

    There is typically one Reporter object per process.  A Reporter object is
    instantiated with thresholds for reporting (generating warnings) and
    halting processing (raising exceptions), a switch to turn debug output on
    or off, and an I/O stream for warnings.  These are stored as instance
    attributes.

    When a system message is generated, its level is compared to the stored
    thresholds, and a warning or error is generated as appropriate.  Debug
    messages are produced iff the stored debug switch is on, independently of
    other thresholds.  Message output is sent to the stored warning stream if
    not set to ''.

    The Reporter class also employs a modified form of the "Observer" pattern
    [GoF95]_ to track system messages generated.  The `attach_observer`
    method should be called before parsing, with a bound method or function
    which accepts system messages.  The observer can be removed with
    `detach_observer`, and another added in its place.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
    """List of names for system message levels, indexed by level."""

    # system message level constants:
    (DEBUG_LEVEL,
     INFO_LEVEL,
     WARNING_LEVEL,
     ERROR_LEVEL,
     SEVERE_LEVEL) = range(5)

    def __init__(self, source, report_level, halt_level, stream=None,
                 debug=0, encoding=None, error_handler='replace'):
        """
        :Parameters:
            - `source`: The path to or description of the source data.
            - `report_level`: The level at or above which warning output will
              be sent to `stream`.
            - `halt_level`: The level at or above which `SystemMessage`
              exceptions will be raised, halting execution.
            - `debug`: Show debug (level=0) system messages?
            - `stream`: Where warning output is sent.  Can be file-like (has
              a ``.write`` method), a string (file name, opened for writing),
              '' (empty string, for discarding all stream messages) or
              `None` (implies `sys.stderr`; default).
            - `encoding`: The output encoding.
            - `error_handler`: The error handler for stderr output encoding.
        """

        self.source = source
        """The path to or description of the source data."""

        self.error_handler = error_handler
        """The character encoding error handler."""

        self.debug_flag = debug
        """Show debug (level=0) system messages?"""

        self.report_level = report_level
        """The level at or above which warning output will be sent
        to `self.stream`."""

        self.halt_level = halt_level
        """The level at or above which `SystemMessage` exceptions
        will be raised, halting execution."""

        if stream is None:
            stream = sys.stderr
        elif type(stream) in (str, unicode):
            # `stream` is a file name (Python 2: byte or unicode string);
            # open it for writing.
            # Leave stream untouched if it's ''.
            if stream != '':
                if type(stream) == str:
                    stream = open(stream, 'w')
                elif type(stream) == unicode:
                    stream = open(stream.encode(), 'w')

        self.stream = stream
        """Where warning output is sent."""

        if encoding is None:
            # Inherit the stream's encoding when it advertises one.
            try:
                encoding = stream.encoding
            except AttributeError:
                pass

        self.encoding = encoding or 'ascii'
        """The output character encoding."""

        self.observers = []
        """List of bound methods or functions to call with each system_message
        created."""

        self.max_level = -1
        """The highest level system message generated so far."""

    def set_conditions(self, category, report_level, halt_level,
                       stream=None, debug=0):
        """Deprecated: set thresholds/stream/debug flag after construction."""
        warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
                      'set attributes via configuration settings or directly',
                      DeprecationWarning, stacklevel=2)
        self.report_level = report_level
        self.halt_level = halt_level
        if stream is None:
            stream = sys.stderr
        self.stream = stream
        self.debug_flag = debug

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes
        one argument, a `nodes.system_message` instance.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        """Remove a previously attached observer."""
        self.observers.remove(observer)

    def notify_observers(self, message):
        """Pass the system_message `message` to every attached observer."""
        for observer in self.observers:
            observer(message)

    def system_message(self, level, message, *children, **kwargs):
        """
        Return a system_message object.

        Raise an exception or generate a warning if appropriate.
        """
        attributes = kwargs.copy()
        if 'base_node' in kwargs:
            # Inherit source/line from the given node (or its nearest
            # ancestor that has them) unless explicitly given in `kwargs`.
            source, line = get_source_line(kwargs['base_node'])
            del attributes['base_node']
            if source is not None:
                attributes.setdefault('source', source)
            if line is not None:
                attributes.setdefault('line', line)
        attributes.setdefault('source', self.source)
        msg = nodes.system_message(message, level=level,
                                   type=self.levels[level],
                                   *children, **attributes)
        # Emit to the warning stream when the level clears the reporting
        # threshold, is a debug message with debugging on, or will halt.
        if self.stream and (level >= self.report_level
                            or self.debug_flag and level == self.DEBUG_LEVEL
                            or level >= self.halt_level):
            msgtext = msg.astext().encode(self.encoding, self.error_handler)
            self.stream.write(msgtext)
            self.stream.write(b('\n'))
        if level >= self.halt_level:
            raise SystemMessage(msg, level)
        if level > self.DEBUG_LEVEL or self.debug_flag:
            self.notify_observers(msg)
        self.max_level = max(level, self.max_level)
        return msg

    def debug(self, *args, **kwargs):
        """
        Level-0, "DEBUG": an internal reporting issue.  Typically, there is
        no effect on the processing.  Level-0 system messages are handled
        separately from the others.
        """
        if self.debug_flag:
            return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)

    def info(self, *args, **kwargs):
        """
        Level-1, "INFO": a minor issue that can be ignored.  Typically there
        is no effect on processing, and level-1 system messages are not
        reported.
        """
        return self.system_message(self.INFO_LEVEL, *args, **kwargs)

    def warning(self, *args, **kwargs):
        """
        Level-2, "WARNING": an issue that should be addressed.  If ignored,
        there may be unpredictable problems with the output.
        """
        return self.system_message(self.WARNING_LEVEL, *args, **kwargs)

    def error(self, *args, **kwargs):
        """
        Level-3, "ERROR": an error that should be addressed.  If ignored, the
        output will contain errors.
        """
        return self.system_message(self.ERROR_LEVEL, *args, **kwargs)

    def severe(self, *args, **kwargs):
        """
        Level-4, "SEVERE": a severe error that must be addressed.  If
        ignored, the output will contain severe errors.  Typically level-4
        system messages are turned into exceptions which halt processing.
        """
        return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
# Exception hierarchy for extension-option processing; raised by
# extract_options() / assemble_option_dict() below.
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
    """
    Return a dictionary mapping extension option names to converted values.

    :Parameters:
        - `field_list`: A flat field list without field arguments, where each
          field body consists of a single paragraph only.
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `ValueError` for invalid option values (raised by the conversion
          function).
        - `TypeError` for invalid option value types (raised by conversion
          function).
        - `DuplicateOptionError` for duplicate options.
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    # Parse the fields into (name, value) pairs, then convert; both steps
    # may raise (see the exception list above).
    return assemble_option_dict(extract_options(field_list), options_spec)
def extract_options(field_list):
    """
    Return a list of option (name, value) pairs from field names & bodies.

    :Parameter:
        `field_list`: A flat field list, where each field name is a single
        word and each field body consists of a single paragraph only.

    :Exceptions:
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    options = []
    for field in field_list:
        field_name = field[0].astext()
        if len(field_name.split()) != 1:
            raise BadOptionError(
                'extension option field name may not contain multiple words')
        name = str(field_name.lower())
        body = field[1]
        if len(body) == 0:
            # Empty body: the option is present but carries no data.
            data = None
        else:
            # Accept exactly one paragraph containing a single Text node.
            well_formed = (len(body) == 1
                           and isinstance(body[0], nodes.paragraph)
                           and len(body[0]) == 1
                           and isinstance(body[0][0], nodes.Text))
            if not well_formed:
                raise BadOptionDataError(
                    'extension option field body may contain\n'
                    'a single paragraph only (option "%s")' % name)
            data = body[0][0].astext()
        options.append((name, data))
    return options
def assemble_option_dict(option_list, options_spec):
    """
    Return a mapping of option names to values.

    :Parameters:
        - `option_list`: A list of (name, value) pairs (the output of
          `extract_options()`).
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `DuplicateOptionError` for duplicate options.
        - `ValueError` for invalid option values (raised by conversion
          function).
        - `TypeError` for invalid option value types (raised by conversion
          function).
    """
    options = {}
    for name, value in option_list:
        convertor = options_spec[name] # raises KeyError if unknown
        if convertor is None:
            raise KeyError(name) # or if explicitly disabled
        if name in options:
            raise DuplicateOptionError('duplicate option "%s"' % name)
        try:
            options[name] = convertor(value)
        # Python 2 "except ..., name" syntax; re-raise the conversion
        # error with the option name and offending value prepended.
        except (ValueError, TypeError), detail:
            raise detail.__class__('(option: "%s"; value: %r)\n%s'
                                   % (name, value, ' '.join(detail.args)))
    return options
class NameValueError(DataError): pass
def decode_path(path):
    """
    Decode file/path string.  Return `nodes.reprunicode` object.

    Provides a conversion to unicode without the UnicodeDecodeError of the
    implicit 'ascii:strict' decoding: try the filesystem encoding first,
    then UTF-8, and finally fall back to ASCII with replacement characters
    (which never raises).
    """
    # see also http://article.gmane.org/gmane.text.docutils.user/2905
    # NOTE(review): the previous version re-decoded with the filesystem
    # encoding a second time even after the UTF-8 fallback succeeded;
    # restructured into a single fs-encoding -> utf-8 -> ascii:replace
    # fallback chain.  (Python 2: `path` is a byte string.)
    try:
        path = path.decode(sys.getfilesystemencoding(), 'strict')
    except UnicodeDecodeError:
        try:
            path = path.decode('utf-8', 'strict')
        except UnicodeDecodeError:
            path = path.decode('ascii', 'replace')
    return nodes.reprunicode(path)
def extract_name_value(line):
    """
    Return a list of (name, value) pairs parsed from `line`.

    `line` has the form ``name=value name2="value two" ...``; values may be
    bare words or quoted with single or double quotes.  Names are returned
    lowercased.

    :Exception:
        `NameValueError` for invalid input (missing name, missing data, bad
        quotes, etc.).
    """
    attlist = []
    remainder = line
    while remainder:
        equals = remainder.find('=')
        if equals == -1:
            raise NameValueError('missing "="')
        attname = remainder[:equals].strip()
        if equals == 0 or not attname:
            raise NameValueError(
                'missing attribute name before "="')
        remainder = remainder[equals+1:].lstrip()
        if not remainder:
            raise NameValueError(
                'missing value after "%s="' % attname)
        if remainder[0] in '\'"':
            # Quoted value: scan for the matching close quote.
            quote = remainder[0]
            endquote = remainder.find(quote, 1)
            if endquote == -1:
                raise NameValueError(
                    'attribute "%s" missing end quote (%s)'
                    % (attname, quote))
            if len(remainder) > endquote + 1 \
                   and remainder[endquote + 1].strip():
                raise NameValueError(
                    'attribute "%s" end quote (%s) not followed by '
                    'whitespace' % (attname, quote))
            data = remainder[1:endquote]
            remainder = remainder[endquote+1:].lstrip()
        else:
            # Bare value: runs up to the next space (or end of line).
            space = remainder.find(' ')
            if space == -1:
                data = remainder
                remainder = ''
            else:
                data = remainder[:space]
                remainder = remainder[space+1:].lstrip()
        attlist.append((attname.lower(), data))
    return attlist
def new_reporter(source_path, settings):
    """
    Return a new Reporter object configured from runtime settings.

    :Parameters:
        `source_path` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings (report/halt levels, warning stream, debug
            flag, and error-output encoding/handler).
    """
    return Reporter(
        source_path, settings.report_level, settings.halt_level,
        stream=settings.warning_stream, debug=settings.debug,
        encoding=settings.error_encoding,
        error_handler=settings.error_encoding_error_handler)
def new_document(source_path, settings=None):
    """
    Return a new empty document object.

    :Parameters:
        `source_path` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings.  If none provided, a default set will be used.
    """
    from docutils import frontend
    if settings is None:
        # Fall back to the stock option defaults when no settings given.
        settings = frontend.OptionParser().get_default_values()
    source = decode_path(source_path)
    document = nodes.document(settings, new_reporter(source, settings),
                              source=source)
    document.note_source(source, -1)
    return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
    """Apply the first matching (pattern, substitution) pair to `paragraph`.

    Only paragraphs consisting of a single Text node are touched; the
    substitution replaces that node in place and at most one pattern is
    applied.
    """
    if len(paragraph) != 1 or not isinstance(paragraph[0], nodes.Text):
        return
    textnode = paragraph[0]
    for pattern, substitution in keyword_substitutions:
        if pattern.search(textnode):
            paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
            return
def relative_path(source, target):
    """
    Build and return a path to `target`, relative to `source` (both files).

    If there is no common prefix, return the absolute path to `target`
    (joined with '/' for use in URLs).
    """
    source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
    target_parts = os.path.abspath(target).split(os.sep)
    # Compare the first 2 parts because '/dir'.split('/') == ['', 'dir']:
    if source_parts[:2] != target_parts[:2]:
        # Nothing in common between the paths; use '/' for URLs:
        return '/'.join(target_parts)
    # Count the shared leading components.
    common = 0
    limit = min(len(source_parts), len(target_parts))
    while common < limit and source_parts[common] == target_parts[common]:
        common += 1
    # Climb out of the source's remaining directories (its last component
    # is the file name itself, hence the -1), then descend to the target.
    ups = ['..'] * (len(source_parts) - common - 1)
    return '/'.join(ups + target_parts[common:])
def get_stylesheet_reference(settings, relative_to=None):
    """
    Retrieve a stylesheet reference from the settings object.

    Deprecated.  Use `get_stylesheet_list()` instead, which supports
    multiple stylesheets as a comma-separated list.
    """
    if not settings.stylesheet_path:
        return settings.stylesheet
    assert not settings.stylesheet, (
        'stylesheet and stylesheet_path are mutually exclusive.')
    if relative_to is None:
        relative_to = settings._destination
    return relative_path(relative_to, settings.stylesheet_path)
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
    """
    Retrieve a list of stylesheet references from the settings object.

    ``stylesheet_path`` wins over ``stylesheet``; both are comma-separated
    strings.  An empty list is returned when neither is set.
    """
    if settings.stylesheet_path:
        assert not settings.stylesheet, (
            'stylesheet and stylesheet_path are mutually exclusive.')
        sheets = settings.stylesheet_path
    elif settings.stylesheet:
        sheets = settings.stylesheet
    else:
        return []
    return sheets.split(",")
def get_trim_footnote_ref_space(settings):
    """
    Return whether or not to trim footnote reference space.

    An explicit ``trim_footnote_reference_space`` setting wins; when it is
    None, trimming is enabled exactly if the footnote reference style is
    'superscript'.
    """
    explicit = settings.trim_footnote_reference_space
    if explicit is not None:
        return explicit
    return (hasattr(settings, 'footnote_references')
            and settings.footnote_references == 'superscript')
def get_source_line(node):
    """
    Return the "source" and "line" attributes from `node`, or from its
    closest ancestor that has either one set.

    Returns ``(None, None)`` when no ancestor carries the information.
    """
    current = node
    while current:
        if current.source or current.line:
            return current.source, current.line
        current = current.parent
    return None, None
def escape2null(text):
    """Return a string with escape-backslashes converted to nulls.

    Each backslash becomes '\\x00'; the character it escaped (if any)
    is kept as-is.
    """
    result = []
    i = 0
    length = len(text)
    while i < length:
        ch = text[i]
        if ch == '\\':
            result.append('\x00')
            i += 1
            # Copy the escaped character verbatim (if the backslash
            # wasn't the last character).
            if i < length:
                result.append(text[i])
                i += 1
        else:
            result.append(ch)
            i += 1
    return ''.join(result)
def unescape(text, restore_backslashes=0):
    """
    Return a string with nulls removed or restored to backslashes.

    When removing, a null followed by a space or newline is dropped
    together with that whitespace character (backslash-escaped spaces).
    """
    if restore_backslashes:
        return text.replace('\x00', '\\')
    # Longer separators first, so escaped whitespace is consumed before
    # the bare null is stripped.
    for separator in ('\x00 ', '\x00\n', '\x00'):
        text = text.replace(separator, '')
    return text
# Column-width table keyed by the codes returned from
# unicodedata.east_asian_width(); used by east_asian_column_width() below.
east_asian_widths = {'W': 2,   # Wide
                     'F': 2,   # Full-width (wide)
                     'Na': 1,  # Narrow
                     'H': 1,   # Half-width (narrow)
                     'N': 1,   # Neutral (not East Asian, treated as narrow)
                     'A': 1}   # Ambiguous (s/b wide in East Asian context,
                               # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
    """Return the number of display columns `text` occupies, counting
    East Asian wide and full-width characters as two columns each.

    Only unicode strings get the per-character treatment (relies on the
    Python 2 `unicode` builtin); anything else falls back to plain len().
    """
    if isinstance(text, unicode):
        total = 0
        for c in text:
            total += east_asian_widths[unicodedata.east_asian_width(c)]
        return total
    else:
        return len(text)
# Use the width-aware measurement when this Python's unicodedata module
# provides east_asian_width(); otherwise fall back to plain len().
if hasattr(unicodedata, 'east_asian_width'):
    column_width = east_asian_column_width
else:
    column_width = len
def uniq(L):
    """Return a new list with duplicates removed, keeping first-seen order.

    Membership is tested with ``in`` (equality), so unhashable elements
    such as lists are supported.
    """
    unique = []
    for element in L:
        if element not in unique:
            unique.append(element)
    return unique
class DependencyList:

    """
    List of dependencies, with file recording support.

    Note that the output file is not automatically closed.  You have
    to explicitly call the close() method.
    """

    def __init__(self, output_file=None, dependencies=[]):
        """
        Initialize the dependency list, automatically setting the
        output file to `output_file` (see `set_output()`) and adding
        all supplied dependencies.
        """
        # NOTE(review): the mutable default is harmless here because
        # `dependencies` is only iterated, never mutated.
        self.set_output(output_file)
        for i in dependencies:
            self.add(i)

    def set_output(self, output_file):
        """
        Set the output file and clear the list of already added
        dependencies.

        `output_file` must be a string.  The specified file is
        immediately overwritten.

        If output_file is '-', the output will be written to stdout.
        If it is None, no file output is done when calling add().
        """
        self.list = []
        if output_file == '-':
            self.file = sys.stdout
        elif output_file:
            self.file = open(output_file, 'w')
        else:
            self.file = None

    def add(self, *filenames):
        """
        If a dependency `filename` has not already been added,
        append it to self.list and print it to self.file if self.file
        is not None.
        """
        for filename in filenames:
            if not filename in self.list:
                self.list.append(filename)
                if self.file is not None:
                    # Python 2 print-to-file syntax; one name per line.
                    print >>self.file, filename

    def close(self):
        """
        Close the output file.
        """
        # NOTE(review): assumes an output file was set; when self.file is
        # None (no-output mode) this raises AttributeError -- and when it
        # is sys.stdout, stdout gets closed.  Confirm callers guard this.
        self.file.close()
        self.file = None

    def __repr__(self):
        if self.file:
            output_file = self.file.name
        else:
            output_file = None
        return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| Python |
# -*- coding: utf8 -*-
# $Id: __init__.py 6156 2009-10-08 09:42:38Z milde $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import sys
import os
import time
import re
import string
from docutils import frontend, nodes, languages, writers, utils, transforms
from docutils.writers.newlatex2e import unicode_map
# compatibility module for Python <= 2.4
if not hasattr(string, 'Template'):
import docutils._string_template_compat
string.Template = docutils._string_template_compat.Template
class Writer(writers.Writer):

    """LaTeX2e writer: translates the document tree and fills a template.

    Fixes applied in review: "possibile" -> "possible" typo in two option
    help strings; local variable no longer shadows the `file` builtin.
    """

    supported = ('latex','latex2e')
    """Formats this writer supports."""

    default_template = 'default.tex'
    default_template_path = os.path.dirname(__file__)

    settings_spec = (
        'LaTeX-Specific Options',
        'The LaTeX "--output-encoding" default is "latin-1:strict".',
        (('Specify documentclass. Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Specify document options. Multiple options can be given, '
          'separated by commas. Default is "a4paper".',
          ['--documentoptions'],
          {'default': 'a4paper', }),
         ('Use LaTeX footnotes. (default)',
          ['--use-latex-footnotes'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for footnote text.',
          ['--figure-footnotes'],
          {'dest': 'use_latex_footnotes', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default is "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Use \\cite command for citations. ',
          ['--use-latex-citations'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for citations '
          '(might get mixed with real figures). (default)',
          ['--figure-citations'],
          {'dest': 'use_latex_citations', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify LaTeX packages/stylesheets. '
          ' A style is referenced with \\usepackage if extension is '
          '".sty" or omitted and with \\input else. '
          ' Overrides previous --stylesheet and --stylesheet-path settings.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Like --stylesheet, but the path is rewritten '
          'relative to the output file. ',
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet'}),
         ('Link to the stylesheet(s) in the output file. (default)',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet(s) in the output file. '
          'Stylesheets must be accessible during processing. ',
          ['--embed-stylesheet'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Specify the template file. Default: "%s".' % default_template,
          ['--template'],
          {'default': default_template, 'metavar': '<file>'}),
         ('Table of contents by LaTeX. (default) ',
          ['--use-latex-toc'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table of contents by Docutils (without page numbers). ',
          ['--use-docutils-toc'],
          {'dest': 'use_latex_toc', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Add parts on top of the section hierarchy.',
          ['--use-part-section'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document info table. (default) ',
          ['--use-docutils-docinfo'],
          {'dest': 'use_latex_docinfo', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document title.',
          ['--use-latex-docinfo'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ("Typeset abstract as topic. (default)",
          ['--topic-abstract'],
          {'dest': 'use_latex_abstract', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ("Use LaTeX abstract environment for the document's abstract. ",
          ['--use-latex-abstract'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),
         ('Enable compound enumerators for nested enumerated lists '
          '(e.g. "1.2.a.ii"). Default: disabled.',
          ['--compound-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compound enumerators for nested enumerated lists. '
          'This is the default.',
          ['--no-compound-enumerators'],
          {'action': 'store_false', 'dest': 'compound_enumerators'}),
         ('Enable section ("." subsection ...) prefixes for compound '
          'enumerators. This has no effect without --compound-enumerators.'
          'Default: disabled.',
          ['--section-prefix-for-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable section prefixes for compound enumerators. '
          'This is the default.',
          ['--no-section-prefix-for-enumerators'],
          {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
         ('Set the separator between section number and enumerator '
          'for compound enumerated lists. Default is "-".',
          ['--section-enumerator-separator'],
          {'default': '-', 'metavar': '<char>'}),
         # Fixed typo: "possibile" -> "possible".
         ('When possible, use the specified environment for literal-blocks. '
          'Default is quoting of whitespace and special chars.',
          ['--literal-block-env'],
          {'default': ''}),
         # Fixed typo: "possibile" -> "possible".
         ('When possible, use verbatim for literal-blocks. '
          'Compatibility alias for "--literal-block-env=verbatim".',
          ['--use-verbatim-when-possible'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table style. "standard" with horizontal and vertical lines, '
          '"booktabs" (LaTeX booktabs style) only horizontal lines '
          'above and below the table and below the header or "borderless". '
          'Default: "standard"',
          ['--table-style'],
          {'choices': ['standard', 'booktabs','nolines', 'borderless'],
           'default': 'standard',
           'metavar': '<format>'}),
         ('LaTeX graphicx package option. '
          'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
          'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
          'Default is no option.',
          ['--graphicx-option'],
          {'default': ''}),
         ('LaTeX font encoding. '
          'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
          'any other combination of options to the `fontenc` package. ',
          ['--font-encoding'],
          {'default': 'T1'}),
         ('Per default the latex-writer puts the reference title into '
          'hyperreferences. Specify "ref*" or "pageref*" to get the section '
          'number or the page number.',
          ['--reference-label'],
          {'default': None, }),
         ('Specify style and database for bibtex, for example '
          '"--use-bibtex=mystyle,mydb1,mydb2".',
          ['--use-bibtex'],
          {'default': None, }),
         ),)

    settings_defaults = {'output_encoding': 'latin-1',
                         'sectnum_depth': 0 # updated by SectNum transform
                        }
    relative_path_settings = ('stylesheet_path',)

    config_section = 'latex2e writer'
    config_section_dependencies = ('writers',)

    # Template-part names; `head_parts` are joined with newlines, the
    # remaining visitor attributes (body etc.) are joined without.
    head_parts = ('head_prefix', 'requirements', 'stylesheet',
                  'fallbacks', 'pdfsetup', 'title', 'subtitle')
    visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
                                       'dedication', 'abstract', 'body')

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    # Override parent method to add latex-specific transforms
    ## def get_transforms(self):
    ##    # call the parent class' method
    ##    transforms = writers.Writer.get_transforms(self)
    ##    # print transforms
    ##    # TODO: footnote collection transform
    ##    # transforms.append(footnotes.collect)
    ##    return transforms

    def translate(self):
        """Walk the document tree, then substitute the collected parts
        into the (UTF-8 encoded) template file."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # copy parts
        for part in self.visitor_attributes:
            setattr(self, part, getattr(visitor, part))
        # Get the template string: try the setting as given, falling back
        # to a path relative to this module's directory.
        # (Renamed from `file` to avoid shadowing the builtin.)
        try:
            template_file = open(self.document.settings.template, 'rb')
        except IOError:
            template_file = open(os.path.join(
                os.path.dirname(__file__),
                self.document.settings.template), 'rb')
        template = string.Template(unicode(template_file.read(), 'utf-8'))
        template_file.close()
        # fill template
        self.assemble_parts() # create dictionary of parts
        self.output = template.substitute(self.parts)

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary of output fragments."""
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            lines = getattr(self, part)
            if part in self.head_parts:
                if lines:
                    lines.append('') # to get a trailing newline
                self.parts[part] = '\n'.join(lines)
            else:
                # body contains inline elements, so join without newline
                self.parts[part] = ''.join(lines)
class Babel(object):

    """Language specifics for LaTeX."""

    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    # NOTE(review): 'hu' and 'pt' appear twice in this literal; the later
    # entries ('hungarian', 'portuguese') win.
    _ISO639_TO_BABEL = {
        'no': 'norsk',     #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish',  #XXX added by hand
        'hu': 'magyar',    #XXX added by hand
        'pt': 'portuguese',#XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',   #XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
    }

    def __init__(self, lang):
        self.language = lang
        # Alternation state for next_quote(): 0 = opening, 1 = closing.
        self.quote_index = 0
        self.quotes = ('``', "''")
        self.setup = '' # language dependent configuration code
        # double quotes are "active" in some languages (e.g. German).
        # TODO: use \textquotedbl in OT1 font encoding?
        self.literal_double_quote = u'"'
        if self.language.startswith('de'):
            self.quotes = (r'\glqq{}', r'\grqq{}')
            self.literal_double_quote = ur'\dq{}'
        if self.language.startswith('it'):
            self.literal_double_quote = ur'{\char`\"}'
        if self.language.startswith('es'):
            # reset tilde ~ to the original binding (nobreakspace):
            self.setup = ('\n'
                r'\addto\shorthandsspanish{\spanishdeactivate{."~<>}}')

    def next_quote(self):
        """Return the next quote replacement, alternating between the
        opening and closing forms in `self.quotes`."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index+1) % 2
        return q

    def quote_quotes(self,text):
        """Replace plain '"' characters in `text` with alternating
        language-specific opening/closing quote macros."""
        t = None
        for part in text.split('"'):
            if t == None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def get_language(self):
        """Return the babel name for this instance's ISO 639 language
        code ('' if unknown)."""
        lang = self.language.split('_')[0] # filter dialects
        return self._ISO639_TO_BABEL.get(lang, "")
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):

    """Dictionary with additional sorting methods."""

    def sortedkeys(self):
        """Return a sorted list of the dictionary's keys."""
        # sorted() works on any iterable of keys; the former
        # ``keys = self.keys(); keys.sort()`` idiom relied on keys()
        # returning a list and breaks on dict views.
        return sorted(self.keys())

    def sortedvalues(self):
        """Return the dictionary's values, ordered by sorted key."""
        return [self[key] for key in self.sortedkeys()]
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
    """Building blocks for the latex preamble."""

# The snippets below are attached as class attributes so the rest of the
# writer can reference them as ``PreambleCmds.<name>``.

PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centerline{\textbf{#1}}}"""

PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
  % try \DUadmonition#1{#2}:
  \ifcsname DUadmonition#1\endcsname%
    \csname DUadmonition#1\endcsname{#2}%
  \else
    \begin{center}
      \fbox{\parbox{0.9\textwidth}{#2}}
    \end{center}
  \fi
}"""

## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""

PreambleCmds.color = r"""\usepackage{color}"""

PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'

PreambleCmds.embedded_package_wrapper = r"""\makeatletter
%% embedded stylesheet: %s
%s
\makeatother"""

PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""

PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'

PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
  \newenvironment{DUfieldlist}%
    {\quote\description}
    {\enddescription\endquote}
}{}"""

PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""

PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
  \raisebox{1em}{\hypertarget{#1}{}}%
  \hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
  \begingroup%
  \renewcommand{\thefootnote}{%
    \protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
    \protect\hyperlink{#2}{#3}}%
  \footnotetext{#4}%
  \endgroup%
}"""

PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
# NOTE(review): removed a stray "'))" that was embedded at the end of this
# snippet (leftover from an earlier ``append('...')`` refactoring) and
# would have been emitted verbatim into the LaTeX preamble.
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
  \usepackage{graphicx}
\else
  \usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.table = r"""\usepackage{longtable}
\usepackage{array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
PreambleCmds.documenttitle = r"""
%% Document title
\title{%s}
\author{%s}
\date{%s}
\maketitle
"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
class DocumentClass(object):
    """Details of a LaTeX document class."""

    def __init__(self, document_class, with_part=False):
        self.document_class = document_class
        self._with_part = with_part
        # Supported sectioning commands, outermost first.
        self.sections = ['section', 'subsection', 'subsubsection',
                         'paragraph', 'subparagraph']
        # "Book-like" classes additionally provide \chapter.
        has_chapter = self.document_class in ('book', 'memoir', 'report',
                                              'scrbook', 'scrreprt')
        if has_chapter:
            self.sections.insert(0, 'chapter')
        if self._with_part:
            self.sections.insert(0, 'part')

    def section(self, level):
        """Return the LaTeX section name for section `level`.

        The name depends on the specific document class.
        Level is 1,2,3..., as level 0 is the title.
        Levels deeper than the class supports map to the deepest name.
        """
        index = min(level, len(self.sections)) - 1
        return self.sections[index]
class Table(object):
    """Manage a table while traversing.

    Maybe change to a mixin defining the visit/departs, but then
    class Table internal variables are in the Translator.

    Table style might be

    :standard:  horizontal and vertical lines
    :booktabs:  only horizontal lines (requires "booktabs" LaTeX package)
    :nolines:   (or borderless) no lines
    """

    def __init__(self, translator, latex_type, table_style):
        self._translator = translator
        self._latex_type = latex_type
        self._table_style = table_style
        self._open = 0
        # miscellaneous attributes
        self._attrs = {}
        self._col_width = []
        self._rowspan = []
        self.stubs = []
        self._in_thead = 0

    def open(self):
        """Reset per-table state at the start of a new table."""
        self._open = 1
        self._col_specs = []
        self.caption = []
        self._attrs = {}
        self._in_head = 0 # maybe context with search
        # NOTE(review): `_in_head` above looks like a typo for `_in_thead`,
        # but the attribute is never read, so it is kept for compatibility.

    def close(self):
        """Drop per-table state when the table ends."""
        self._open = 0
        self._col_specs = None
        self.caption = []
        self._attrs = {}
        self.stubs = []

    def is_open(self):
        return self._open

    def set_table_style(self, table_style):
        """Set the table style; unknown style names are silently ignored."""
        if table_style not in ('standard', 'booktabs', 'borderless',
                               'nolines'):
            return
        self._table_style = table_style

    def get_latex_type(self):
        return self._latex_type

    def set(self, attr, value):
        """Store a miscellaneous table attribute."""
        self._attrs[attr] = value

    def get(self, attr):
        """Return a stored table attribute, or None if unset."""
        return self._attrs.get(attr)

    def get_vertical_bar(self):
        # Only the "standard" style draws vertical rules.
        if self._table_style == 'standard':
            return '|'
        return ''

    # horizontal lines are drawn below a row,
    def get_opening(self):
        """Return LaTeX code opening the table environment."""
        if self._latex_type == 'longtable':
            # otherwise longtable might move before paragraph and subparagraph
            prefix = '\\leavevmode\n'
        else:
            prefix = ''
        prefix += '\\setlength{\\DUtablewidth}{\\linewidth}'
        return '%s\n\\begin{%s}[c]' % (prefix, self._latex_type)

    def get_closing(self):
        """Return LaTeX code closing the table environment.

        Only "booktabs" needs an explicit bottom rule here; "standard"
        tables get their final \\hline from `depart_row`.
        """
        line = ''
        if self._table_style == 'booktabs':
            line = '\\bottomrule\n'
        # The original had an "elif standard" branch assigning a stray,
        # never-used variable `lines` -- dead code, removed.
        return '%s\\end{%s}' % (line, self._latex_type)

    def visit_colspec(self, node):
        self._col_specs.append(node)
        # "stubs" list is an attribute of the tgroup element:
        self.stubs.append(node.attributes.get('stub'))

    def get_colspecs(self):
        """Return column specification for longtable.

        Assumes reST line length being 80 characters.
        Table width is hairy.

        === ===
        ABC DEF
        === ===

        usually gets to narrow, therefore we add 1 (fiddlefactor).
        """
        width = 80
        total_width = 0.0
        # first see if we get too wide.
        for node in self._col_specs:
            colwidth = float(node['colwidth']+1) / width
            total_width += colwidth
        self._col_width = []
        self._rowspan = []
        # donot make it full linewidth
        factor = 0.93
        if total_width > 1.0:
            factor /= total_width
        bar = self.get_vertical_bar()
        latex_table_spec = ''
        for node in self._col_specs:
            colwidth = factor * float(node['colwidth']+1) / width
            self._col_width.append(colwidth+0.005)
            self._rowspan.append(0)
            latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar,
                                                             colwidth+0.005)
        return latex_table_spec+bar

    def get_column_width(self):
        """Return columnwidth for current cell (not multicell)."""
        return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]

    def get_caption(self):
        """Return the \\caption line (repeated captions are marked)."""
        if not self.caption:
            return ''
        caption = ''.join(self.caption)
        if 1 == self._translator.thead_depth():
            return r'\caption{%s}\\' '\n' % caption
        return r'\caption[]{%s (... continued)}\\' '\n' % caption

    def need_recurse(self):
        # longtable repeats the header; recurse once into the first thead.
        if self._latex_type == 'longtable':
            return 1 == self._translator.thead_depth()
        return 0

    def visit_thead(self):
        self._in_thead += 1
        if self._table_style == 'standard':
            return ['\\hline\n']
        elif self._table_style == 'booktabs':
            return ['\\toprule\n']
        return []

    def depart_thead(self):
        a = []
        #if self._table_style == 'standard':
        #    a.append('\\hline\n')
        if self._table_style == 'booktabs':
            a.append('\\midrule\n')
        if self._latex_type == 'longtable':
            if 1 == self._translator.thead_depth():
                a.append('\\endfirsthead\n')
            else:
                a.append('\\endhead\n')
            a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
                     r'{\hfill ... continued on next page} \\')
            a.append('\n\\endfoot\n\\endlastfoot\n')
        # for longtable one could add firsthead, foot and lastfoot
        self._in_thead -= 1
        return a

    def visit_row(self):
        self._cell_in_row = 0

    def depart_row(self):
        res = [' \\\\\n']
        self._cell_in_row = None # remove cell counter
        for i in range(len(self._rowspan)):
            if self._rowspan[i] > 0:
                self._rowspan[i] -= 1
        if self._table_style == 'standard':
            # columns whose rowspan ended with this row
            rowspans = [i+1 for i in range(len(self._rowspan))
                        if self._rowspan[i] <= 0]
            if len(rowspans) == len(self._rowspan):
                res.append('\\hline\n')
            else:
                # partial rules under finished columns only
                # TODO merge adjacent columns into one \cline
                cline = ''
                for c_start in rowspans:
                    cline += '\\cline{%d-%d}\n' % (c_start, c_start)
                res.append(cline)
        return res

    def set_rowspan(self, cell, value):
        """Record remaining rowspan for column `cell`; ignore bad indices."""
        try:
            self._rowspan[cell] = value
        except IndexError:
            pass

    def get_rowspan(self, cell):
        """Return remaining rowspan for column `cell` (0 if unknown)."""
        try:
            return self._rowspan[cell]
        except IndexError:
            return 0

    def get_entry_number(self):
        return self._cell_in_row

    def visit_entry(self):
        self._cell_in_row += 1

    def is_stub_column(self):
        if len(self.stubs) >= self._cell_in_row:
            return self.stubs[self._cell_in_row-1]
        return False
class LaTeXTranslator(nodes.NodeVisitor):
    """Translate a Docutils document tree into LaTeX markup."""

    # When options are given to the documentclass, latex will pass them
    # to other packages, as done with babel.
    # Dummy settings might be taken from document settings

    # Config setting defaults
    # -----------------------

    # TODO: use mixins for different implementations.
    # list environment for docinfo. else tabularx
    ## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE

    # Use compound enumerations (1.A.1.)
    compound_enumerators = 0

    # If using compound enumerations, include section information.
    section_prefix_for_enumerators = 0

    # This is the character that separates the section ("." subsection ...)
    # prefix from the regular list enumerator.
    section_enumerator_separator = '-'

    # default link color (overridden from settings in __init__)
    hyperlink_color = 'blue'

    # Auxiliary variables
    # -------------------

    has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
    is_toc_list = False # is the current bullet_list a ToC?
    section_level = 0

    # Flags to encode():
    # inside citation reference labels underscores dont need to be escaped
    inside_citation_reference_label = False
    verbatim = False # do not encode
    insert_non_breaking_blanks = False # replace blanks by "~"
    insert_newline = False # add latex newline commands
    literal = False # teletype: replace underscores
    literal_block = False # inside literal block: no quote mangling
    def __init__(self, document):
        """Collect settings from `document` and set up output collectors."""
        nodes.NodeVisitor.__init__(self, document)

        # Settings
        # ~~~~~~~~
        self.settings = settings = document.settings
        self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
        self.use_latex_toc = settings.use_latex_toc
        self.use_latex_docinfo = settings.use_latex_docinfo
        self.use_latex_footnotes = settings.use_latex_footnotes
        self._use_latex_citations = settings.use_latex_citations
        self.embed_stylesheet = settings.embed_stylesheet
        self._reference_label = settings.reference_label
        self.hyperlink_color = settings.hyperlink_color
        self.compound_enumerators = settings.compound_enumerators
        self.font_encoding = settings.font_encoding
        self.section_prefix_for_enumerators = (
            settings.section_prefix_for_enumerators)
        self.section_enumerator_separator = (
            settings.section_enumerator_separator.replace('_', '\\_'))
        # literal blocks:
        self.literal_block_env = ''
        self.literal_block_options = ''
        if settings.literal_block_env != '':
            # split e.g. "lstlisting[language=Python]" into name and options
            (none,
             self.literal_block_env,
             self.literal_block_options,
             none ) = re.split('(\w+)(.*)', settings.literal_block_env)
        elif settings.use_verbatim_when_possible:
            self.literal_block_env = 'verbatim'
        #
        if self.settings.use_bibtex:
            self.bibtex = self.settings.use_bibtex.split(',',1)
            # TODO avoid errors on not declared citations.
        else:
            self.bibtex = None
        # language:
        # (labels, bibliographic_fields, and author_separators)
        self.language = languages.get_language(settings.language_code)
        self.babel = Babel(settings.language_code)
        self.author_separator = self.language.author_separators[0]
        self.d_options = [self.settings.documentoptions,
                          self.babel.get_language()]
        self.d_options = ','.join([opt for opt in self.d_options if opt])
        self.d_class = DocumentClass(settings.documentclass,
                                     settings.use_part_section)
        if self.settings.graphicx_option == '':
            self.graphicx_package = r'\usepackage{graphicx}'
        elif self.settings.graphicx_option.lower() == 'auto':
            self.graphicx_package = PreambleCmds.graphicx_auto
        else:
            self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
                                     self.settings.graphicx_option)

        # Output collection stacks
        # ~~~~~~~~~~~~~~~~~~~~~~~~

        # Document parts
        self.head_prefix = [r'\documentclass[%s]{%s}' %
                            (self.d_options, self.settings.documentclass)]
        self.requirements = SortableDict() # made a list in depart_document()
        self.stylesheet = []
        self.fallbacks = SortableDict() # made a list in depart_document()
        self.pdfsetup = [] # PDF properties (hyperref package)
        self.title = []
        self.subtitle = []
        ## self.body_prefix = ['\\begin{document}\n']
        self.body_pre_docinfo = [] # title data and \maketitle
        self.docinfo = []
        self.dedication = []
        self.abstract = []
        self.body = []
        ## self.body_suffix = ['\\end{document}\n']

        # A heterogenous stack used in conjunction with the tree traversal.
        # Make sure that the pops correspond to the pushes:
        self.context = []

        # Title metadata:
        self.title_labels = []
        self.subtitle_labels = []
        # (if use_latex_docinfo: collects lists of
        # author/organization/contact/address lines)
        self.author_stack = []
        # date (the default supresses the "auto-date" feature of \maketitle)
        self.date = []

        # PDF properties: pdftitle, pdfauthor
        # TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
        self.pdfinfo = []
        self.pdfauthor = []

        # Stack of section counters so that we don't have to use_latex_toc.
        # This will grow and shrink as processing occurs.
        # Initialized for potential first-level sections.
        self._section_number = [0]
        # The current stack of enumerations so that we can expand
        # them into a compound enumeration.
        self._enumeration_counters = []
        # The maximum number of enumeration counters we've used.
        # If we go beyond this number, we need to create a new
        # counter; otherwise, just reuse an old one.
        self._max_enumeration_counters = 0

        self._bibitems = []

        self.literal_block_stack = []
        # object for a table while proccessing.
        self.table_stack = []
        self.active_table = Table(self, 'longtable', settings.table_style)

        # Where to collect the output of visitor methods (default: body)
        self.out = self.body
        self.out_stack = [] # stack of output collectors

        # Process settings
        # ~~~~~~~~~~~~~~~~

        # persistent requirements
        if self.font_encoding == '':
            # OT1 is the default; keep the line as a comment for reference
            fontenc_header = r'%\usepackage[OT1]{fontenc}'
        else:
            fontenc_header = r'\usepackage[%s]{fontenc}' % self.font_encoding
        self.requirements['_persistent'] = '\n'.join([
            # multi-language support (language is in document settings)
            '\\usepackage{babel}%s' % self.babel.setup,
            fontenc_header,
            r'\usepackage[%s]{inputenc}' % self.latex_encoding,
            r'\usepackage{ifthen}',
        ])
        # page layout with typearea (if there are relevant document options).
        if (settings.documentclass.find('scr') == -1 and
            (self.d_options.find('DIV') != -1 or
             self.d_options.find('BCOR') != -1)):
            self.requirements['typearea'] = r'\usepackage{typearea}'

        # Stylesheets
        # get list of style sheets from settings
        styles = utils.get_stylesheet_list(settings)
        # adapt path if --stylesheet_path is used
        if settings.stylesheet_path and not(self.embed_stylesheet):
            styles = [utils.relative_path(settings._destination, sheet)
                      for sheet in styles]
        for sheet in styles:
            (base, ext) = os.path.splitext(sheet)
            is_package = ext in ['.sty', '']
            if self.embed_stylesheet:
                if is_package:
                    sheet = base + '.sty' # adapt package name
                    # wrap in \makeatletter, \makeatother
                    wrapper = PreambleCmds.embedded_package_wrapper
                else:
                    wrapper = '%% embedded stylesheet: %s\n%s'
                settings.record_dependencies.add(sheet)
                self.stylesheet.append(wrapper %
                    (sheet, unicode(open(sheet).read(), 'utf-8')))
            else: # link to style sheet
                if is_package:
                    self.stylesheet.append(r'\usepackage{%s}' % base)
                else:
                    self.stylesheet.append(r'\input{%s}' % sheet)

        # PDF setup
        if self.hyperlink_color == '0':
            # '0' means "black links, do not color"
            self.hyperlink_color = 'black'
            self.colorlinks = 'false'
        else:
            self.colorlinks = 'true'

        # LaTeX Toc
        # include all supported sections in toc and PDF bookmarks
        # (or use documentclass-default (as currently))?
        ## if self.use_latex_toc:
        ##    self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
        ##                                     len(self.d_class.sections))

        # LaTeX section numbering
        if not self.settings.sectnum_xform: # section numbering by LaTeX:
            # sectnum_depth:
            #   None  "sectnum" directive without depth arg -> LaTeX default
            #   0     no "sectnum" directive -> no section numbers
            #   else  value of the "depth" argument -> limit to supported
            #         section levels
            if settings.sectnum_depth is not None:
                sectnum_depth = min(settings.sectnum_depth,
                                    len(self.d_class.sections))
                self.requirements['sectnum_depth'] = (
                    r'\setcounter{secnumdepth}{%d}' % sectnum_depth)
            # start with specified number:
            if (hasattr(settings, 'sectnum_start') and
                settings.sectnum_start != 1):
                self.requirements['sectnum_start'] = (
                    r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
                                              settings.sectnum_start-1))
            # currently ignored (configure in a stylesheet):
            ## settings.sectnum_prefix
            ## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
Default method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# convert: latin-1, latin_1, utf-8 and similar things
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
    def language_label(self, docutil_label):
        """Return the localized label text for a Docutils label key."""
        return self.language.labels[docutil_label]

    def ensure_math(self, text):
        """Wrap math-only Latin-1 characters in \\ensuremath{}."""
        # The pattern is compiled once and cached on the instance.
        if not hasattr(self, 'ensure_math_re'):
            chars = { # lnot,pm,twosuperior,threesuperior,mu,onesuperior,times,div
                'latin1' : '\xac\xb1\xb2\xb3\xb5\xb9\xd7\xf7' , # ¬±²³µ¹×÷
                # TODO?: use texcomp instead.
                }
            self.ensure_math_re = re.compile('([%s])' % chars['latin1'])
        text = self.ensure_math_re.sub(r'\\ensuremath{\1}', text)
        return text
    def encode(self, text):
        """Return text with 'problematic' characters escaped.

        Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
        square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
        Separate ``-`` (and more in literal text) to prevent input ligatures.
        Translate non-supported Unicode characters.
        """
        if self.verbatim:
            return text
        # Separate compound characters, e.g. '--' to '-{}-'.
        separate_chars = '-'
        # In monospace-font, we also separate ',,', '``' and "''" and some
        # other characters which can't occur in non-literal text.
        if self.literal_block or self.literal:
            separate_chars += ',`\'"<>'
        # LaTeX encoding maps:
        special_chars = {
            ord('#'): ur'\#',
            ord('$'): ur'\$',
            ord('%'): ur'\%',
            ord('&'): ur'\&',
            ord('~'): ur'\textasciitilde{}',
            ord('_'): ur'\_',
            ord('^'): ur'\textasciicircum{}',
            ord('\\'): ur'\textbackslash{}',
            ord('{'): ur'\{',
            ord('}'): ur'\}',
            # Square brackets are ordinary chars and cannot be escaped with
            # '\', so we put them in a group '{[}'. (Alternative: ensure that
            # all macros with optional arguments are terminated with {} and
            # text inside any optional argument is put in a group
            # ``[{text}]``).
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            ord('['): ur'{[}',
            ord(']'): ur'{]}'
        }
        # Unicode chars that are not recognized by LaTeX's utf8 encoding
        unsupported_unicode_chars = {
            0x00A0: ur'~', # NO-BREAK SPACE
            0x00AD: ur'\-', # SOFT HYPHEN
            0x21d4: ur'$\Leftrightarrow$',
            # Docutils footnote symbols:
            0x2660: ur'$\spadesuit$',
            0x2663: ur'$\clubsuit$',
        }
        # Unicode chars that are recognized by LaTeX's utf8 encoding
        unicode_chars = {
            0x2013: ur'\textendash{}',
            0x2014: ur'\textemdash{}',
            0x2018: ur'`',
            0x2019: ur"'",
            0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
            0x201C: ur'\textquotedblleft{}',
            0x201D: ur'\textquotedblright{}',
            0x201E: ur'\quotedblbase', # DOUBLE LOW-9 QUOTATION MARK
            0x2020: ur'\dag{}',
            0x2021: ur'\ddag{}',
            0x2026: ur'\dots{}',
            0x2122: ur'\texttrademark{}',
        }
        # Unicode chars that require a feature/package to render
        pifont_chars = {
            0x2665: ur'\ding{170}', # black heartsuit
            0x2666: ur'\ding{169}', # black diamondsuit
        }
        # TODO: greek alphabet ... ?
        # see also LaTeX codec
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
        # and unimap.py from TeXML

        # set up the translation table: (`table` aliases the fresh local
        # `special_chars` dict, so mutating it below is safe)
        table = special_chars
        # keep the underscore in citation references
        if self.inside_citation_reference_label:
            del(table[ord('_')])
        # Workarounds for OT1 font-encoding
        if self.font_encoding in ['OT1', '']:
            # * out-of-order characters in cmtt
            if self.literal_block or self.literal:
                # replace underscore by underlined blank,
                # because this has correct width.
                table[ord('_')] = u'\\underline{~}'
                # the backslash doesn't work, so we use a mirrored slash.
                # \reflectbox is provided by graphicx:
                self.requirements['graphicx'] = self.graphicx_package
                table[ord('\\')] = ur'\reflectbox{/}'
            # * ``< | >`` come out as different chars (except for cmtt):
            else:
                table[ord('|')] = ur'\textbar{}'
                table[ord('<')] = ur'\textless{}'
                table[ord('>')] = ur'\textgreater{}'
        if self.insert_non_breaking_blanks:
            table[ord(' ')] = ur'~'
        if self.literal_block or self.literal:
            # double quotes are 'active' in some languages
            table[ord('"')] = self.babel.literal_double_quote
        else:
            text = self.babel.quote_quotes(text)
        # Unicode chars:
        table.update(unsupported_unicode_chars)
        if not self.latex_encoding.startswith('utf8'):
            table.update(unicode_chars)
        # Unicode chars that require a feature/package to render
        if [ch for ch in pifont_chars.keys() if unichr(ch) in text]:
            self.requirements['pifont'] = '\\usepackage{pifont}'
            table.update(pifont_chars)

        text = text.translate(table)

        # Break up input ligatures
        for char in separate_chars * 2:
            # Do it twice ("* 2") because otherwise we would replace
            # '---' by '-{}--'.
            text = text.replace(char + char, char + '{}' + char)
        # Literal line breaks (in address or literal blocks):
        if self.insert_newline or self.literal_block:
            # for blank lines, insert a protected space, to avoid
            # ! LaTeX Error: There's no line here to end.
            textlines = [line + '~'*(not line.lstrip())
                         for line in text.split('\n')]
            text = '\\\\\n'.join(textlines)
        if not self.latex_encoding.startswith('utf8'):
            text = self.ensure_math(text)
        return text
    def attval(self, text,
               whitespace=re.compile('[\n\r\t\v\f]')):
        """Cleanse, encode, and return attribute value text."""
        # `whitespace` is a default-argument cache of the compiled pattern.
        return self.encode(whitespace.sub(' ', text))

    # TODO: is this used anywhere? (update or delete)
    ## def astext(self):
    ##     """Assemble document parts and return as string."""
    ##     head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
    ##     body = ''.join(self.body_prefix + self.body + self.body_suffix)
    ##     return head + '\n' + body
    def is_inline(self, node):
        """Check whether a node represents an inline element."""
        return isinstance(node.parent, nodes.TextElement)

    def append_hypertargets(self, node):
        """Append hypertargets for all ids of `node`."""
        # hypertarget places the anchor at the target's baseline,
        # so we raise it explicitly
        self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
                                    id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
    def push_output_collector(self, new_out):
        # Redirect subsequent visitor output to `new_out`; restore with pop.
        self.out_stack.append(self.out)
        self.out = new_out

    def pop_output_collector(self):
        # Restore the previous output collector.
        self.out = self.out_stack.pop()
# Visitor methods
# ---------------
    def visit_Text(self, node):
        # all text passes through encode() for LaTeX escaping
        self.out.append(self.encode(node.astext()))

    def depart_Text(self, node):
        pass

    def visit_address(self, node):
        self.visit_docinfo_item(node, 'address')

    def depart_address(self, node):
        self.depart_docinfo_item(node)

    def visit_admonition(self, node):
        self.fallbacks['admonition'] = PreambleCmds.admonition
        # NOTE(review): string identity test relies on CPython interning
        # of 'admonition'; `==` would be safer -- confirm before changing.
        if node.tagname is 'admonition':
            classes = ','.join(node['classes'])
            title = ''
        else: # specific admonitions
            self.fallbacks['title'] = PreambleCmds.title
            classes = node.tagname.replace('_', '-')
            title = '\\DUtitle[%s]{%s}\n' % (
                classes, self.language.labels.get(classes, classes))
        self.out.append('\n\\DUadmonition[%s]{\n%s' % (classes, title))

    def depart_admonition(self, node=None):
        self.out.append('}\n')

    def visit_attention(self, node):
        self.visit_admonition(node)

    def depart_attention(self, node):
        self.depart_admonition()

    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    def depart_author(self, node):
        self.depart_docinfo_item(node)

    def visit_authors(self, node):
        # not used: visit_author is called anyway for each author.
        pass

    def depart_authors(self, node):
        pass

    def visit_block_quote(self, node):
        self.out.append( '%\n\\begin{quote}\n')

    def depart_block_quote(self, node):
        self.out.append( '\n\\end{quote}\n')

    def visit_bullet_list(self, node):
        # a bullet list serving as table of contents gets a plain list
        if self.is_toc_list:
            self.out.append( '%\n\\begin{list}{}{}\n' )
        else:
            self.out.append( '%\n\\begin{itemize}\n' )

    def depart_bullet_list(self, node):
        if self.is_toc_list:
            self.out.append( '\n\\end{list}\n' )
        else:
            self.out.append( '\n\\end{itemize}\n' )
    def visit_superscript(self, node):
        self.out.append(r'\textsuperscript{')
        if node['classes']:
            self.visit_inline(node)

    def depart_superscript(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')

    def visit_subscript(self, node):
        self.out.append(r'\textsubscript{') # requires `fixltx2e`
        if node['classes']:
            self.visit_inline(node)

    def depart_subscript(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')

    def visit_caption(self, node):
        self.out.append( '\\caption{' )

    def depart_caption(self, node):
        self.out.append('}\n')
    def visit_caution(self, node):
        self.visit_admonition(node)

    def depart_caution(self, node):
        self.depart_admonition()

    def visit_title_reference(self, node):
        self.fallbacks['titlereference'] = PreambleCmds.titlereference
        self.out.append(r'\DUroletitlereference{')
        if node['classes']:
            self.visit_inline(node)

    def depart_title_reference(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append( '}' )
    def visit_citation(self, node):
        # TODO maybe use cite bibitems
        if self._use_latex_citations:
            # remember where this citation's text starts in self.body
            self.context.append(len(self.body))
        else:
            self.out.append(r'\begin{figure}[b]')
            self.append_hypertargets(node)

    def depart_citation(self, node):
        if self._use_latex_citations:
            # move the collected label + text from the body into _bibitems
            size = self.context.pop()
            label = self.body[size]
            text = ''.join(self.body[size+1:])
            del self.body[size:]
            self._bibitems.append([label, text])
        else:
            self.out.append('\\end{figure}\n')

    def visit_citation_reference(self, node):
        if self._use_latex_citations:
            if not self.inside_citation_reference_label:
                self.out.append(r'\cite{')
                self.inside_citation_reference_label = 1
            else:
                # merging adjacent citations: drop the separating whitespace
                assert self.body[-1] in (' ', '\n'),\
                       'unexpected non-whitespace while in reference label'
                del self.body[-1]
        else:
            href = ''
            if 'refid' in node:
                href = node['refid']
            elif 'refname' in node:
                href = self.document.nameids[node['refname']]
            self.out.append('[\\hyperlink{%s}{' % href)

    def depart_citation_reference(self, node):
        if self._use_latex_citations:
            followup_citation = False
            # check for a following citation separated by a space or newline
            next_siblings = node.traverse(descend=0, siblings=1,
                                          include_self=0)
            if len(next_siblings) > 1:
                next = next_siblings[0]
                if (isinstance(next, nodes.Text) and
                    next.astext() in (' ', '\n')):
                    if next_siblings[1].__class__ == node.__class__:
                        followup_citation = True
            if followup_citation:
                # keep the \cite{...} open and separate keys with a comma
                self.out.append(',')
            else:
                self.out.append('}')
                self.inside_citation_reference_label = False
        else:
            self.out.append('}]')
    def visit_classifier(self, node):
        self.out.append( '(\\textbf{' )

    def depart_classifier(self, node):
        self.out.append( '})\n' )

    def visit_colspec(self, node):
        self.active_table.visit_colspec(node)

    def depart_colspec(self, node):
        pass

    def visit_comment(self, node):
        # Precede every line with a comment sign, wrap in newlines
        self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
        raise nodes.SkipNode

    def depart_comment(self, node):
        pass

    def visit_compound(self, node):
        pass

    def depart_compound(self, node):
        pass

    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')

    def depart_contact(self, node):
        self.depart_docinfo_item(node)

    def visit_container(self, node):
        pass

    def depart_container(self, node):
        pass

    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')

    def depart_copyright(self, node):
        self.depart_docinfo_item(node)

    def visit_danger(self, node):
        self.visit_admonition(node)

    def depart_danger(self, node):
        self.depart_admonition()

    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')

    def depart_date(self, node):
        self.depart_docinfo_item(node)

    def visit_decoration(self, node):
        # header and footer
        pass

    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        pass

    def depart_definition(self, node):
        self.out.append('\n')

    def visit_definition_list(self, node):
        self.out.append( '%\n\\begin{description}\n' )

    def depart_definition_list(self, node):
        self.out.append( '\\end{description}\n' )

    def visit_definition_list_item(self, node):
        pass

    def depart_definition_list_item(self, node):
        pass

    def visit_description(self, node):
        self.out.append(' ')

    def depart_description(self, node):
        pass
    def visit_docinfo(self, node):
        self.push_output_collector(self.docinfo)

    def depart_docinfo(self, node):
        self.pop_output_collector()
        # Some items (e.g. author) end up at other places
        if self.docinfo:
            # tabularx: automatic width of columns, no page breaks allowed.
            self.requirements['tabularx'] = r'\usepackage{tabularx}'
            self.fallbacks['_providelength'] = PreambleCmds.providelength
            self.fallbacks['docinfo'] = PreambleCmds.docinfo
            #
            self.docinfo.insert(0, '\n% Docinfo\n'
                                '\\begin{center}\n'
                                '\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
            self.docinfo.append('\\end{tabularx}\n'
                                '\\end{center}\n')

    def visit_docinfo_item(self, node, name):
        """Render a docinfo field; some fields feed title/PDF metadata."""
        if name == 'author':
            self.pdfauthor.append(self.attval(node.astext()))
        if self.use_latex_docinfo:
            if name in ('author', 'organization', 'contact', 'address'):
                # We attach these to the last author. If any of them precedes
                # the first author, put them in a separate "author" group
                # (in lack of better semantics).
                if name == 'author' or not self.author_stack:
                    self.author_stack.append([])
                if name == 'address': # newlines are meaningful
                    self.insert_newline = 1
                    text = self.encode(node.astext())
                    self.insert_newline = False
                else:
                    text = self.attval(node.astext())
                self.author_stack[-1].append(text)
                raise nodes.SkipNode
            elif name == 'date':
                self.date.append(self.attval(node.astext()))
                raise nodes.SkipNode
        self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
        if name == 'address':
            self.insert_newline = 1
            self.out.append('{\\raggedright\n')
            self.context.append(' } \\\\\n')
        else:
            self.context.append(' \\\\\n')

    def depart_docinfo_item(self, node):
        self.out.append(self.context.pop())
        # for address we did set insert_newline
        self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
    def visit_document(self, node):
        """Collect title labels for a titled document."""
        # titled document?
        if (self.use_latex_docinfo or len(node) and
            isinstance(node[0], nodes.title)):
            self.title_labels += self.ids_to_labels(node)
    def depart_document(self, node):
        """Finish header and body with data gathered during the walkabout."""
        # Complete header with information gained from walkabout
        # a) conditional requirements (before style sheet)
        self.requirements = self.requirements.sortedvalues()
        # b) conditional fallback definitions (after style sheet)
        self.fallbacks = self.fallbacks.sortedvalues()
        # c) PDF properties
        self.pdfsetup.append(PreambleCmds.linking % (self.colorlinks,
                                                     self.hyperlink_color,
                                                     self.hyperlink_color))
        if self.pdfauthor:
            authors = self.author_separator.join(self.pdfauthor)
            self.pdfinfo.append(' pdfauthor={%s}' % authors)
        if self.pdfinfo:
            self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
        # Complete body
        # a) document title (part 'body_prefix'):
        # NOTE: Docutils puts author/date into docinfo, so normally
        # we do not want LaTeX author/date handling (via \maketitle).
        # To deactivate it, we add \title, \author, \date,
        # even if the arguments are empty strings.
        if self.title or self.author_stack or self.date:
            authors = ['\\\\\n'.join(author_entry)
                       for author_entry in self.author_stack]
            title = self.title + self.title_labels
            if self.subtitle:
                title += [r'\\ % subtitle',
                          r'\large{%s}' % self.subtitle[0]
                          ] + self.subtitle_labels
            self.body_pre_docinfo.append(PreambleCmds.documenttitle % (
                '%\n '.join(title),
                ' \\and\n'.join(authors),
                ', '.join(self.date)))
        # b) bibliography
        # TODO insertion point of bibliography should be configurable.
        if self._use_latex_citations and len(self._bibitems)>0:
            if not self.bibtex:
                widest_label = ''
                for bi in self._bibitems:
                    if len(widest_label)<len(bi[0]):
                        widest_label = bi[0]
                self.out.append('\n\\begin{thebibliography}{%s}\n' %
                                widest_label)
                for bi in self._bibitems:
                    # cite_key: underscores must not be escaped
                    cite_key = bi[0].replace(r'\_','_')
                    self.out.append('\\bibitem[%s]{%s}{%s}\n' %
                                    (bi[0], cite_key, bi[1]))
                self.out.append('\\end{thebibliography}\n')
            else:
                self.out.append('\n\\bibliographystyle{%s}\n' %
                                self.bibtex[0])
                self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
        # c) make sure to generate a toc file if needed for local contents:
        if 'minitoc' in self.requirements and not self.has_latex_toc:
            self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
self.literal_block_stack.append('\\emph{')
    def depart_emphasis(self, node):
        """Close the ``\\emph`` group opened in visit_emphasis."""
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
        self.literal_block_stack.pop()
    def visit_entry(self, node):
        """Open a table cell: handle cell separators, multirow/multicolumn
        spans and bold markup for header/stub cells."""
        self.active_table.visit_entry()
        # cell separation
        # BUG: the following fails, with more than one multirow
        # starting in the second column (or later) see
        # ../../../test/functional/input/data/latex.txt
        if self.active_table.get_entry_number() == 1:
            # if the first row is a multirow, this actually is the second row.
            # this gets hairy if rowspans follow each other.
            if self.active_table.get_rowspan(0):
                count = 0
                while self.active_table.get_rowspan(count):
                    count += 1
                    self.out.append(' & ')
                    self.active_table.visit_entry() # increment cell count
        else:
            self.out.append(' & ')
        # multirow, multicolumn
        # IN WORK BUG TODO HACK continues here
        # multirow in LaTeX simply will enlarge the cell over several rows
        # (the following n if n is positive, the former if negative).
        if 'morerows' in node and 'morecols' in node:
            raise NotImplementedError('Cells that '
                'span multiple rows *and* columns are not supported, sorry.')
        if 'morerows' in node:
            self.requirements['multirow'] = r'\usepackage{multirow}'
            count = node['morerows'] + 1
            self.active_table.set_rowspan(
                self.active_table.get_entry_number()-1,count)
            self.out.append('\\multirow{%d}{%s}{%%' %
                            (count,self.active_table.get_column_width()))
            self.context.append('}')
        elif 'morecols' in node:
            # the vertical bar before column is missing if it is the first
            # column. the one after always.
            if self.active_table.get_entry_number() == 1:
                bar1 = self.active_table.get_vertical_bar()
            else:
                bar1 = ''
            count = node['morecols'] + 1
            self.out.append('\\multicolumn{%d}{%sl%s}{' %
                            (count, bar1, self.active_table.get_vertical_bar()))
            self.context.append('}')
        else:
            self.context.append('')
        # header / not header
        if isinstance(node.parent.parent, nodes.thead):
            self.out.append('\\textbf{%')
            self.context.append('}')
        elif self.active_table.is_stub_column():
            self.out.append('\\textbf{')
            self.context.append('}')
        else:
            self.context.append('')
    def depart_entry(self, node):
        """Close a table cell; skip cells spanned from a row above."""
        self.out.append(self.context.pop()) # header / not header
        self.out.append(self.context.pop()) # multirow/column
        # if following row is spanned from above.
        if self.active_table.get_rowspan(self.active_table.get_entry_number()):
            self.out.append(' & ')
            self.active_table.visit_entry() # increment cell count
    def visit_row(self, node):
        """Delegate table-row bookkeeping to the active table."""
        self.active_table.visit_row()
    def depart_row(self, node):
        """Emit the row terminator produced by the active table."""
        self.out.extend(self.active_table.depart_row())
    def visit_enumerated_list(self, node):
        """Open a custom LaTeX ``list`` environment for an enumeration.

        A private environment (instead of ``enumerate``) allows setting
        the numbering style, prefix/suffix, start value and compound
        (nested) enumerators without depth limits.
        """
        # We create our own enumeration list environment.
        # This allows to set the style and starting value
        # and unlimited nesting.
        enum_style = {'arabic':'arabic',
                      'loweralpha':'alph',
                      'upperalpha':'Alph',
                      'lowerroman':'roman',
                      'upperroman':'Roman' }
        enum_suffix = ''
        if 'suffix' in node:
            enum_suffix = node['suffix']
        enum_prefix = ''
        if 'prefix' in node:
            enum_prefix = node['prefix']
        if self.compound_enumerators:
            pref = ''
            if self.section_prefix_for_enumerators and self.section_level:
                for i in range(self.section_level):
                    pref += '%d.' % self._section_number[i]
                pref = pref[:-1] + self.section_enumerator_separator
                enum_prefix += pref
            for ctype, cname in self._enumeration_counters:
                enum_prefix += '\\%s{%s}.' % (ctype, cname)
        enum_type = 'arabic'
        if 'enumtype' in node:
            enum_type = node['enumtype']
        if enum_type in enum_style:
            enum_type = enum_style[enum_type]
        counter_name = 'listcnt%d' % len(self._enumeration_counters)
        self._enumeration_counters.append((enum_type, counter_name))
        # If we haven't used this counter name before, then create a
        # new counter; otherwise, reset & reuse the old counter.
        if len(self._enumeration_counters) > self._max_enumeration_counters:
            self._max_enumeration_counters = len(self._enumeration_counters)
            self.out.append('\\newcounter{%s}\n' % counter_name)
        else:
            self.out.append('\\setcounter{%s}{0}\n' % counter_name)
        self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
                        (enum_prefix,enum_type,counter_name,enum_suffix))
        self.out.append('{\n')
        self.out.append('\\usecounter{%s}\n' % counter_name)
        # set start after usecounter, because it initializes to zero.
        if 'start' in node:
            self.out.append('\\addtocounter{%s}{%d}\n' %
                            (counter_name,node['start']-1))
        ## set rightmargin equal to leftmargin
        self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
        self.out.append('}\n')
    def depart_enumerated_list(self, node):
        """Close the list environment and drop this level's counter."""
        self.out.append('\\end{list}\n')
        self._enumeration_counters.pop()
    def visit_error(self, node):
        """Render an "error" admonition (registers its preamble fallback)."""
        self.fallbacks['error'] = PreambleCmds.error
        self.visit_admonition(node)
    def depart_error(self, node):
        """Close the "error" admonition."""
        self.depart_admonition()
    def visit_field(self, node):
        """Field: no markup here; siblings (_argument/_body/_name) emit it."""
        # real output is done in siblings: _argument, _body, _name
        pass
    def depart_field(self, node):
        """Terminate a field with a newline."""
        self.out.append('\n')
        ##self.out.append('%[depart_field]\n')
    def visit_field_argument(self, node):
        """Emit a debug marker for field arguments (placeholder output)."""
        self.out.append('%[visit_field_argument]\n')
    def depart_field_argument(self, node):
        """Emit a debug marker for field arguments (placeholder output)."""
        self.out.append('%[depart_field_argument]\n')
    def visit_field_body(self, node):
        """Field body: no opening markup required."""
        pass
    def depart_field_body(self, node):
        """End a field body; in docinfo tables, terminate the table row."""
        if self.out is self.docinfo:
            self.out.append(r'\\')
    def visit_field_list(self, node):
        """Open a DUfieldlist environment (unless inside docinfo)."""
        if self.out is not self.docinfo:
            self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
            self.out.append('%\n\\begin{DUfieldlist}\n')
    def depart_field_list(self, node):
        """Close the DUfieldlist environment (unless inside docinfo)."""
        if self.out is not self.docinfo:
            self.out.append('\\end{DUfieldlist}\n')
    def visit_field_name(self, node):
        """Open the field name: bold in docinfo, \\item label otherwise."""
        if self.out is self.docinfo:
            self.out.append('\\textbf{')
        else:
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            self.out.append('\\item[{')
    def depart_field_name(self, node):
        """Close the field name opened in visit_field_name."""
        if self.out is self.docinfo:
            self.out.append('}: &')
        else:
            self.out.append(':}]')
    def visit_figure(self, node):
        """Open a LaTeX figure environment (float settings via preamble)."""
        self.requirements['float_settings'] = PreambleCmds.float_settings
        # ! the 'align' attribute should set "outer alignment" !
        # For "inner alignment" use LaTeX default alignment (similar to HTML)
        ## if ('align' not in node.attributes or
        ##     node.attributes['align'] == 'center'):
        ##     align = '\n\\centering'
        ##     align_end = ''
        ## else:
        ##     # TODO non vertical space for other alignments.
        ##     align = '\\begin{flush%s}' % node.attributes['align']
        ##     align_end = '\\end{flush%s}' % node.attributes['align']
        ## self.out.append( '\\begin{figure}%s\n' % align )
        ## self.context.append( '%s\\end{figure}\n' % align_end )
        self.out.append('\\begin{figure}')
        self.context.append('\\end{figure}\n')
    def depart_figure(self, node):
        """Close the figure environment stored on the context stack."""
        self.out.append(self.context.pop())
    def visit_footer(self, node):
        """Collect footer content into a \\DUfooter macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUfooter}{')
    def depart_footer(self, node):
        """Close \\DUfooter and register it as a preamble requirement."""
        self.out.append('}')
        self.requirements['~footer'] = ''.join(self.out)
        self.pop_output_collector()
    def visit_footnote(self, node):
        """Open a footnote: \\DUfootnotetext or a bottom figure float."""
        try:
            backref = node['backrefs'][0]
        except IndexError:
            backref = node['ids'][0] # no backref, use self-ref instead
        if self.use_latex_footnotes:
            self.fallbacks['footnotes'] = PreambleCmds.footnotes
            num,text = node.astext().split(None,1)
            if self.settings.footnote_references == 'brackets':
                num = '[%s]' % num
            self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
                            (node['ids'][0], backref, self.encode(num)))
            if node['ids'] == node['names']:
                self.out += self.ids_to_labels(node)
        else:
            # use key starting with ~ for sorting after small letters
            self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
            self.out.append('\\begin{figure}[b]')
            self.append_hypertargets(node)
            if node.get('id') == node.get('name'): # explicit label
                self.out += self.ids_to_labels(node)
    def depart_footnote(self, node):
        """Close the footnote text or its figure float."""
        if self.use_latex_footnotes:
            self.out.append('}\n')
        else:
            self.out.append('\\end{figure}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if self.use_latex_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
# TODO: second argument = backlink id
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
    def depart_footnote_reference(self, node):
        """Emit the closing text stored by visit_footnote_reference."""
        self.out.append(self.context.pop())
    # footnote/citation label
    def label_delim(self, node, bracket, superscript):
        """Append the delimiter fitting the footnote/citation label style."""
        if isinstance(node.parent, nodes.footnote):
            if self.use_latex_footnotes:
                raise nodes.SkipNode
            if self.settings.footnote_references == 'brackets':
                self.out.append(bracket)
            else:
                self.out.append(superscript)
        else:
            assert isinstance(node.parent, nodes.citation)
            if not self._use_latex_citations:
                self.out.append(bracket)
    def visit_label(self, node):
        """footnote or citation label: in brackets or as superscript"""
        self.label_delim(node, '[', '\\textsuperscript{')
    def depart_label(self, node):
        """Close the footnote/citation label delimiter."""
        self.label_delim(node, ']', '}')
    # elements generated by the framework e.g. section numbers.
    def visit_generated(self, node):
        """Framework-generated elements need no extra markup."""
        pass
    def depart_generated(self, node):
        """Nothing to close for generated elements."""
        pass
    def visit_header(self, node):
        """Collect header content into a \\DUheader macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUheader}{')
    def depart_header(self, node):
        """Close \\DUheader and register it as a preamble requirement."""
        self.out.append('}')
        self.requirements['~header'] = ''.join(self.out)
        self.pop_output_collector()
    def visit_hint(self, node):
        """Render a "hint" admonition."""
        self.visit_admonition(node)
    def depart_hint(self, node):
        """Close the "hint" admonition."""
        self.depart_admonition()
def to_latex_length(self, length_str):
"""Convert string with rst lenght to LaTeX"""
match = re.match('(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
return length_str
    def visit_image(self, node):
        """Append an \\includegraphics call with size/alignment wrappers."""
        self.requirements['graphicx'] = self.graphicx_package
        attrs = node.attributes
        # Add image URI to dependency list, assuming that it's
        # referring to a local file.
        self.settings.record_dependencies.add(attrs['uri'])
        # alignment defaults:
        if not 'align' in attrs:
            # Set default align of image in a figure to 'center'
            if isinstance(node.parent, nodes.figure):
                attrs['align'] = 'center'
        # query 'align-*' class argument
        for cls in node['classes']:
            if cls.startswith('align-'):
                attrs['align'] = cls.split('-')[1]
        # pre- and postfix (prefix inserted in reverse order)
        pre = []
        post = []
        include_graphics_options = []
        is_inline = self.is_inline(node)
        align_prepost = {
            # key == (<is_inline>, <align>)
            # By default latex aligns the bottom of an image.
            (True, 'bottom'): ('', ''),
            (True, 'middle'): (r'\raisebox{-0.5\height}{', '}'),
            (True, 'top'): (r'\raisebox{-\height}{', '}'),
            (False, 'center'): (r'\noindent\makebox[\textwidth][c]{', '}'),
            (False, 'left'): (r'\noindent{', r'\hfill}'),
            (False, 'right'): (r'\noindent{\hfill', '}'),}
        if 'align' in attrs:
            try:
                pre.append(align_prepost[is_inline, attrs['align']][0])
                post.append(align_prepost[is_inline, attrs['align']][1])
            except KeyError:
                pass # TODO: warn?
        if 'height' in attrs:
            include_graphics_options.append('height=%s' %
                            self.to_latex_length(attrs['height']))
        if 'scale' in attrs:
            include_graphics_options.append('scale=%f' %
                                            (attrs['scale'] / 100.0))
            ## # Could also be done with ``scale`` option to
            ## # ``\includegraphics``; doing it this way for consistency.
            ## pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
            ## post.append('}')
        if 'width' in attrs:
            include_graphics_options.append('width=%s' %
                            self.to_latex_length(attrs['width']))
        if not is_inline:
            pre.append('\n')
            post.append('\n')
        pre.reverse()
        self.out.extend(pre)
        self.append_hypertargets(node)
        options = ''
        if include_graphics_options:
            options = '[%s]' % (','.join(include_graphics_options))
        self.out.append('\\includegraphics%s{%s}' % (options, attrs['uri']))
        self.out.extend(post)
    def depart_image(self, node):
        """Postfix markup already emitted in visit_image; nothing to do."""
        pass
    def visit_important(self, node):
        """Render an "important" admonition."""
        self.visit_admonition(node)
    def depart_important(self, node):
        """Close the "important" admonition."""
        self.depart_admonition()
    def visit_interpreted(self, node):
        """Interpreted text: treated as literal until properly implemented."""
        # @@@ Incomplete, pending a proper implementation on the
        # Parser/Reader end.
        self.visit_literal(node)
    def depart_interpreted(self, node):
        """Close interpreted text like a literal."""
        self.depart_literal(node)
    def visit_legend(self, node):
        """Open a DUlegend environment for a figure legend."""
        self.fallbacks['legend'] = PreambleCmds.legend
        self.out.append('\\begin{DUlegend}')
    def depart_legend(self, node):
        """Close the DUlegend environment."""
        self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\item[] ')
    def depart_line(self, node):
        """Terminate the line with a newline."""
        self.out.append('\n')
    def visit_line_block(self, node):
        """Open a DUlineblock; nested blocks get additional indentation."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['lineblock'] = PreambleCmds.lineblock
        if isinstance(node.parent, nodes.line_block):
            self.out.append('\\item[]\n'
                            '\\begin{DUlineblock}{\\DUlineblockindent}\n')
        else:
            self.out.append('\n\\begin{DUlineblock}{0em}\n')
    def depart_line_block(self, node):
        """Close the DUlineblock environment."""
        self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
    def depart_list_item(self, node):
        """Nothing to close for a list item."""
        pass
def visit_literal(self, node):
self.literal = True
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
    def visit_literal_block(self, node):
        """Render a literal block: verbatim environment if plain text,
        otherwise a ttfamily group (for parsed literals with markup)."""
        # environments and packages to typeset literal blocks
        packages = {'listing': r'\usepackage{moreverb}',
                    'lstlisting': r'\usepackage{listings}',
                    'Verbatim': r'\usepackage{fancyvrb}',
                    # 'verbatim': '',
                    'verbatimtab': r'\usepackage{moreverb}'}
        if not self.active_table.is_open():
            # no quote inside tables, to avoid vertical space between
            # table border and literal block.
            # BUG: fails if normal text precedes the literal block.
            self.out.append('%\n\\begin{quote}')
            self.context.append('\n\\end{quote}\n')
        else:
            self.out.append('\n')
            self.context.append('\n')
        if self.literal_block_env != '' and self.is_plaintext(node):
            self.requirements['literal_block'] = packages.get(
                self.literal_block_env, '')
            self.verbatim = True
            self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
                                                 self.literal_block_options))
        else:
            self.literal_block = 1
            self.insert_non_breaking_blanks = 1
            self.out.append('{\\ttfamily \\raggedright \\noindent\n')
    def depart_literal_block(self, node):
        """Close the verbatim environment or the ttfamily group."""
        if self.verbatim:
            self.out.append('\n\\end{%s}\n' % self.literal_block_env)
            self.verbatim = False
        else:
            self.out.append('\n}')
            self.insert_non_breaking_blanks = False
            self.literal_block = False
        self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
    def visit_note(self, node):
        """Render a "note" admonition."""
        self.visit_admonition(node)
    def depart_note(self, node):
        """Close the "note" admonition."""
        self.depart_admonition()
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
    def depart_option(self, node):
        """Flag that the first option is done (counter on context stack)."""
        self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
    def depart_option_argument(self, node):
        """Nothing to close for an option argument."""
        pass
    def visit_option_group(self, node):
        """Open the \\item label and reset the first-option flag."""
        self.out.append('\n\\item[')
        # flag for first option
        self.context.append(0)
    def depart_option_group(self, node):
        """Close the \\item label; discard the first-option flag."""
        self.context.pop() # the flag
        self.out.append('] ')
    def visit_option_list(self, node):
        """Open a DUoptionlist environment."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['optionlist'] = PreambleCmds.optionlist
        self.out.append('%\n\\begin{DUoptionlist}\n')
    def depart_option_list(self, node):
        """Close the DUoptionlist environment."""
        self.out.append('\n\\end{DUoptionlist}\n')
    def visit_option_list_item(self, node):
        """No extra markup; handled by option group and description."""
        pass
    def depart_option_list_item(self, node):
        """Nothing to close for an option-list item."""
        pass
    def visit_option_string(self, node):
        """Option strings need no extra markup."""
        ##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
        pass
    def depart_option_string(self, node):
        """Nothing to close for an option string."""
        ##self.out.append('</span>')
        pass
    def visit_organization(self, node):
        """Render the "organization" docinfo field."""
        self.visit_docinfo_item(node, 'organization')
    def depart_organization(self, node):
        """Close the "organization" docinfo field."""
        self.depart_docinfo_item(node)
    def visit_paragraph(self, node):
        """Start a paragraph, suppressing the blank line where LaTeX would
        insert unwanted vertical space (first in list item/description,
        directly after non-paragraph content in a compound)."""
        # no newline if the paragraph is first in a list item
        if ((isinstance(node.parent, nodes.list_item) or
             isinstance(node.parent, nodes.description)) and
            node is node.parent[0]):
            return
        index = node.parent.index(node)
        if (isinstance(node.parent, nodes.compound) and
            index > 0 and
            not isinstance(node.parent[index - 1], nodes.paragraph) and
            not isinstance(node.parent[index - 1], nodes.compound)):
            return
        self.out.append('\n')
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def depart_paragraph(self, node):
        """Terminate the paragraph with a newline."""
        self.out.append('\n')
    def visit_problematic(self, node):
        """Mark problematic text in red, hyperlinked to its system message."""
        self.requirements['color'] = PreambleCmds.color
        self.out.append('%\n')
        self.append_hypertargets(node)
        self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
    def depart_problematic(self, node):
        """Close the red hyperlinked group."""
        self.out.append('}}')
    def visit_raw(self, node):
        """Pass raw LaTeX through unescaped; skip other raw formats."""
        if not 'latex' in node.get('format', '').split():
            raise nodes.SkipNode
        if node['classes']:
            self.visit_inline(node)
        # append "as-is" skipping any LaTeX-encoding
        self.verbatim = True
    def depart_raw(self, node):
        """Re-enable LaTeX encoding after raw content."""
        self.verbatim = False
        if node['classes']:
            self.depart_inline(node)
    def visit_reference(self, node):
        """Open a hyperlink: \\href for external, \\hyperref for internal."""
        # BUG: hash_char '#' is troublesome in LaTeX.
        # mbox and other environments do not like the '#'.
        hash_char = '\\#'
        if 'refuri' in node:
            href = node['refuri'].replace('#', hash_char)
            self.out.append('\\href{%s}{' % href.replace('%', '\\%'))
            return
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        else:
            raise AssertionError('Unknown reference.')
        if not self.is_inline(node):
            self.out.append('\n')
        self.out.append('\\hyperref[%s]{' % href)
        if self._reference_label and 'refuri' not in node:
            self.out.append('\\%s{%s}}' % (self._reference_label,
                                           href.replace(hash_char, '')))
            raise nodes.SkipNode
    def depart_reference(self, node):
        """Close the hyperlink group."""
        self.out.append('}')
        if not self.is_inline(node):
            self.out.append('\n')
    def visit_revision(self, node):
        """Render the "revision" docinfo field."""
        self.visit_docinfo_item(node, 'revision')
    def depart_revision(self, node):
        """Close the "revision" docinfo field."""
        self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
    def visit_sidebar(self, node):
        """Open a DUsidebar box."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['sidebar'] = PreambleCmds.sidebar
        self.out.append('\n\\DUsidebar{\n')
    def depart_sidebar(self, node):
        """Close the DUsidebar box."""
        self.out.append('}\n')
attribution_formats = {'dash': ('---', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
    def visit_attribution(self, node):
        """Open a right-aligned attribution with the configured delimiters."""
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.out.append('\n\\begin{flushright}\n')
        self.out.append(prefix)
        self.context.append(suffix)
    def depart_attribution(self, node):
        """Emit the suffix and close the flushright environment."""
        self.out.append(self.context.pop() + '\n')
        self.out.append('\\end{flushright}\n')
    def visit_status(self, node):
        """Render the "status" docinfo field."""
        self.visit_docinfo_item(node, 'status')
    def depart_status(self, node):
        """Close the "status" docinfo field."""
        self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
self.literal_block_stack.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
    def depart_strong(self, node):
        """Close the ``\\textbf`` group opened in visit_strong."""
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
        self.literal_block_stack.pop()
    def visit_substitution_definition(self, node):
        """Substitution definitions produce no output."""
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        """Unresolved substitution references are not supported."""
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        """Append subtitle: document (deferred), section (starred) or generic."""
        if isinstance(node.parent, nodes.document):
            self.subtitle += [self.encode(node.astext())]
            self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
            raise nodes.SkipNode
        # section subtitle: "starred" (no number, not in ToC)
        elif isinstance(node.parent, nodes.section):
            self.out.append(r'\%s*{' %
                             self.d_class.section(self.section_level + 1))
        else:
            self.fallbacks['subtitle'] = PreambleCmds.subtitle
            self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
    def depart_subtitle(self, node):
        """Close the subtitle group."""
        self.out.append('}\n')
    def visit_system_message(self, node):
        """Render a system message (as red error/warning with backlinks)."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['error'] = PreambleCmds.error
        self.visit_admonition(node) # error or warning
        self.append_hypertargets(node)
        try:
            line = ', line~%s' % node['line']
        except KeyError:
            line = ''
        self.out.append('\n\n{\color{red}%s/%s} in \\texttt{%s}%s\n' %
                        (node['type'], node['level'],
                         self.encode(node['source']), line))
        if len(node['backrefs']) == 1:
            self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
            self.context.append('}')
        else:
            backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
                        for (i, href) in enumerate(node['backrefs'])]
            self.context.append('backrefs: ' + ' '.join(backrefs))
    def depart_system_message(self, node):
        """Emit the backlink text and close the admonition."""
        self.out.append(self.context.pop())
        self.depart_admonition()
    def visit_table(self, node):
        """Open a table; nested tables fall back to the tabular environment."""
        self.requirements['table'] = PreambleCmds.table
        if self.active_table.is_open():
            self.table_stack.append(self.active_table)
            # nesting longtable does not work (e.g. 2007-04-18)
            self.active_table = Table(self,'tabular',self.settings.table_style)
        self.active_table.open()
        for cls in node['classes']:
            self.active_table.set_table_style(cls)
        if self.active_table._table_style == 'booktabs':
            self.requirements['booktabs'] = r'\usepackage{booktabs}'
        self.out.append('\n' + self.active_table.get_opening())
    def depart_table(self, node):
        """Close the table; restore an outer table or reset the style."""
        self.out.append(self.active_table.get_closing() + '\n')
        self.active_table.close()
        if len(self.table_stack)>0:
            self.active_table = self.table_stack.pop()
        else:
            self.active_table.set_table_style(self.settings.table_style)
    def visit_target(self, node):
        """Emit hypertarget labels for direct targets; skip indirect ones."""
        # Skip indirect targets:
        if ('refuri' in node       # external hyperlink
            or 'refid' in node     # resolved internal link
            or 'refname' in node): # unresolved internal link
            ## self.out.append('%% %s\n' % node)   # for debugging
            return
        self.out.append('%\n')
        # do we need an anchor (\phantomsection)?
        set_anchor = not(isinstance(node.parent, nodes.caption) or
                         isinstance(node.parent, nodes.title))
        # TODO: where else can/must we omit the \phantomsection?
        self.out += self.ids_to_labels(node, set_anchor)
    def depart_target(self, node):
        """Nothing to close for a target."""
        pass
    def visit_tbody(self, node):
        """Ensure the table preamble exists even for headless tables."""
        # BUG write preamble if not yet done (colspecs not [])
        # for tables without heads.
        if not self.active_table.get('preamble written'):
            self.visit_thead(None)
            self.depart_thead(None)
    def depart_tbody(self, node):
        """Nothing to close for a table body."""
        pass
    def visit_term(self, node):
        """definition list term"""
        # Commands with optional args inside an optional arg must be put
        # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
        self.out.append('\\item[{')
    def depart_term(self, node):
        """Close the term label."""
        # \leavevmode results in a line break if the
        # term is followed by an item list.
        self.out.append('}] \leavevmode ')
    def visit_tgroup(self, node):
        """Table groups need no markup of their own."""
        #self.out.append(self.starttag(node, 'colgroup'))
        #self.context.append('</colgroup>\n')
        pass
    def depart_tgroup(self, node):
        """Nothing to close for a table group."""
        pass
    # current nesting depth of table heads (0 = not inside a thead)
    _thead_depth = 0
    def thead_depth (self):
        """Return the current table-head nesting depth."""
        return self._thead_depth
    def visit_thead(self, node):
        """Open the table head; first (outer) call writes preamble+caption."""
        self._thead_depth += 1
        if 1 == self.thead_depth():
            self.out.append('{%s}\n' % self.active_table.get_colspecs())
            self.active_table.set('preamble written',1)
        self.out.append(self.active_table.get_caption())
        self.out.extend(self.active_table.visit_thead())
    def depart_thead(self, node):
        """Close the table head; node is None when called from visit_tbody."""
        if node is not None:
            self.out.extend(self.active_table.depart_thead())
            if self.active_table.need_recurse():
                node.walkabout(self)
        self._thead_depth -= 1
    def visit_tip(self, node):
        """Render a "tip" admonition."""
        self.visit_admonition(node)
    def depart_tip(self, node):
        """Close the "tip" admonition."""
        self.depart_admonition()
    def bookmark(self, node):
        """Return label and pdfbookmark string for titles."""
        result = ['']
        if self.settings.sectnum_xform: # "starred" section cmd
            # add to the toc and pdfbookmarks
            section_name = self.d_class.section(max(self.section_level, 1))
            section_title = self.encode(node.astext())
            result.append(r'\phantomsection')
            result.append(r'\addcontentsline{toc}{%s}{%s}' %
                          (section_name, section_title))
        result += self.ids_to_labels(node.parent, set_anchor=False)
        return '%\n '.join(result) + '%\n'
    def visit_title(self, node):
        """Append section and other titles."""
        # Document title
        if node.parent.tagname == 'document':
            title = self.encode(node.astext())
            self.title.append(title)
            self.pdfinfo.append(' pdftitle={%s},' % title)
            raise nodes.SkipNode
        # Topic titles (topic, admonition, sidebar)
        elif (isinstance(node.parent, nodes.topic) or
              isinstance(node.parent, nodes.admonition) or
              isinstance(node.parent, nodes.sidebar)):
            self.fallbacks['title'] = PreambleCmds.title
            classes = ','.join(node.parent['classes'])
            if not classes:
                classes = node.tagname
            self.out.append('\\DUtitle[%s]{' % classes)
            self.context.append('}\n')
        # Table caption
        elif isinstance(node.parent, nodes.table):
            self.push_output_collector(self.active_table.caption)
            self.context.append('')
        # Section title
        else:
            self.out.append('\n\n')
            self.out.append('%' + '_' * 75)
            self.out.append('\n\n')
            #
            section_name = self.d_class.section(self.section_level)
            # number sections?
            if (self.settings.sectnum_xform # numbering by Docutils
                or (self.section_level > len(self.d_class.sections))):
                section_star = '*'
            else: # LaTeX numbered sections
                section_star = ''
            self.out.append(r'\%s%s{' % (section_name, section_star))
            # System messages heading in red:
            if ('system-messages' in node.parent['classes']):
                self.requirements['color'] = PreambleCmds.color
                self.out.append('\color{red}')
            # label and ToC entry:
            self.context.append(self.bookmark(node) + '}\n')
        # MAYBE postfix paragraph and subparagraph with \leavemode to
        # ensure floats stay in the section and text starts on a new line.
    def depart_title(self, node):
        """Close the title; restore collector if it was a table caption."""
        self.out.append(self.context.pop())
        if isinstance(node.parent, nodes.table):
            self.pop_output_collector()
    def minitoc(self, title, depth):
        """Generate a local table of contents with LaTeX package minitoc"""
        section_name = self.d_class.section(self.section_level)
        # name-prefix for current section level
        minitoc_names = {'part': 'part', 'chapter': 'mini'}
        if 'chapter' not in self.d_class.sections:
            minitoc_names['section'] = 'sect'
        try:
            minitoc_name = minitoc_names[section_name]
        except KeyError: # minitoc only supports part- and toplevel
            warn = self.document.reporter.warning
            warn('Skipping local ToC at %s level.\n' % section_name +
                 '  Feature not supported with option "use-latex-toc"')
            return
        # Requirements/Setup
        self.requirements['minitoc'] = PreambleCmds.minitoc
        self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
                                                      minitoc_name)
        # depth: (Docutils defaults to unlimited depth)
        maxdepth = len(self.d_class.sections)
        self.requirements['minitoc-%s-depth' % minitoc_name] = (
            r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
        # Process 'depth' argument (!Docutils stores a relative depth while
        # minitoc expects an absolute depth!):
        offset = {'sect': 1, 'mini': 0, 'part': 0}
        if 'chapter' in self.d_class.sections:
            offset['part'] = -1
        if depth:
            self.out.append('\\setcounter{%stocdepth}{%d}' %
                            (minitoc_name, depth + offset[minitoc_name]))
        # title:
        self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
        # the toc-generating command:
        self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
    """Start a topic element.

    Topic nodes can be generic topic, abstract, dedication, or ToC.
    A "contents" topic becomes a LaTeX \\tableofcontents (or a local
    minitoc) when use_latex_toc is set, else a Docutils-generated list;
    abstract and dedication are collected in separate output streams.
    """
    # table of contents:
    if 'contents' in node['classes']:
        self.out.append('\n')
        self.out += self.ids_to_labels(node)
        # add contents to PDF bookmarks sidebar
        if isinstance(node.next_node(), nodes.title):
            self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
                            (self.section_level+1,
                             node.next_node().astext(),
                             node.get('ids', ['contents'])[0]
                             ))
        if self.use_latex_toc:
            title = ''
            if isinstance(node.next_node(), nodes.title):
                # remove the title node: LaTeX prints its own heading
                title = self.encode(node.pop(0).astext())
            depth = node.get('depth', 0)
            if 'local' in node['classes']:
                self.minitoc(title, depth)
                self.context.append('')
                return
            if depth:
                self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
            if title != 'Contents':
                self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
                                title)
            self.out.append('\\tableofcontents\n\n')
            self.has_latex_toc = True
        else:  # Docutils generated contents list
            # set flag for visit_bullet_list() and visit_title()
            self.is_toc_list = True
        self.context.append('')
    elif ('abstract' in node['classes'] and
          self.settings.use_latex_abstract):
        self.push_output_collector(self.abstract)
        self.out.append('\\begin{abstract}')
        self.context.append('\\end{abstract}\n')
        if isinstance(node.next_node(), nodes.title):
            node.pop(0)  # LaTeX provides its own title
    else:
        # generic topic (or abstract/dedication rendered via \DUtopic):
        self.fallbacks['topic'] = PreambleCmds.topic
        # special topics:
        if 'abstract' in node['classes']:
            self.fallbacks['abstract'] = PreambleCmds.abstract
            self.push_output_collector(self.abstract)
        if 'dedication' in node['classes']:
            self.fallbacks['dedication'] = PreambleCmds.dedication
            self.push_output_collector(self.dedication)
        self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
        self.context.append('}\n')
def depart_topic(self, node):
    """Close a topic: emit the pending markup, reset the ToC flag."""
    self.out.append(self.context.pop())
    self.is_toc_list = False
    # Abstract and dedication were collected in their own streams.
    is_special = ('abstract' in node['classes']
                  or 'dedication' in node['classes'])
    if is_special:
        self.pop_output_collector()
def visit_inline(self, node):  # <span>, i.e. custom roles
    """Open one \\DUrole{} command per class of the inline node."""
    # insert fallback definition
    self.fallbacks['inline'] = PreambleCmds.inline
    classes = node['classes']
    for cls in classes:
        self.out.append(r'\DUrole{%s}{' % cls)
    # one closing brace per opened command
    self.context.append('}' * len(classes))
def depart_inline(self, node):
    # Emit the closing braces prepared by visit_inline().
    self.out.append(self.context.pop())
def visit_rubric(self, node):
    """Open a \\DUrubric command (an informal heading)."""
    self.fallbacks['rubric'] = PreambleCmds.rubric
    self.out += ['\n\\DUrubric{']
    self.context.append('}\n')
def depart_rubric(self, node):
    # Emit the closing brace prepared by visit_rubric().
    self.out.append(self.context.pop())
def visit_transition(self, node):
    """Render a transition via the \\DUtransition fallback command."""
    self.fallbacks['transition'] = PreambleCmds.transition
    # visual separator comment in the .tex source, then the command
    separator_comment = '%' + '_' * 75 + '\n'
    self.out.extend(['\n\n', separator_comment, r'\DUtransition', '\n\n'])
def depart_transition(self, node):
    # Nothing to close: visit_transition() emitted everything.
    pass
def visit_version(self, node):
    # "version" is a standard docinfo field.
    self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
    # Delegate to the generic docinfo handling.
    self.depart_docinfo_item(node)
def visit_warning(self, node):
    # "warning" is rendered like any other admonition.
    self.visit_admonition(node)
def depart_warning(self, node):
    # NOTE(review): depart_admonition is called here without the node
    # argument, unlike visit_admonition above -- confirm its signature
    # makes the argument optional.
    self.depart_admonition()
def unimplemented_visit(self, node):
    """Fail loudly for node types this writer does not handle."""
    message = ('visiting unimplemented node type: %s'
               % node.__class__.__name__)
    raise NotImplementedError(message)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
| Python |
# -*- coding: utf-8 -*-
# $Id: manpage.py 6110 2009-08-31 14:40:33Z grubert $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsections, only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
by the commands ``whatis`` or ``apropos``.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import time
import re
from types import ListType
import docutils
from docutils import nodes, utils, writers, languages
import roman
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQOUTE_INDENT = 3.5
# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
    """Docutils writer producing man-page (troff) output."""

    # BUGFIX: must be a tuple.  With the trailing comma missing,
    # ('manpage') is the *string* 'manpage', and membership tests in
    # Writer.supports() would also match any substring such as 'man'.
    supported = ('manpage',)
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # The Translator does the actual doctree -> troff conversion.
        self.translator_class = Translator

    def translate(self):
        """Walk the document tree and collect the troff output."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
class Table:
    """Accumulate table rows/cells and render them as troff ``tbl`` markup.

    Cells are stored as lists of already-formatted output lines;
    `as_list` produces the final ``.TS`` ... ``.TE`` block.
    """

    def __init__(self):
        self._rows = []
        self._options = ['center', ]
        self._tab_char = '\t'
        self._coldefs = []

    def new_row(self):
        # Begin collecting cells for a fresh row.
        self._rows.append([])

    def append_separator(self, separator):
        """Append the separator for table head."""
        self._rows.append([separator])

    def append_cell(self, cell_lines):
        """cell_lines is an array of lines"""
        # Drop a leading paragraph break; it would only add vertical
        # space inside the cell.  Slicing also copies the caller's list.
        start = 1 if (cell_lines and cell_lines[0] == '.sp\n') else 0
        current_row = self._rows[-1]
        current_row.append(cell_lines[start:])
        # Grow the column definitions ('l' = left aligned) as needed.
        if len(self._coldefs) < len(current_row):
            self._coldefs.append('l')

    def _minimize_cell(self, cell_lines):
        """Remove leading and trailing blank and ``.sp`` lines"""
        while cell_lines and cell_lines[0] in ('\n', '.sp\n'):
            del cell_lines[0]
        while cell_lines and cell_lines[-1] in ('\n', '.sp\n'):
            del cell_lines[-1]

    def as_list(self):
        """Return the whole table as a list of troff output lines."""
        output = ['.TS\n',
                  ' '.join(self._options) + ';\n',
                  '|%s|.\n' % '|'.join(self._coldefs)]
        for row in self._rows:
            # row = array of cells.  cell = array of lines.
            output.append('_\n')        # horizontal rule above the row
            output.append('T{\n')       # open the first text block
            last_index = len(row) - 1
            for position, cell in enumerate(row):
                self._minimize_cell(cell)
                output.extend(cell)
                if not output[-1].endswith('\n'):
                    output[-1] += '\n'
                if position < last_index:
                    # close this text block, tab, open the next one
                    output.append('T}' + self._tab_char + 'T{\n')
                else:
                    output.append('T}\n')
        output.append('_\n')
        output.append('.TE\n')
        return output
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
document_start = """Man page generated from reStructeredText."""
def __init__(self, document):
    """Initialize translator state.

    Output is collected in `head`, `body` and `foot` and joined
    in `astext()`.
    """
    nodes.NodeVisitor.__init__(self, document)
    self.settings = settings = document.settings
    lcode = settings.language_code
    self.language = languages.get_language(lcode)
    self.head = []          # .TH header and macro definitions
    self.body = []          # main document text
    self.foot = []          # trailing output
    self.section_level = 0
    self.context = []       # generic stack for visit/depart pairs
    self.topic_class = ''
    self.colspecs = []
    self.compact_p = 1
    self.compact_simple = None
    # the list style "*" bullet or "#" numbered
    self._list_char = []
    # writing the header .TH and .SH NAME is postponed until after
    # the docinfo has been processed.
    self._docinfo = {
        "title": "", "title_upper": "",
        "subtitle": "",
        "manual_section": "", "manual_group": "",
        "author": [],
        "date": "",
        "copyright": "",
        "version": "",
        }
    self._docinfo_keys = []   # a list to keep the sequence as in source.
    self._docinfo_names = {}  # to get name from text not normalized.
    self._in_docinfo = None
    self._active_table = None
    self._in_literal = False
    self.header_written = 0
    self._line_block = 0
    self.authors = []
    self.section_level = 0
    self._indent = [0]        # stack of current indentation widths
    # central definition of simple processing rules
    # what to output on : visit, depart
    # Do not use paragraph requests ``.PP`` because these set indentation.
    # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
    #
    # Fonts are put on a stack, the top one is used.
    # ``.ft P`` or ``\\fP`` pop from stack.
    # ``B`` bold, ``I`` italic, ``R`` roman should be available.
    # Hopefully ``C`` courier too.
    self.defs = {
        'indent': ('.INDENT %.1f\n', '.UNINDENT\n'),
        'definition_list_item': ('.TP', ''),
        'field_name': ('.TP\n.B ', '\n'),
        'literal': ('\\fC', '\\fP'),
        'literal_block': ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
        'option_list_item': ('.TP\n', ''),
        'reference': (r'\fI\%', r'\fP'),
        'emphasis': ('\\fI', '\\fP'),
        'strong': ('\\fB', '\\fP'),
        'term': ('\n.B ', '\n'),
        'title_reference': ('\\fI', '\\fP'),
        'topic-title': ('.SS ', ),
        'sidebar-title': ('.SS ', ),
        'problematic': ('\n.nf\n', '\n.fi\n'),
        }
# NOTE dont specify the newline before a dot-command, but ensure
# it is there.
def comment_begin(self, text):
    """Return commented version of the passed text WITHOUT end of
    line/comment."""
    # Every line becomes a troff comment request ('.\" ...').
    prefix = '.\\" '
    commented = [prefix + line + '\n' for line in text.split('\n')]
    return ''.join(commented)
def comment(self, text):
    """Return commented version of the passed text."""
    # Terminate the comment block with a troff no-op request ('.').
    return self.comment_begin(text) + '.\n'
def ensure_eol(self):
    """Ensure the last line in body is terminated by new line."""
    # ROBUSTNESS: guard against an empty `body` or an empty last chunk;
    # the unguarded original raised IndexError in either case.
    if self.body and self.body[-1] and self.body[-1][-1] != '\n':
        self.body.append('\n')
def astext(self):
    """Return the final formatted document as a string."""
    if not self.header_written:
        # ensure we get a ".TH" as viewers require it.
        self.head.append(self.header())
    # filter body: iterate backwards so earlier replacements do not
    # disturb the indices still to be visited.
    for i in xrange(len(self.body)-1, 0, -1):
        # remove superfluous vertical gaps.
        if self.body[i] == '.sp\n':
            if self.body[i-1][:4] in ('.BI ', '.IP '):
                self.body[i] = '.\n'
            elif (self.body[i-1][:3] == '.B ' and
                  self.body[i-2][:4] == '.TP\n'):
                self.body[i] = '.\n'
            elif (self.body[i-1] == '\n' and
                  self.body[i-2][0] != '.' and
                  (self.body[i-3][:7] == '.TP\n.B '
                   or self.body[i-3][:4] == '\n.B ')
                  ):
                # NOTE(review): for small i the [i-2]/[i-3] lookups wrap
                # to the end of the list via negative indexing --
                # presumably harmless in practice, but unverified.
                self.body[i] = '.\n'
    return ''.join(self.head + self.body + self.foot)
def deunicode(self, text):
    """Replace troff-unsafe unicode characters with roff escapes."""
    # no-break space -> escaped space; dagger -> \(dg special char
    for char, escape in ((u'\xa0', '\\ '), (u'\u2020', '\\(dg')):
        text = text.replace(char, escape)
    return text
def visit_Text(self, node):
    """Append the node's text to `body`, escaped for troff."""
    text = node.astext()
    # backslash itself must be escaped first
    text = text.replace('\\','\\e')
    replace_pairs = [
        (u'-', ur'\-'),      # literal hyphen/minus
        (u'\'', ur'\(aq'),   # apostrophe
        (u'´', ur'\''),      # acute accent
        (u'`', ur'\(ga'),    # grave accent
        ]
    for (in_char, out_markup) in replace_pairs:
        text = text.replace(in_char, out_markup)
    # unicode
    text = self.deunicode(text)
    if self._in_literal:
        # prevent interpretation of "." at line start
        if text[0] == '.':
            text = '\\&' + text
        text = text.replace('\n.', '\n\\&.')
    self.body.append(text)
def depart_Text(self, node):
pass
def list_start(self, node):
class enum_char:
enum_style = {
'bullet' : '\\(bu',
'emdash' : '\\(em',
}
def __init__(self, style):
self._style = style
if node.has_key('start'):
self._cnt = node['start'] - 1
else:
self._cnt = 0
self._indent = 2
if style == 'arabic':
# indentation depends on number of childrens
# and start value.
self._indent = len(str(len(node.children)))
self._indent += len(str(self._cnt)) + 1
elif style == 'loweralpha':
self._cnt += ord('a') - 1
self._indent = 3
elif style == 'upperalpha':
self._cnt += ord('A') - 1
self._indent = 3
elif style.endswith('roman'):
self._indent = 5
def next(self):
if self._style == 'bullet':
return self.enum_style[self._style]
elif self._style == 'emdash':
return self.enum_style[self._style]
self._cnt += 1
# TODO add prefix postfix
if self._style == 'arabic':
return "%d." % self._cnt
elif self._style in ('loweralpha', 'upperalpha'):
return "%c." % self._cnt
elif self._style.endswith('roman'):
res = roman.toRoman(self._cnt) + '.'
if self._style.startswith('upper'):
return res.upper()
return res.lower()
else:
return "%d." % self._cnt
def get_width(self):
return self._indent
def __repr__(self):
return 'enum_style-%s' % list(self._style)
if node.has_key('enumtype'):
self._list_char.append(enum_char(node['enumtype']))
else:
self._list_char.append(enum_char('bullet'))
if len(self._list_char) > 1:
# indent nested lists
self.indent(self._list_char[-2].get_width())
else:
self.indent(self._list_char[-1].get_width())
def list_end(self):
self.dedent()
self._list_char.pop()
def header(self):
    """Return the man-page prologue: the .TH line plus the NAME section."""
    # .TH <title> <section> <date> <version> <manual group>
    tmpl = (".TH %(title_upper)s %(manual_section)s"
            " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
            ".SH NAME\n"
            "%(title)s \- %(subtitle)s\n")
    return tmpl % self._docinfo
def append_header(self):
"""append header with .TH and .SH NAME"""
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.body.append(self.header())
self.body.append(MACRO_DEF)
self.header_written = 1
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
pass
def visit_admonition(self, node, name=None):
if name:
self.body.append('.IP %s\n' %
self.language.labels.get(name, name))
def depart_admonition(self, node):
self.body.append('.RE\n')
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
depart_attention = depart_admonition
def visit_docinfo_item(self, node, name):
if name == 'author':
self._docinfo[name].append(node.astext())
else:
self._docinfo[name] = node.astext()
self._docinfo_keys.append(name)
raise nodes.SkipNode
def depart_docinfo_item(self, node):
pass
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
depart_author = depart_docinfo_item
def visit_authors(self, node):
# _author is called anyway.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
# BUG/HACK: indent alway uses the _last_ indention,
# thus we need two of them.
self.indent(BLOCKQOUTE_INDENT)
self.indent(0)
def depart_block_quote(self, node):
self.dedent()
self.dedent()
def visit_bullet_list(self, node):
self.list_start(node)
def depart_bullet_list(self, node):
self.list_end()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
depart_caution = depart_admonition
def visit_citation(self, node):
    """Open a citation as an indented paragraph labelled '[name]'."""
    # The first whitespace-separated token is the citation label.
    # (The original unpacked into ``num, text``: ``text`` was unused,
    # and the unpacking crashed on a citation with no body text.)
    num = node.astext().split(None, 1)[0].strip()
    self.body.append('.IP [%s] 5\n' % num)
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
self.body.append('['+node.astext()+']')
raise nodes.SkipNode
def visit_classifier(self, node):
pass
def depart_classifier(self, node):
pass
def visit_colspec(self, node):
self.colspecs.append(node)
def depart_colspec(self, node):
pass
def write_colspecs(self):
self.body.append("%s.\n" % ('L '*len(self.colspecs)))
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
self.body.append(self.comment(node.astext()))
raise nodes.SkipNode
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
depart_contact = depart_docinfo_item
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
depart_danger = depart_admonition
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
pass
def visit_definition_list(self, node):
self.indent(DEFINITION_LIST_INDENT)
def depart_definition_list(self, node):
self.dedent()
def visit_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][0])
def depart_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][1])
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self._in_docinfo = 1
def depart_docinfo(self, node):
self._in_docinfo = None
# NOTE nothing should be written before this
self.append_header()
def visit_doctest_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_doctest_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_document(self, node):
# no blank line between comment and header.
self.body.append(self.comment(self.document_start).rstrip()+'\n')
# writing header is postboned
self.header_written = 0
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('.SH AUTHOR\n%s\n'
% ', '.join(self._docinfo['author']))
skip = ('author', 'copyright', 'date',
'manual_group', 'manual_section',
'subtitle',
'title', 'title_upper', 'version')
for name in self._docinfo_keys:
if name == 'address':
self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
self.language.labels.get(name, name),
self.defs['indent'][0] % 0,
self.defs['indent'][0] % BLOCKQOUTE_INDENT,
self._docinfo[name],
self.defs['indent'][1],
self.defs['indent'][1],
) )
elif not name in skip:
if name in self._docinfo_names:
label = self._docinfo_names[name]
else:
label = self.language.labels.get(name, name)
self.body.append("\n%s: %s\n" % (label, self._docinfo[name]) )
if self._docinfo['copyright']:
self.body.append('.SH COPYRIGHT\n%s\n'
% self._docinfo['copyright'])
self.body.append( self.comment(
'Generated by docutils manpage writer.\n' ) )
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_emphasis(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_entry(self, node):
# a cell in a table row
if 'morerows' in node:
self.document.reporter.warning('"table row spanning" not supported',
base_node=node)
if 'morecols' in node:
self.document.reporter.warning(
'"table cell spanning" not supported', base_node=node)
self.context.append(len(self.body))
def depart_entry(self, node):
start = self.context.pop()
self._active_table.append_cell(self.body[start:])
del self.body[start:]
def visit_enumerated_list(self, node):
self.list_start(node)
def depart_enumerated_list(self, node):
self.list_end()
def visit_error(self, node):
self.visit_admonition(node, 'error')
depart_error = depart_admonition
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_body(self, node):
if self._in_docinfo:
name_normalized = self._field_name.lower().replace(" ","_")
self._docinfo_names[name_normalized] = self._field_name
self.visit_docinfo_item(node, name_normalized)
raise nodes.SkipNode
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
self.indent(FIELD_LIST_INDENT)
def depart_field_list(self, node):
self.dedent()
def visit_field_name(self, node):
if self._in_docinfo:
self._field_name = node.astext()
raise nodes.SkipNode
else:
self.body.append(self.defs['field_name'][0])
def depart_field_name(self, node):
self.body.append(self.defs['field_name'][1])
def visit_figure(self, node):
self.indent(2.5)
self.indent(0)
def depart_figure(self, node):
self.dedent()
self.dedent()
def visit_footer(self, node):
self.document.reporter.warning('"footer" not supported',
base_node=node)
def depart_footer(self, node):
pass
def visit_footnote(self, node):
    """Open a footnote as an indented paragraph labelled '[label]'."""
    # Only the first token (the footnote label) is needed here; the
    # body text is rendered by the child nodes.  (The original's
    # ``num, text`` unpacking crashed on a footnote with no body text.)
    num = node.astext().split(None, 1)[0].strip()
    self.body.append('.IP [%s] 5\n' % self.deunicode(num))
def depart_footnote(self, node):
pass
def footnote_backrefs(self, node):
self.document.reporter.warning('"footnote_backrefs" not supported',
base_node=node)
def visit_footnote_reference(self, node):
self.body.append('['+self.deunicode(node.astext())+']')
raise nodes.SkipNode
def depart_footnote_reference(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
raise NotImplementedError, node.astext()
def depart_header(self, node):
pass
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
depart_hint = depart_admonition
def visit_subscript(self, node):
self.body.append('\\s-2\\d')
def depart_subscript(self, node):
self.body.append('\\u\\s0')
def visit_superscript(self, node):
self.body.append('\\s-2\\u')
def depart_superscript(self, node):
self.body.append('\\d\\s0')
def visit_attribution(self, node):
self.body.append('\\(em ')
def depart_attribution(self, node):
self.body.append('\n')
def visit_image(self, node):
self.document.reporter.warning('"image" not supported',
base_node=node)
text = []
if 'alt' in node.attributes:
text.append(node.attributes['alt'])
if 'uri' in node.attributes:
text.append(node.attributes['uri'])
self.body.append('[image: %s]\n' % ('/'.join(text)))
raise nodes.SkipNode
def visit_important(self, node):
self.visit_admonition(node, 'important')
depart_important = depart_admonition
def visit_label(self, node):
# footnote and citation
if (isinstance(node.parent, nodes.footnote)
or isinstance(node.parent, nodes.citation)):
raise nodes.SkipNode
self.document.reporter.warning('"unsupported "label"',
base_node=node)
self.body.append('[')
def depart_label(self, node):
self.body.append(']\n')
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# WHAT should we use .INDENT, .UNINDENT ?
def visit_line_block(self, node):
self._line_block += 1
if self._line_block == 1:
self.body.append('.nf\n')
else:
self.body.append('.in +2\n')
def depart_line_block(self, node):
self._line_block -= 1
if self._line_block == 0:
self.body.append('.fi\n')
self.body.append('.sp\n')
else:
self.body.append('.in -2\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.body.append('\n')
def visit_list_item(self, node):
# man 7 man argues to use ".IP" instead of ".TP"
self.body.append('.IP %s %d\n' % (
self._list_char[-1].next(),
self._list_char[-1].get_width(),) )
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.body.append(self.defs['literal'][0])
def depart_literal(self, node):
self.body.append(self.defs['literal'][1])
def visit_literal_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_literal_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_meta(self, node):
raise NotImplementedError, node.astext()
def depart_meta(self, node):
pass
def visit_note(self, node):
self.visit_admonition(node, 'note')
depart_note = depart_admonition
def indent(self, by=0.5):
# if we are in a section ".SH" there already is a .RS
step = self._indent[-1]
self._indent.append(by)
self.body.append(self.defs['indent'][0] % step)
def dedent(self):
self._indent.pop()
self.body.append(self.defs['indent'][1])
def visit_option_list(self, node):
self.indent(OPTION_LIST_INDENT)
def depart_option_list(self, node):
self.dedent()
def visit_option_list_item(self, node):
# one item of the list
self.body.append(self.defs['option_list_item'][0])
def depart_option_list_item(self, node):
self.body.append(self.defs['option_list_item'][1])
def visit_option_group(self, node):
# as one option could have several forms it is a group
# options without parameter bold only, .B, -v
# options with parameter bold italic, .BI, -f file
#
# we do not know if .B or .BI
self.context.append('.B') # blind guess
self.context.append(len(self.body)) # to be able to insert later
self.context.append(0) # option counter
def depart_option_group(self, node):
self.context.pop() # the counter
start_position = self.context.pop()
text = self.body[start_position:]
del self.body[start_position:]
self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
def visit_option(self, node):
# each form of the option will be presented separately
if self.context[-1]>0:
self.body.append(', ')
if self.context[-3] == '.BI':
self.body.append('\\')
self.body.append(' ')
def depart_option(self, node):
self.context[-1] += 1
def visit_option_string(self, node):
# do not know if .B or .BI
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.context[-3] = '.BI' # bold/italic alternate
if node['delimiter'] != ' ':
self.body.append('\\fB%s ' % node['delimiter'] )
elif self.body[len(self.body)-1].endswith('='):
# a blank only means no blank in output, just changing font
self.body.append(' ')
else:
# blank backslash blank, switch font then a blank
self.body.append(' \\ ')
def depart_option_argument(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
pass
def visit_paragraph(self, node):
# ``.PP`` : Start standard indented paragraph.
# ``.LP`` : Start block paragraph, all except the first.
# ``.P [type]`` : Start paragraph type.
# NOTE dont use paragraph starts because they reset indentation.
# ``.sp`` is only vertical space
self.ensure_eol()
self.body.append('.sp\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append(self.defs['problematic'][0])
def depart_problematic(self, node):
self.body.append(self.defs['problematic'][1])
def visit_raw(self, node):
if node.get('format') == 'manpage':
self.body.append(node.astext() + "\n")
# Keep non-manpage raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
"""E.g. link or email address."""
self.body.append(self.defs['reference'][0])
def depart_reference(self, node):
self.body.append(self.defs['reference'][1])
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
depart_revision = depart_docinfo_item
def visit_row(self, node):
self._active_table.new_row()
def depart_row(self, node):
pass
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
depart_status = depart_docinfo_item
def visit_strong(self, node):
self.body.append(self.defs['strong'][0])
def depart_strong(self, node):
self.body.append(self.defs['strong'][1])
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.document.reporter.warning('"substitution_reference" not supported',
base_node=node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['strong'][0])
elif isinstance(node.parent, nodes.document):
self.visit_docinfo_item(node, 'subtitle')
elif isinstance(node.parent, nodes.section):
self.body.append(self.defs['strong'][0])
def depart_subtitle(self, node):
# document subtitle calls SkipNode
self.body.append(self.defs['strong'][1]+'\n.PP\n')
def visit_system_message(self, node):
    """Render a Docutils system message as a labelled paragraph."""
    # TODO add report_level
    #if node['level'] < self.document.reporter['writer'].report_level:
    #    Level is too low to display:
    #    raise nodes.SkipNode
    # (Removed dead locals ``attr`` and ``backref_text`` -- they were
    # assigned but never used.)
    if node.hasattr('line'):
        line = ', line %s' % node['line']
    else:
        line = ''
    # NOTE(review): '%s:%s' combined with the ', line N' prefix in
    # ``line`` yields e.g. '(src:, line 3)' -- odd-looking but the
    # original behavior, so preserved.
    self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
                     % (node['type'], node['level'], node['source'], line))
def depart_system_message(self, node):
pass
def visit_table(self, node):
self._active_table = Table()
def depart_table(self, node):
self.ensure_eol()
self.body.extend(self._active_table.as_list())
self._active_table = None
def visit_target(self, node):
# targets are in-document hyper targets, without any use for man-pages.
raise nodes.SkipNode
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append(self.defs['term'][0])
def depart_term(self, node):
self.body.append(self.defs['term'][1])
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
# MAYBE double line '='
pass
def depart_thead(self, node):
# MAYBE double line '='
pass
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
depart_tip = depart_admonition
def visit_title(self, node):
if isinstance(node.parent, nodes.topic):
self.body.append(self.defs['topic-title'][0])
elif isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['sidebar-title'][0])
elif isinstance(node.parent, nodes.admonition):
self.body.append('.IP "')
elif self.section_level == 0:
self._docinfo['title'] = node.astext()
# document title for .TH
self._docinfo['title_upper'] = node.astext().upper()
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH ')
else:
self.body.append('.SS ')
def depart_title(self, node):
if isinstance(node.parent, nodes.admonition):
self.body.append('"')
self.body.append('\n')
def visit_title_reference(self, node):
"""inline citation reference"""
self.body.append(self.defs['title_reference'][0])
def depart_title_reference(self, node):
self.body.append(self.defs['title_reference'][1])
def visit_topic(self, node):
pass
def depart_topic(self, node):
pass
def visit_sidebar(self, node):
pass
def depart_sidebar(self, node):
pass
def visit_rubric(self, node):
pass
def depart_rubric(self, node):
pass
def visit_transition(self, node):
# .PP Begin a new paragraph and reset prevailing indent.
# .sp N leaves N lines of blank space.
# .ce centers the next line
self.body.append('\n.sp\n.ce\n----\n')
def depart_transition(self, node):
self.body.append('\n.ce 0\n.sp\n')
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
depart_warning = depart_admonition
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# vim: set fileencoding=utf-8 et ts=4 ai :
| Python |
# $Id: __init__.py 5889 2009-04-01 20:00:21Z gbrandl $
# Authors: Chris Liechti <cliechti@gmx.net>;
# David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
from docutils._compat import b
themes_dir_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
    """Return the directory path of the installed S5 theme `name`.

    Raise ``docutils.ApplicationError`` if no such theme directory exists.
    """
    # Where else to look for a theme?
    # Check working dir?  Destination dir?  Config dir?  Plugins dir?
    theme_path = os.path.join(themes_dir_path, name)
    if os.path.isdir(theme_path):
        return theme_path
    raise docutils.ApplicationError(
        'Theme directory not found: %r (path: %r)' % (name, theme_path))
class Writer(html4css1.Writer):
settings_spec = html4css1.Writer.settings_spec + (
'S5 Slideshow Specific Options',
'For the S5/HTML writer, the --no-toc-backlinks option '
'(defined in General Docutils Options above) is the default, '
'and should not be changed.',
(('Specify an installed S5 theme by name. Overrides --theme-url. '
'The default theme name is "default". The theme files will be '
'copied into a "ui/<theme>" directory, in the same directory as the '
'destination file (output HTML). Note that existing theme files '
'will not be overwritten (unless --overwrite-theme-files is used).',
['--theme'],
{'default': 'default', 'metavar': '<name>',
'overrides': 'theme_url'}),
('Specify an S5 theme URL. The destination file (output HTML) will '
'link to this theme; nothing will be copied. Overrides --theme.',
['--theme-url'],
{'metavar': '<URL>', 'overrides': 'theme'}),
('Allow existing theme files in the ``ui/<theme>`` directory to be '
'overwritten. The default is not to overwrite theme files.',
['--overwrite-theme-files'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Keep existing theme files in the ``ui/<theme>`` directory; do not '
'overwrite any. This is the default.',
['--keep-theme-files'],
{'dest': 'overwrite_theme_files', 'action': 'store_false'}),
('Set the initial view mode to "slideshow" [default] or "outline".',
['--view-mode'],
{'choices': ['slideshow', 'outline'], 'default': 'slideshow',
'metavar': '<mode>'}),
('Normally hide the presentation controls in slideshow mode. '
'This is the default.',
['--hidden-controls'],
{'action': 'store_true', 'default': True,
'validator': frontend.validate_boolean}),
('Always show the presentation controls in slideshow mode. '
'The default is to hide the controls.',
['--visible-controls'],
{'dest': 'hidden_controls', 'action': 'store_false'}),
('Enable the current slide indicator ("1 / 15"). '
'The default is to disable it.',
['--current-slide'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Disable the current slide indicator. This is the default.',
['--no-current-slide'],
{'dest': 'current_slide', 'action': 'store_false'}),))
settings_default_overrides = {'toc_backlinks': 0}
config_section = 's5_html writer'
config_section_dependencies = ('writers', 'html4css1 writer')
def __init__(self):
    """Initialize the html4css1 base writer and install the
    S5-specific translator class."""
    html4css1.Writer.__init__(self)
    self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
    """HTML translator that layers S5 slideshow markup (configuration
    meta tags, theme stylesheet/script links, and per-slide <div>
    structure) on top of the html4css1 output."""

    # Inserted into <head>; %(path)s is the theme directory URL and the
    # other keys come from writer settings (interpolated in __init__).
    s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
    # The script element must go in front of the link elements to
    # avoid a flash of unstyled content (FOUC), reproducible with
    # Firefox.

    # Appended when the --no-current-slide setting is in effect.
    disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""

    # S5 page layout skeleton; filled in by depart_document.
    layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
    # <div class="topleft"></div>
    # <div class="topright"></div>
    # <div class="bottomleft"></div>
    # <div class="bottomright"></div>

    default_theme = 'default'
    """Name of the default theme."""

    base_theme_file = '__base__'
    """Name of the file containing the name of the base theme."""

    direct_theme_files = (
        'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
    """Names of theme files directly linked to in the output HTML"""

    indirect_theme_files = (
        's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
    """Names of files used indirectly; imported or used by files in
    `direct_theme_files`."""

    required_theme_files = indirect_theme_files + direct_theme_files
    """Names of mandatory theme files."""
def __init__(self, *args):
    """Set up the translator: resolve/copy the theme, then queue the
    S5 configuration markup for insertion into the document head."""
    html4css1.HTMLTranslator.__init__(self, *args)
    #insert S5-specific stylesheet and script stuff:
    self.theme_file_path = None
    # Sets self.theme_file_path (copying theme files if necessary).
    self.setup_theme()
    view_mode = self.document.settings.view_mode
    # Index a 2-tuple with the boolean setting: False -> 'visible',
    # True -> 'hidden'.
    control_visibility = ('visible', 'hidden')[self.document.settings
                                               .hidden_controls]
    self.stylesheet.append(self.s5_stylesheet_template
                           % {'path': self.theme_file_path,
                              'view_mode': view_mode,
                              'control_visibility': control_visibility})
    if not self.document.settings.current_slide:
        self.stylesheet.append(self.disable_current_slide)
    self.add_meta('<meta name="version" content="S5 1.1" />\n')
    # Accumulators filled by depart_header/depart_footer/visit_section.
    self.s5_footer = []
    self.s5_header = []
    self.section_count = 0
    self.theme_files_copied = None
def setup_theme(self):
    """Resolve the slide theme: copy a named theme's files next to the
    output, or just link to an external theme URL.  Raises
    docutils.ApplicationError when neither setting is present."""
    settings = self.document.settings
    if settings.theme:
        # Named theme: files are copied into ui/<theme>/.
        self.copy_theme()
        return
    if settings.theme_url:
        # Remote theme: link only, nothing is copied.
        self.theme_file_path = settings.theme_url
        return
    raise docutils.ApplicationError(
        'No theme specified for S5/HTML writer.')
def copy_theme(self):
    """
    Locate & copy theme files.

    A theme may be explicitly based on another theme via a '__base__'
    file.  The default base theme is 'default'.  Files are accumulated
    from the specified theme, any base themes, and 'default'.
    """
    settings = self.document.settings
    path = find_theme(settings.theme)
    theme_paths = [path]
    self.theme_files_copied = {}
    required_files_copied = {}
    # This is a link (URL) in HTML, so we use "/", not os.sep:
    self.theme_file_path = '%s/%s' % ('ui', settings.theme)
    if settings._destination:
        dest = os.path.join(
            os.path.dirname(settings._destination), 'ui', settings.theme)
        if not os.path.isdir(dest):
            os.makedirs(dest)
    else:
        # no destination, so we can't copy the theme
        return
    default = 0
    # Walk the theme-inheritance chain: copy one theme's files per
    # iteration, then follow its "__base__" pointer.
    while path:
        for f in os.listdir(path):  # copy all files from each theme
            if f == self.base_theme_file:
                continue  # ... except the "__base__" file
            if ( self.copy_file(f, path, dest)
                 and f in self.required_theme_files):
                required_files_copied[f] = 1
        if default:
            break  # "default" theme has no base theme
        # Find the "__base__" file in theme directory:
        base_theme_file = os.path.join(path, self.base_theme_file)
        # If it exists, read it and record the theme path:
        if os.path.isfile(base_theme_file):
            lines = open(base_theme_file).readlines()
            # First non-blank, non-comment line names the base theme.
            for line in lines:
                line = line.strip()
                if line and not line.startswith('#'):
                    path = find_theme(line)
                    if path in theme_paths:  # check for duplicates (cycles)
                        path = None  # if found, use default base
                    else:
                        theme_paths.append(path)
                    break
            else:  # no theme name found
                path = None  # use default base
        else:  # no base theme file found
            path = None  # use default base
        if not path:
            path = find_theme(self.default_theme)
            theme_paths.append(path)
            default = 1
    if len(required_files_copied) != len(self.required_theme_files):
        # Some required files weren't found & couldn't be copied.
        required = list(self.required_theme_files)
        for f in required_files_copied.keys():
            required.remove(f)
        raise docutils.ApplicationError(
            'Theme files not found: %s'
            % ', '.join(['%r' % f for f in required]))
# Editor backups and VCS metadata are never copied into the theme dir.
files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')

def copy_file(self, name, source_dir, dest_dir):
    """
    Copy file `name` from `source_dir` to `dest_dir`.
    Return 1 if the file exists in either `source_dir` or `dest_dir`.
    """
    source = os.path.join(source_dir, name)
    dest = os.path.join(dest_dir, name)
    # Process each destination at most once per run.
    if dest in self.theme_files_copied:
        return 1
    else:
        self.theme_files_copied[dest] = 1
    if os.path.isfile(source):
        if self.files_to_skip_pattern.search(source):
            return None
        settings = self.document.settings
        if os.path.exists(dest) and not settings.overwrite_theme_files:
            # Keep the existing file; just record it as a dependency.
            settings.record_dependencies.add(dest)
        else:
            src_file = open(source, 'rb')
            src_data = src_file.read()
            src_file.close()
            dest_file = open(dest, 'wb')
            dest_dir = dest_dir.replace(os.sep, '/')
            # Rewrite hard-coded "ui/default" references inside the theme
            # file to point at the actual destination directory.
            # NOTE(review): ``b(...)`` looks like a bytes-literal helper
            # defined elsewhere in this file — not visible in this chunk.
            dest_file.write(src_data.replace(
                b('ui/default'),
                dest_dir[dest_dir.rfind('ui/'):].encode(
                    sys.getfilesystemencoding())))
            dest_file.close()
            settings.record_dependencies.add(source)
        return 1
    if os.path.isfile(dest):
        return 1
def depart_document(self, node):
    """Assemble the final S5 body: layout skeleton, presentation
    wrapper, and the title slide ("slide0")."""
    header = ''.join(self.s5_header)
    footer = ''.join(self.s5_footer)
    # Strip the "title" class so the title slide heading uses the
    # theme's plain <h1> styling.
    title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
    layout = self.layout_template % {'header': header,
                                     'title': title,
                                     'footer': footer}
    self.fragment.extend(self.body)
    self.body_prefix.extend(layout)
    self.body_prefix.append('<div class="presentation">\n')
    self.body_prefix.append(
        self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
    if not self.section_count:
        # No section ever closed slide0 (see visit_section); close it.
        self.body.append('</div>\n')
    self.body_suffix.insert(0, '</div>\n')
    # skip content-type meta tag with interpolated charset value:
    self.html_head.extend(self.head[1:])
    self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                          + self.docinfo + self.body
                          + self.body_suffix[:-1])
def depart_footer(self, node):
    """Move the footer rendered into ``self.body`` over to
    ``self.s5_footer``, wrapped in an <h2> element."""
    begin = self.context.pop()
    rendered = self.body[begin:]
    del self.body[begin:]
    self.s5_footer.append('<h2>')
    self.s5_footer.extend(rendered)
    self.s5_footer.append('</h2>')
def depart_header(self, node):
    """Move the header rendered into ``self.body`` over to
    ``self.s5_header``, wrapped in a <div id="header"> element."""
    begin = self.context.pop()
    rendered = self.body[begin:]
    del self.body[begin:]
    self.s5_header.append('<div id="header">\n')
    self.s5_header.extend(rendered)
    self.s5_header.append('\n</div>\n')
def visit_section(self, node):
    """Open a slide <div> for each top-level section; nested sections
    get plain section <div>s."""
    if self.section_count == 0:
        # Close the title slide ("slide0") opened in depart_document's
        # body_prefix.
        self.body.append('\n</div>\n')
    self.section_count += 1
    self.section_level += 1
    # Deeper levels emit dummy divs so the matching </div>s line up.
    css_class = 'slide' if self.section_level == 1 else 'section'
    self.body.append(self.starttag(node, 'div', CLASS=css_class))
def visit_subtitle(self, node):
    """Render section subtitles as headings one level below the section
    title (minimum <h2>); defer all other subtitles to the base class."""
    if not isinstance(node.parent, nodes.section):
        return html4css1.HTMLTranslator.visit_subtitle(self, node)
    heading = self.section_level + self.initial_header_level - 1
    if heading == 1:
        # Never emit <h1> for a subtitle.
        heading = 2
    tag = 'h%s' % heading
    self.body.append(self.starttag(node, tag, ''))
    self.context.append('</%s>\n' % tag)
def visit_title(self, node):
    """Delegate title rendering to the html4css1 base translator."""
    html4css1.HTMLTranslator.visit_title(self, node)
| Python |
# $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
    """Base class for the odtwriter Pygments formatters.

    Holds the two callbacks supplied by the writer: one mapping a
    logical style name to an ODF style name, and one escaping text for
    inclusion in XML.
    """

    def __init__(self, rststyle_function, escape_function):
        pygments.formatter.Formatter.__init__(self)
        self.escape_function = escape_function
        self.rststyle_function = rststyle_function

    def rststyle(self, name, parameters=()):
        """Return the ODF style name for logical style `name`."""
        result = self.rststyle_function(name, parameters)
        return result
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
    """Pygments formatter for program-language code blocks.

    Each recognized token is wrapped in an ODF <text:span> carrying the
    matching ``codeblock-*`` style; unrecognized tokens are written
    escaped but unstyled.
    """

    def format(self, tokensource, outfile):
        """Write the (token, value) stream to `outfile` as ODF text.

        Refactored from a long duplicated if/elif chain into a single
        token-type -> style-name mapping.  Dict lookup uses the same
        exact-type matching (``==`` / ``in``) as the original chain, so
        token subtypes still fall through to the unstyled branch.
        """
        tokenclass = pygments.token.Token
        style_map = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.Number.Integer: 'codeblock-number',
            tokenclass.Literal.Number.Integer.Long: 'codeblock-number',
            tokenclass.Literal.Number.Float: 'codeblock-number',
            tokenclass.Literal.Number.Hex: 'codeblock-number',
            tokenclass.Literal.Number.Oct: 'codeblock-number',
            tokenclass.Literal.Number: 'codeblock-number',
            tokenclass.Operator: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Class: 'codeblock-classname',
            tokenclass.Name.Function: 'codeblock-functionname',
            tokenclass.Name: 'codeblock-name',
        }
        for ttype, value in tokensource:
            value = self.escape_function(value)
            stylename = style_map.get(ttype)
            if stylename is not None:
                s1 = '<text:span text:style-name="%s">%s</text:span>' % (
                    self.rststyle(stylename), value, )
            else:
                s1 = value
            outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
    """Pygments formatter for LaTeX code blocks.

    Like `OdtPygmentsProgFormatter`, but with the LaTeX-relevant token
    set and special handling for comments ending in a newline.
    """

    def format(self, tokensource, outfile):
        """Write the (token, value) stream to `outfile` as ODF text.

        Refactored from a duplicated if/elif chain into a token-type ->
        style-name mapping with identical exact-type matching.
        """
        tokenclass = pygments.token.Token
        style_map = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.String.Backtick: 'codeblock-string',
            tokenclass.Name.Attribute: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Builtin: 'codeblock-name',
        }
        span = '<text:span text:style-name="%s">%s</text:span>'
        for ttype, value in tokensource:
            value = self.escape_function(value)
            stylename = style_map.get(ttype)
            if stylename is None:
                s1 = value
            elif ttype == tokenclass.Comment and value[-1] == '\n':
                # Keep the trailing newline outside the styled span.
                s1 = span % (self.rststyle(stylename), value[:-1], ) + '\n'
            else:
                s1 = span % (self.rststyle(stylename), value, )
            outfile.write(s1)
| Python |
# $Id: __init__.py 6034 2009-07-20 18:46:51Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Open Document Format (ODF) Writer.
"""
VERSION = '1.0a'
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import tempfile
import zipfile
from xml.dom import minidom
import time
import re
import StringIO
import inspect
import imp
import copy
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.parsers import rst
from docutils.readers import standalone
from docutils.transforms import references
# Pick an ElementTree implementation.  lxml support is currently
# short-circuited (see the deliberate ImportError below), so one of the
# plain ElementTree variants is used; ``WhichElementTree`` records
# which implementation was found ('' means lookup not yet done).
WhichElementTree = ''
try:
    # 1. Try to use lxml.
    #from lxml import etree
    #WhichElementTree = 'lxml'
    raise ImportError('Ignoring lxml')
except ImportError, e:
    try:
        # 2. Try to use ElementTree from the Python standard library.
        from xml.etree import ElementTree as etree
        WhichElementTree = 'elementtree'
    except ImportError, e:
        try:
            # 3. Try to use a version of ElementTree installed as a separate
            #    product.
            from elementtree import ElementTree as etree
            WhichElementTree = 'elementtree'
        except ImportError, e:
            # No usable implementation at all: fail loudly at import time.
            s1 = 'Must install either a version of Python containing ' \
                'ElementTree (Python version >=2.5) or install ElementTree.'
            raise ImportError(s1)
#
# Import pygments and odtwriter pygments formatters if possible.
# ``pygments`` is left as None when the package is missing (code
# elsewhere in this module presumably tests for that — not visible in
# this chunk).
try:
    import pygments
    import pygments.lexers
    from pygmentsformatter import OdtPygmentsProgFormatter, \
        OdtPygmentsLaTeXFormatter
except ImportError, exp:
    pygments = None
#
# Is the PIL imaging library installed?
# ``Image`` is left as None when PIL is missing; its use is not visible
# in this chunk.
try:
    import Image
except ImportError, exp:
    Image = None
## import warnings
## warnings.warn('importing IPShellEmbed', UserWarning)
## from IPython.Shell import IPShellEmbed
## args = ['-pdb', '-pi1', 'In <\\#>: ', '-pi2', ' .\\D.: ',
## '-po', 'Out<\\#>: ', '-nosep']
## ipshell = IPShellEmbed(args,
## banner = 'Entering IPython. Press Ctrl-D to exit.',
## exit_msg = 'Leaving Interpreter, back to program.')
#
# ElementTree does not support getparent method (lxml does).
# This wrapper class and the following support functions provide
# that support for the ability to get the parent of an element.
#
if WhichElementTree == 'elementtree':
    class _ElementInterfaceWrapper(etree._ElementInterface):
        """ElementTree element that also tracks its parent.

        Plain ElementTree (unlike lxml) provides no ``getparent()``;
        this wrapper adds one, maintained by the `SubElement` helper
        below.
        """

        def __init__(self, tag, attrib=None):
            # Bug fix: normalize ``attrib`` *before* delegating.  The
            # original rebound the local to {} only after the base-class
            # call, which had no effect (the base class received None).
            if attrib is None:
                attrib = {}
            etree._ElementInterface.__init__(self, tag, attrib)
            self.parent = None

        def setparent(self, parent):
            self.parent = parent

        def getparent(self):
            return self.parent
#
# Constants and globals
# Precompiled whitespace patterns (their use is elsewhere in this
# module, not visible in this chunk):
SPACES_PATTERN = re.compile(r'( +)')    # runs of one or more spaces
TABS_PATTERN = re.compile(r'(\t+)')     # runs of one or more tabs
FILL_PAT1 = re.compile(r'^ +')          # leading spaces
FILL_PAT2 = re.compile(r' {2,}')        # two or more adjacent spaces
TableStylePrefix = 'Table'
GENERATOR_DESC = 'Docutils.org/odf_odt'  # generator id (usage not visible here)
# ODF "office" namespace URI, shared by the namespace maps below.
NAME_SPACE_1 = 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'
CONTENT_NAMESPACE_DICT = CNSD = {
# 'office:version': '1.0',
'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'dom': 'http://www.w3.org/2001/xml-events',
'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'math': 'http://www.w3.org/1998/Math/MathML',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'office': NAME_SPACE_1,
'ooo': 'http://openoffice.org/2004/office',
'oooc': 'http://openoffice.org/2004/calc',
'ooow': 'http://openoffice.org/2004/writer',
'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xforms': 'http://www.w3.org/2002/xforms',
'xlink': 'http://www.w3.org/1999/xlink',
'xsd': 'http://www.w3.org/2001/XMLSchema',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
}
STYLES_NAMESPACE_DICT = SNSD = {
# 'office:version': '1.0',
'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'dom': 'http://www.w3.org/2001/xml-events',
'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'math': 'http://www.w3.org/1998/Math/MathML',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'office': NAME_SPACE_1,
'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'ooo': 'http://openoffice.org/2004/office',
'oooc': 'http://openoffice.org/2004/calc',
'ooow': 'http://openoffice.org/2004/writer',
'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xlink': 'http://www.w3.org/1999/xlink',
}
MANIFEST_NAMESPACE_DICT = MANNSD = {
'manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
}
META_NAMESPACE_DICT = METNSD = {
# 'office:version': '1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'office': NAME_SPACE_1,
'ooo': 'http://openoffice.org/2004/office',
'xlink': 'http://www.w3.org/1999/xlink',
}
#
# Attribute dictionaries for use with ElementTree (not lxml), which
# does not support use of nsmap parameter on Element() and SubElement().
CONTENT_NAMESPACE_ATTRIB = {
'office:version': '1.0',
'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:dom': 'http://www.w3.org/2001/xml-events',
'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:oooc': 'http://openoffice.org/2004/calc',
'xmlns:ooow': 'http://openoffice.org/2004/writer',
'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xmlns:xforms': 'http://www.w3.org/2002/xforms',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
}
STYLES_NAMESPACE_ATTRIB = {
'office:version': '1.0',
'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:dom': 'http://www.w3.org/2001/xml-events',
'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:oooc': 'http://openoffice.org/2004/calc',
'xmlns:ooow': 'http://openoffice.org/2004/writer',
'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
}
MANIFEST_NAMESPACE_ATTRIB = {
'xmlns:manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
}
META_NAMESPACE_ATTRIB = {
'office:version': '1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
}
#
# Functions
#
#
# ElementTree support functions.
# In order to be able to get the parent of elements, must use these
# instead of the functions with same name provided by ElementTree.
#
def Element(tag, attrib=None, nsmap=None, nsdict=CNSD):
    """Create a namespace-qualified element.

    Replacement for ``etree.Element`` that resolves the ``prefix:name``
    tag and attribute keys via `nsdict` and, for plain ElementTree,
    returns the parent-tracking wrapper element instead.
    """
    attrib = {} if attrib is None else attrib
    tag, attrib = fix_ns(tag, attrib, nsdict)
    if WhichElementTree == 'lxml':
        return etree.Element(tag, attrib, nsmap=nsmap)
    return _ElementInterfaceWrapper(tag, attrib)
def SubElement(parent, tag, attrib=None, nsmap=None, nsdict=CNSD):
    """Create a namespace-qualified child element of `parent`.

    Replacement for ``etree.SubElement``; for plain ElementTree the
    child is a parent-tracking wrapper and its parent link is set here.
    """
    attrib = {} if attrib is None else attrib
    tag, attrib = fix_ns(tag, attrib, nsdict)
    if WhichElementTree == 'lxml':
        return etree.SubElement(parent, tag, attrib, nsmap=nsmap)
    child = _ElementInterfaceWrapper(tag, attrib)
    parent.append(child)
    child.setparent(parent)
    return child
def fix_ns(tag, attrib, nsdict):
    """Return (tag, attrib) with namespace prefixes resolved via
    `nsdict` (a no-op for plain ElementTree; see `add_ns`)."""
    fixed_tag = add_ns(tag, nsdict)
    fixed_attrib = dict(
        (add_ns(key, nsdict), val) for key, val in attrib.iteritems())
    return fixed_tag, fixed_attrib
def add_ns(tag, nsdict=CNSD):
    """Expand a ``prefix:name`` tag to Clark notation ``{uri}name``
    under lxml; return the tag unchanged for plain ElementTree.

    Raises RuntimeError when the prefix is not in `nsdict`.
    """
    if WhichElementTree != 'lxml':
        return tag
    prefix, name = tag.split(':')
    ns = nsdict.get(prefix)
    if ns is None:
        raise RuntimeError('Invalid namespace prefix: %s' % prefix)
    return '{%s}%s' % (ns, name,)
def ToString(et):
    """Serialize ElementTree `et` to a string via an in-memory buffer."""
    buf = StringIO.StringIO()
    et.write(buf)
    try:
        return buf.getvalue()
    finally:
        buf.close()
def escape_cdata(text):
    """Escape `text` for use as XML character data.

    The XML-special characters are replaced by entity references
    ("&" first, so the entities just produced are not re-escaped), and
    every character >= U+007F is emitted as a numeric character
    reference, yielding a pure-ASCII result.
    """
    # Bug fix: the replacement strings had been mangled into no-ops
    # (each character was "replaced" by itself); restore the real
    # XML entities.
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    # Build via list + join instead of quadratic string concatenation.
    chars = []
    for char in text:
        if ord(char) >= 0x7f:   # same threshold as ord("\x7f")
            chars.append("&#x%X;" % (ord(char), ))
        else:
            chars.append(char)
    return ''.join(chars)
#
# Classes
#
# Matches one word (run of word characters) plus any trailing
# non-word characters.
WORD_SPLIT_PAT1 = re.compile(r'\b(\w*)\b\W*')

def split_words(line):
    """Return the words in `line`, discarding punctuation/whitespace."""
    # The regexpr needs trailing whitespace to terminate the last word;
    # finditer walks the string exactly like the original search loop.
    return [match.group(1)
            for match in WORD_SPLIT_PAT1.finditer(line + ' ')]
#
# Information about the indentation level for lists nested inside
# other contexts, e.g. dictionary lists.
class ListLevel(object):
    """Indentation bookkeeping for lists nested inside other contexts
    (e.g. definition lists): a numeric level plus flags controlling
    sibling and nested indentation."""

    def __init__(self, level, sibling_level=True, nested_level=True):
        self.level = level
        self.sibling_level = sibling_level
        self.nested_level = nested_level

    def set_sibling(self, sibling_level):
        self.sibling_level = sibling_level

    def get_sibling(self):
        return self.sibling_level

    def set_nested(self, nested_level):
        self.nested_level = nested_level

    def get_nested(self):
        return self.nested_level

    def set_level(self, level):
        self.level = level

    def get_level(self):
        return self.level
class Writer(writers.Writer):
    """Docutils writer producing an OpenDocument Text (.odt) archive."""

    MIME_TYPE = 'application/vnd.oasis.opendocument.text'
    EXTENSION = '.odt'

    supported = ('html', 'html4css1', 'xhtml')
    """Formats this writer supports."""

    # Default stylesheet (an .odt file shipped next to this module) and
    # template, expressed relative to the current working directory.
    default_stylesheet = 'styles' + EXTENSION
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))
    default_template = 'template.txt'
    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'ODF-Specific Options',
None,
(
('Specify a stylesheet. '
'Default: "%s"' % default_stylesheet_path,
['--stylesheet'],
{
'default': default_stylesheet_path,
'dest': 'stylesheet'
}),
('Specify a configuration/mapping file relative to the '
'current working '
'directory for additional ODF options. '
'In particular, this file may contain a section named '
'"Formats" that maps default style names to '
'names to be used in the resulting output file allowing for '
'adhering to external standards. '
'For more info and the format of the configuration/mapping file, '
'see the odtwriter doc.',
['--odf-config-file'],
{'metavar': '<file>'}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'default': False,
'action': 'store_true',
'dest': 'cloak_email_addresses',
'validator': frontend.validate_boolean}),
('Do not obfuscate email addresses.',
['--no-cloak-email-addresses'],
{'default': False,
'action': 'store_false',
'dest': 'cloak_email_addresses',
'validator': frontend.validate_boolean}),
('Specify the thickness of table borders in thousands of a cm. '
'Default is 35.',
['--table-border-thickness'],
{'default': 35,
'validator': frontend.validate_nonnegative_int}),
('Add syntax highlighting in literal code blocks.',
['--add-syntax-highlighting'],
{'default': False,
'action': 'store_true',
'dest': 'add_syntax_highlighting',
'validator': frontend.validate_boolean}),
('Do not add syntax highlighting in literal code blocks. (default)',
['--no-syntax-highlighting'],
{'default': False,
'action': 'store_false',
'dest': 'add_syntax_highlighting',
'validator': frontend.validate_boolean}),
('Create sections for headers. (default)',
['--create-sections'],
{'default': True,
'action': 'store_true',
'dest': 'create_sections',
'validator': frontend.validate_boolean}),
('Do not create sections for headers.',
['--no-sections'],
{'default': True,
'action': 'store_false',
'dest': 'create_sections',
'validator': frontend.validate_boolean}),
('Create links.',
['--create-links'],
{'default': False,
'action': 'store_true',
'dest': 'create_links',
'validator': frontend.validate_boolean}),
('Do not create links. (default)',
['--no-links'],
{'default': False,
'action': 'store_false',
'dest': 'create_links',
'validator': frontend.validate_boolean}),
('Generate endnotes at end of document, not footnotes '
'at bottom of page.',
['--endnotes-end-doc'],
{'default': False,
'action': 'store_true',
'dest': 'endnotes_end_doc',
'validator': frontend.validate_boolean}),
('Generate footnotes at bottom of page, not endnotes '
'at end of document. (default)',
['--no-endnotes-end-doc'],
{'default': False,
'action': 'store_false',
'dest': 'endnotes_end_doc',
'validator': frontend.validate_boolean}),
('Generate a bullet list table of contents, not '
'an ODF/oowriter table of contents.',
['--generate-list-toc'],
{'default': True,
'action': 'store_false',
'dest': 'generate_oowriter_toc',
'validator': frontend.validate_boolean}),
('Generate an ODF/oowriter table of contents, not '
'a bullet list. (default)',
['--generate-oowriter-toc'],
{'default': True,
'action': 'store_true',
'dest': 'generate_oowriter_toc',
'validator': frontend.validate_boolean}),
)
)
settings_defaults = {
    # Characters unrepresentable in the output encoding become XML
    # character references.
    'output_encoding_error_handler': 'xmlcharrefreplace',
}
relative_path_settings = (
    'stylesheet_path',
)
config_section = 'opendocument odf writer'
config_section_dependencies = (
    'writers',
)
def __init__(self):
    """Initialize the base writer and install the ODF translator."""
    writers.Writer.__init__(self)
    self.translator_class = ODFTranslator
def translate(self):
    """Walk the document tree with the ODF translator, then assemble
    the complete .odt archive into ``self.output``."""
    self.settings = self.document.settings
    self.visitor = self.translator_class(self.document)
    self.document.walkabout(self.visitor)
    self.visitor.add_doc_title()
    self.assemble_my_parts()
    self.output = self.parts['whole']
def assemble_my_parts(self):
    """Assemble the `self.parts` dictionary.  Extend in subclasses.

    Builds the .odt zip archive (content, mimetype, manifest, meta,
    styles, settings, embedded files) in a temporary file and stores
    the raw archive bytes under ``self.parts['whole']``.
    """
    writers.Writer.assemble_parts(self)
    f = tempfile.NamedTemporaryFile()
    zfile = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
    content = self.visitor.content_astext()
    self.write_zip_str(zfile, 'content.xml', content)
    self.write_zip_str(zfile, 'mimetype', self.MIME_TYPE)
    s1 = self.create_manifest()
    self.write_zip_str(zfile, 'META-INF/manifest.xml', s1)
    s1 = self.create_meta()
    self.write_zip_str(zfile, 'meta.xml', s1)
    s1 = self.get_stylesheet()
    self.write_zip_str(zfile, 'styles.xml', s1)
    s1 = self.get_settings()
    self.write_zip_str(zfile, 'settings.xml', s1)
    self.store_embedded_files(zfile)
    zfile.close()
    # Read the finished archive back out of the temporary file.
    f.seek(0)
    whole = f.read()
    f.close()
    self.parts['whole'] = whole
    self.parts['encoding'] = self.document.settings.output_encoding
    self.parts['version'] = docutils.__version__
def write_zip_str(self, zfile, name, bytes):
    """Store `bytes` in `zfile` under `name`, stamped with the current
    local time and sane UNIX permissions."""
    zinfo = zipfile.ZipInfo(name, time.localtime(time.time()))
    # Standard UNIX file access permissions (-rw-r--r--) live in the
    # high 16 bits of the external attributes field.
    zinfo.external_attr = (0x81a4 & 0xFFFF) << 16
    zinfo.compress_type = zipfile.ZIP_DEFLATED
    zfile.writestr(zinfo, bytes)
def store_embedded_files(self, zfile):
embedded_files = self.visitor.get_embedded_file_list()
for source, destination in embedded_files:
if source is None:
continue
try:
# encode/decode
destination1 = destination.decode('latin-1').encode('utf-8')
zfile.write(source, destination1, zipfile.ZIP_STORED)
except OSError, e:
self.document.reporter.warning(
"Can't open file %s." % (source, ))
def get_settings(self):
    """Return the ``settings.xml`` member of the stylesheet .odt.

    Modeled after `get_stylesheet`.
    """
    stylespath = self.settings.stylesheet
    zfile = zipfile.ZipFile(stylespath, 'r')
    try:
        return zfile.read('settings.xml')
    finally:
        # Close even when the member is missing/corrupt; the original
        # leaked the open zip file on error.
        zfile.close()
def get_stylesheet(self):
    """Retrieve the stylesheet from either a .xml file or from
    a .odt (zip) file.  Return the content as a string.

    Raises RuntimeError for any other stylesheet extension.
    """
    stylespath = self.settings.stylesheet
    ext = os.path.splitext(stylespath)[1]
    if ext == '.xml':
        stylesfile = open(stylespath, 'r')
        try:
            s1 = stylesfile.read()
        finally:
            # Bug fix: the original leaked the handle if read() failed.
            stylesfile.close()
    elif ext == self.EXTENSION:
        zfile = zipfile.ZipFile(stylespath, 'r')
        try:
            s1 = zfile.read('styles.xml')
        finally:
            zfile.close()
    else:
        raise RuntimeError(
            'stylesheet path (%s) must be %s or .xml file'
            % (stylespath, self.EXTENSION))
    # Let the translator patch page-layout styles before use.
    s1 = self.visitor.setup_page(s1)
    return s1
def assemble_parts(self):
    """Deliberately do nothing: this writer assembles its parts in
    `assemble_my_parts`, called from `translate`, so the base-class
    behavior is suppressed here."""
    pass
def create_manifest(self):
    """Build META-INF/manifest.xml listing the archive members and
    return it pretty-printed as a string."""
    if WhichElementTree == 'lxml':
        root = Element('manifest:manifest',
                       nsmap=MANIFEST_NAMESPACE_DICT,
                       nsdict=MANIFEST_NAMESPACE_DICT,
                       )
    else:
        root = Element('manifest:manifest',
                       attrib=MANIFEST_NAMESPACE_ATTRIB,
                       nsdict=MANIFEST_NAMESPACE_DICT,
                       )
    doc = etree.ElementTree(root)
    # One <manifest:file-entry> per archive member, in a fixed order.
    entries = (
        (self.MIME_TYPE, '/'),
        ('text/xml', 'content.xml'),
        ('text/xml', 'styles.xml'),
        ('text/xml', 'meta.xml'),
    )
    for media_type, full_path in entries:
        SubElement(root, 'manifest:file-entry', attrib={
            'manifest:media-type': media_type,
            'manifest:full-path': full_path,
        }, nsdict=MANNSD)
    s1 = ToString(doc)
    doc = minidom.parseString(s1)
    s1 = doc.toprettyxml(' ')
    return s1
def create_meta(self):
    """Build meta.xml (generator, creator, dates, title, keywords,
    description) and return it serialized as a string."""
    if WhichElementTree == 'lxml':
        root = Element('office:document-meta',
                       nsmap=META_NAMESPACE_DICT,
                       nsdict=META_NAMESPACE_DICT,
                       )
    else:
        root = Element('office:document-meta',
                       attrib=META_NAMESPACE_ATTRIB,
                       nsdict=META_NAMESPACE_DICT,
                       )
    doc = etree.ElementTree(root)
    root = SubElement(root, 'office:meta', nsdict=METNSD)
    el1 = SubElement(root, 'meta:generator', nsdict=METNSD)
    el1.text = 'Docutils/rst2odf.py/%s' % (VERSION, )
    # Creator defaults to the USER environment variable (may be '').
    s1 = os.environ.get('USER', '')
    el1 = SubElement(root, 'meta:initial-creator', nsdict=METNSD)
    el1.text = s1
    # Creation/modification timestamps both use "now".
    s2 = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime())
    el1 = SubElement(root, 'meta:creation-date', nsdict=METNSD)
    el1.text = s2
    el1 = SubElement(root, 'dc:creator', nsdict=METNSD)
    el1.text = s1
    el1 = SubElement(root, 'dc:date', nsdict=METNSD)
    el1.text = s2
    el1 = SubElement(root, 'dc:language', nsdict=METNSD)
    el1.text = 'en-US'
    el1 = SubElement(root, 'meta:editing-cycles', nsdict=METNSD)
    el1.text = '1'
    el1 = SubElement(root, 'meta:editing-duration', nsdict=METNSD)
    el1.text = 'PT00M01S'
    title = self.visitor.get_title()
    el1 = SubElement(root, 'dc:title', nsdict=METNSD)
    if title:
        el1.text = title
    else:
        el1.text = '[no title]'
    meta_dict = self.visitor.get_meta_dict()
    keywordstr = meta_dict.get('keywords')
    if keywordstr is not None:
        # One <meta:keyword> element per word (see split_words).
        keywords = split_words(keywordstr)
        for keyword in keywords:
            el1 = SubElement(root, 'meta:keyword', nsdict=METNSD)
            el1.text = keyword
    description = meta_dict.get('description')
    if description is not None:
        el1 = SubElement(root, 'dc:description', nsdict=METNSD)
        el1.text = description
    s1 = ToString(doc)
    #doc = minidom.parseString(s1)
    #s1 = doc.toprettyxml(' ')
    return s1
# class ODFTranslator(nodes.SparseNodeVisitor):
class ODFTranslator(nodes.GenericNodeVisitor):
used_styles = (
'attribution', 'blockindent', 'blockquote', 'blockquote-bulletitem',
'blockquote-bulletlist', 'blockquote-enumitem', 'blockquote-enumlist',
'bulletitem', 'bulletlist', 'caption', 'centeredtextbody', 'codeblock',
'codeblock-classname', 'codeblock-comment', 'codeblock-functionname',
'codeblock-keyword', 'codeblock-name', 'codeblock-number',
'codeblock-operator', 'codeblock-string', 'emphasis', 'enumitem',
'enumlist', 'epigraph', 'epigraph-bulletitem', 'epigraph-bulletlist',
'epigraph-enumitem', 'epigraph-enumlist', 'footer',
'footnote', 'citation',
'header', 'highlights', 'highlights-bulletitem',
'highlights-bulletlist', 'highlights-enumitem', 'highlights-enumlist',
'horizontalline', 'inlineliteral', 'quotation', 'rubric',
'strong', 'table-title', 'textbody', 'tocbulletlist', 'tocenumlist',
'title',
'subtitle',
'heading1',
'heading2',
'heading3',
'heading4',
'heading5',
'heading6',
'heading7',
'admon-attention-hdr',
'admon-attention-body',
'admon-caution-hdr',
'admon-caution-body',
'admon-danger-hdr',
'admon-danger-body',
'admon-error-hdr',
'admon-error-body',
'admon-generic-hdr',
'admon-generic-body',
'admon-hint-hdr',
'admon-hint-body',
'admon-important-hdr',
'admon-important-body',
'admon-note-hdr',
'admon-note-body',
'admon-tip-hdr',
'admon-tip-body',
'admon-warning-hdr',
'admon-warning-body',
'tableoption',
'tableoption.%c', 'tableoption.%c%d', 'Table%d', 'Table%d.%c',
'Table%d.%c%d',
'lineblock1',
'lineblock2',
'lineblock3',
'lineblock4',
'lineblock5',
'lineblock6',
)
    def __init__(self, document):
        """Initialize translator state.

        Loads optional style-name overrides from the odf_config_file,
        builds the skeleton of the content.xml tree, and initializes the
        counters and stacks used by the visit_*/depart_* methods.
        """
        #nodes.SparseNodeVisitor.__init__(self, document)
        nodes.GenericNodeVisitor.__init__(self, document)
        self.settings = document.settings
        self.format_map = { }
        if self.settings.odf_config_file:
            from ConfigParser import ConfigParser
            parser = ConfigParser()
            parser.read(self.settings.odf_config_file)
            # [Formats] maps rst style name -> ODF style name; unknown
            # names are kept but flagged with a warning.
            for rststyle, format in parser.items("Formats"):
                if rststyle not in self.used_styles:
                    self.document.reporter.warning(
                        'Style "%s" is not a style used by odtwriter.' % (
                            rststyle, ))
                self.format_map[rststyle] = format
        self.section_level = 0
        self.section_count = 0
        # Create ElementTree content and styles documents.
        if WhichElementTree == 'lxml':
            root = Element(
                'office:document-content',
                nsmap=CONTENT_NAMESPACE_DICT,
                )
        else:
            root = Element(
                'office:document-content',
                attrib=CONTENT_NAMESPACE_ATTRIB,
                )
        self.content_tree = etree.ElementTree(element=root)
        self.current_element = root
        SubElement(root, 'office:scripts')
        SubElement(root, 'office:font-face-decls')
        el = SubElement(root, 'office:automatic-styles')
        self.automatic_styles = el
        el = SubElement(root, 'office:body')
        el = self.generate_content_element(el)
        self.current_element = el
        self.body_text_element = el
        # Stacks tracking the active paragraph/list styles while
        # descending into nested constructs.
        self.paragraph_style_stack = [self.rststyle('textbody'), ]
        self.list_style_stack = []
        # Counters used to generate unique table/column style names.
        self.table_count = 0
        self.column_count = ord('A') - 1
        self.trace_level = -1
        self.optiontablestyles_generated = False
        self.field_name = None
        self.field_element = None
        self.title = None
        # Image bookkeeping: files that must be embedded in the package.
        self.image_count = 0
        self.image_style_count = 0
        self.image_dict = {}
        self.embedded_file_list = []
        self.syntaxhighlighting = 1
        self.syntaxhighlight_lexer = 'python'
        # Page header/footer paragraphs collected during traversal.
        self.header_content = []
        self.footer_content = []
        self.in_header = False
        self.in_footer = False
        self.blockstyle = ''
        self.in_table_of_contents = False
        self.table_of_content_index_body = None
        self.list_level = 0
        # Footnote machinery; bodies are attached in process_footnotes.
        self.footnote_ref_dict = {}
        self.footnote_list = []
        self.footnote_chars_idx = 0
        self.footnote_level = 0
        self.pending_ids = [ ]
        self.in_paragraph = False
        self.found_doc_title = False
        self.bumped_list_level_stack = []
        self.meta_dict = {}
        self.line_block_level = 0
        self.line_indent_level = 0
        self.citation_id = None
def add_doc_title(self):
text = self.settings.title
if text:
self.title = text
if not self.found_doc_title:
el = Element('text:p', attrib = {
'text:style-name': self.rststyle('title'),
})
el.text = text
self.body_text_element.insert(0, el)
def rststyle(self, name, parameters=( )):
"""
Returns the style name to use for the given style.
If `parameters` is given `name` must contain a matching number of ``%`` and
is used as a format expression with `parameters` as the value.
"""
name1 = name % parameters
stylename = self.format_map.get(name1, 'rststyle-%s' % name1)
return stylename
def generate_content_element(self, root):
return SubElement(root, 'office:text')
def setup_page(self, content):
root_el = etree.fromstring(content)
self.setup_paper(root_el)
if len(self.header_content) > 0 or len(self.footer_content) > 0:
self.add_header_footer(root_el)
new_content = etree.tostring(root_el)
return new_content
def setup_paper(self, root_el):
try:
fin = os.popen("paperconf -s 2> /dev/null")
w, h = map(float, fin.read().split())
fin.close()
except:
w, h = 612, 792 # default to Letter
def walk(el):
if el.tag == "{%s}page-layout-properties" % SNSD["style"] and \
not el.attrib.has_key("{%s}page-width" % SNSD["fo"]):
el.attrib["{%s}page-width" % SNSD["fo"]] = "%.3fpt" % w
el.attrib["{%s}page-height" % SNSD["fo"]] = "%.3fpt" % h
el.attrib["{%s}margin-left" % SNSD["fo"]] = \
el.attrib["{%s}margin-right" % SNSD["fo"]] = \
"%.3fpt" % (.1 * w)
el.attrib["{%s}margin-top" % SNSD["fo"]] = \
el.attrib["{%s}margin-bottom" % SNSD["fo"]] = \
"%.3fpt" % (.1 * h)
else:
for subel in el.getchildren(): walk(subel)
walk(root_el)
    def add_header_footer(self, root_el):
        """Attach collected header/footer paragraphs to the master page
        element of the styles document.  No-op if the master page
        cannot be located.
        """
        path = '{%s}master-styles' % (NAME_SPACE_1, )
        master_el = root_el.find(path)
        if master_el is None:
            return
        path = '{%s}master-page' % (SNSD['style'], )
        master_el = master_el.find(path)
        if master_el is None:
            return
        el1 = master_el
        if len(self.header_content) > 0:
            if WhichElementTree == 'lxml':
                el2 = SubElement(el1, 'style:header', nsdict=SNSD)
            else:
                el2 = SubElement(el1, 'style:header',
                    attrib=STYLES_NAMESPACE_ATTRIB,
                    nsdict=STYLES_NAMESPACE_DICT,
                    )
            # Re-style each collected paragraph with the header style.
            for el in self.header_content:
                attrkey = add_ns('text:style-name', nsdict=SNSD)
                el.attrib[attrkey] = self.rststyle('header')
                el2.append(el)
        if len(self.footer_content) > 0:
            if WhichElementTree == 'lxml':
                el2 = SubElement(el1, 'style:footer', nsdict=SNSD)
            else:
                el2 = SubElement(el1, 'style:footer',
                    attrib=STYLES_NAMESPACE_ATTRIB,
                    nsdict=STYLES_NAMESPACE_DICT,
                    )
            for el in self.footer_content:
                attrkey = add_ns('text:style-name', nsdict=SNSD)
                el.attrib[attrkey] = self.rststyle('footer')
                el2.append(el)
    def astext(self):
        """Serialize the content tree and return it as a string."""
        root = self.content_tree.getroot()
        et = etree.ElementTree(root)
        s1 = ToString(et)
        return s1
    def content_astext(self):
        # Alias used by the writer interface.
        return self.astext()
    # Simple accessors used by the writer after translation.
    def set_title(self, title): self.title = title
    def get_title(self): return self.title
    def set_embedded_file_list(self, embedded_file_list):
        self.embedded_file_list = embedded_file_list
    def get_embedded_file_list(self): return self.embedded_file_list
    def get_meta_dict(self): return self.meta_dict
    def process_footnotes(self):
        """Resolve footnote back-references at end of document.

        The first back-reference to each footnote receives a deep copy
        of the footnote body; any further back-references are rewritten
        into text:note-ref cross references, since ODF allows the note
        body to appear only once.
        """
        for node, el1 in self.footnote_list:
            backrefs = node.attributes.get('backrefs', [])
            first = True
            for ref in backrefs:
                el2 = self.footnote_ref_dict.get(ref)
                if el2 is not None:
                    if first:
                        first = False
                        el3 = copy.deepcopy(el1)
                        el2.append(el3)
                    else:
                        # Convert the duplicate text:note into a
                        # text:note-ref pointing at the first occurrence.
                        children = el2.getchildren()
                        if len(children) > 0:  # and 'id' in el2.attrib:
                            child = children[0]
                            ref1 = child.text
                            attribkey = add_ns('text:id', nsdict=SNSD)
                            id1 = el2.get(attribkey, 'footnote-error')
                            if id1 is None:
                                id1 = ''
                            tag = add_ns('text:note-ref', nsdict=SNSD)
                            el2.tag = tag
                            if self.settings.endnotes_end_doc:
                                note_class = 'endnote'
                            else:
                                note_class = 'footnote'
                            el2.attrib.clear()
                            attribkey = add_ns('text:note-class', nsdict=SNSD)
                            el2.attrib[attribkey] = note_class
                            attribkey = add_ns('text:ref-name', nsdict=SNSD)
                            el2.attrib[attribkey] = id1
                            attribkey = add_ns('text:reference-format', nsdict=SNSD)
                            el2.attrib[attribkey] = 'page'
                            el2.text = ref1
#
# Utility methods
def append_child(self, tag, attrib=None, parent=None):
if parent is None:
parent = self.current_element
if attrib is None:
el = SubElement(parent, tag)
else:
el = SubElement(parent, tag, attrib)
return el
def append_p(self, style, text=None):
result = self.append_child('text:p', attrib={
'text:style-name': self.rststyle(style)})
self.append_pending_ids(result)
if text is not None:
result.text = text
return result
def append_pending_ids(self, el):
if self.settings.create_links:
for id in self.pending_ids:
SubElement(el, 'text:reference-mark', attrib={
'text:name': id})
self.pending_ids = [ ]
    def set_current_element(self, el):
        # All newly generated content is appended under current_element.
        self.current_element = el
    def set_to_parent(self):
        self.current_element = self.current_element.getparent()
    def generate_labeled_block(self, node, label):
        # Bold label paragraph followed by an indented block paragraph;
        # the caller fills the returned block element.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = label
        el = self.append_p('blockindent')
        return el
    def generate_labeled_line(self, node, label):
        # Bold label with the node's text following on the same line.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = label
        el1.tail = node.astext()
        return el
def encode(self, text):
text = text.replace(u'\u00a0', " ")
return text
#
# Visitor functions
#
# In alphabetic order, more or less.
# See docutils.docutils.nodes.node_class_names.
#
    def dispatch_visit(self, node):
        """Override to catch basic attributes which many nodes have."""
        self.handle_basic_atts(node)
        nodes.GenericNodeVisitor.dispatch_visit(self, node)
    def handle_basic_atts(self, node):
        # Queue element ids so the next paragraph emits reference marks
        # for them (see append_pending_ids).
        if isinstance(node, nodes.Element) and node['ids']:
            self.pending_ids += node['ids']
    def default_visit(self, node):
        # Fallback for node types without an explicit handler.
        self.document.reporter.warning('missing visit_%s' % (node.tagname, ))
    def default_departure(self, node):
        self.document.reporter.warning('missing depart_%s' % (node.tagname, ))
def visit_Text(self, node):
# Skip nodes whose text has been processed in parent nodes.
if isinstance(node.parent, docutils.nodes.literal_block):
return
text = node.astext()
# Are we in mixed content? If so, add the text to the
# etree tail of the previous sibling element.
if len(self.current_element.getchildren()) > 0:
if self.current_element.getchildren()[-1].tail:
self.current_element.getchildren()[-1].tail += text
else:
self.current_element.getchildren()[-1].tail = text
else:
if self.current_element.text:
self.current_element.text += text
else:
self.current_element.text = text
def depart_Text(self, node):
pass
    #
    # Pre-defined fields
    #
    def visit_address(self, node):
        # Docinfo "Address:" rendered as a labeled indented block.
        el = self.generate_labeled_block(node, 'Address: ')
        self.set_current_element(el)
    def depart_address(self, node):
        self.set_to_parent()
    def visit_author(self, node):
        # Inside an authors list the label was emitted by visit_authors;
        # otherwise label this entry itself.
        if isinstance(node.parent, nodes.authors):
            el = self.append_p('blockindent')
        else:
            el = self.generate_labeled_block(node, 'Author: ')
        self.set_current_element(el)
    def depart_author(self, node):
        self.set_to_parent()
    def visit_authors(self, node):
        # Shared "Authors:" label; the entries follow as children.
        label = 'Authors:'
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = label
    def depart_authors(self, node):
        pass
    def visit_contact(self, node):
        el = self.generate_labeled_block(node, 'Contact: ')
        self.set_current_element(el)
    def depart_contact(self, node):
        self.set_to_parent()
    def visit_copyright(self, node):
        el = self.generate_labeled_block(node, 'Copyright: ')
        self.set_current_element(el)
    def depart_copyright(self, node):
        self.set_to_parent()
    def visit_date(self, node):
        # Single-line field: label plus the node text on one line.
        self.generate_labeled_line(node, 'Date: ')
    def depart_date(self, node):
        pass
    def visit_organization(self, node):
        el = self.generate_labeled_block(node, 'Organization: ')
        self.set_current_element(el)
    def depart_organization(self, node):
        self.set_to_parent()
    def visit_status(self, node):
        el = self.generate_labeled_block(node, 'Status: ')
        self.set_current_element(el)
    def depart_status(self, node):
        self.set_to_parent()
    def visit_revision(self, node):
        self.generate_labeled_line(node, 'Revision: ')
    def depart_revision(self, node):
        pass
    def visit_version(self, node):
        el = self.generate_labeled_line(node, 'Version: ')
        #self.set_current_element(el)
    def depart_version(self, node):
        #self.set_to_parent()
        pass
    def visit_attribution(self, node):
        # Attribution of a block quote (e.g. "-- author").
        el = self.append_p('attribution', node.astext())
    def depart_attribution(self, node):
        pass
    def visit_block_quote(self, node):
        # Pick the paragraph style family from the directive's class:
        # epigraph / highlights / plain blockquote.
        if 'epigraph' in node.attributes['classes']:
            self.paragraph_style_stack.append(self.rststyle('epigraph'))
            self.blockstyle = self.rststyle('epigraph')
        elif 'highlights' in node.attributes['classes']:
            self.paragraph_style_stack.append(self.rststyle('highlights'))
            self.blockstyle = self.rststyle('highlights')
        else:
            self.paragraph_style_stack.append(self.rststyle('blockquote'))
            self.blockstyle = self.rststyle('blockquote')
        # Quotes also deepen line-block indentation.
        self.line_indent_level += 1
    def depart_block_quote(self, node):
        self.paragraph_style_stack.pop()
        self.blockstyle = ''
        self.line_indent_level -= 1
def visit_bullet_list(self, node):
self.list_level +=1
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
pass
else:
if node.has_key('classes') and \
'auto-toc' in node.attributes['classes']:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('tocenumlist'),
})
self.list_style_stack.append(self.rststyle('enumitem'))
else:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('tocbulletlist'),
})
self.list_style_stack.append(self.rststyle('bulletitem'))
self.set_current_element(el)
else:
if self.blockstyle == self.rststyle('blockquote'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('blockquote-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('blockquote-bulletitem'))
elif self.blockstyle == self.rststyle('highlights'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('highlights-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('highlights-bulletitem'))
elif self.blockstyle == self.rststyle('epigraph'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('epigraph-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('epigraph-bulletitem'))
else:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('bulletlist'),
})
self.list_style_stack.append(self.rststyle('bulletitem'))
self.set_current_element(el)
    def depart_bullet_list(self, node):
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                # Native TOC: no list element was opened in visit.
                pass
            else:
                self.set_to_parent()
                self.list_style_stack.pop()
        else:
            self.set_to_parent()
            self.list_style_stack.pop()
        self.list_level -=1
def visit_caption(self, node):
raise nodes.SkipChildren()
pass
    def depart_caption(self, node):
        # Nothing to do: children were skipped in visit_caption.
        pass
    def visit_comment(self, node):
        # reST comments are preserved as ODF annotations (visible as
        # notes in the office application).
        el = self.append_p('textbody')
        el1 =  SubElement(el, 'office:annotation', attrib={})
        el2 =  SubElement(el1, 'text:p', attrib={})
        el2.text = node.astext()
    def depart_comment(self, node):
        pass
    def visit_compound(self, node):
        # The compound directive currently receives no special treatment.
        pass
    def depart_compound(self, node):
        pass
    def visit_container(self, node):
        # The container's first class name, if any, selects the
        # paragraph style for its contents.
        styles = node.attributes.get('classes', ())
        if len(styles) > 0:
            self.paragraph_style_stack.append(self.rststyle(styles[0]))
    def depart_container(self, node):
        styles = node.attributes.get('classes', ())
        if len(styles) > 0:
            self.paragraph_style_stack.pop()
    def visit_decoration(self, node):
        pass
    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        # Definition bodies are indented; bump the list level so nested
        # lists indent one extra step.
        self.paragraph_style_stack.append(self.rststyle('blockindent'))
        self.bumped_list_level_stack.append(ListLevel(1))
    def depart_definition(self, node):
        self.paragraph_style_stack.pop()
        self.bumped_list_level_stack.pop()
    def visit_definition_list(self, node):
        pass
    def depart_definition_list(self, node):
        pass
    def visit_definition_list_item(self, node):
        pass
    def depart_definition_list_item(self, node):
        pass
    def visit_term(self, node):
        # The defined term is set in bold within its own paragraph.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        #el1.text = node.astext()
        self.set_current_element(el1)
    def depart_term(self, node):
        # Pop both the span and its containing paragraph.
        self.set_to_parent()
        self.set_to_parent()
    def visit_classifier(self, node):
        # Append " (classifier)" in italics inside the last term span.
        els = self.current_element.getchildren()
        if len(els) > 0:
            el = els[-1]
            el1 = SubElement(el, 'text:span',
                attrib={'text:style-name': self.rststyle('emphasis')
                })
            el1.text = ' (%s)' % (node.astext(), )
    def depart_classifier(self, node):
        pass
    def visit_document(self, node):
        pass
    def depart_document(self, node):
        # All nodes visited: resolve footnote back-references now.
        self.process_footnotes()
    def visit_docinfo(self, node):
        # Docinfo optionally becomes its own text:section.
        self.section_level += 1
        self.section_count += 1
        if self.settings.create_sections:
            el = self.append_child('text:section', attrib={
                'text:name': 'Section%d' % self.section_count,
                'text:style-name': 'Sect%d' % self.section_level,
                })
            self.set_current_element(el)
    def depart_docinfo(self, node):
        self.section_level -= 1
        if self.settings.create_sections:
            self.set_to_parent()
def visit_emphasis(self, node):
el = SubElement(self.current_element, 'text:span',
attrib={'text:style-name': self.rststyle('emphasis')})
self.set_current_element(el)
def depart_emphasis(self, node):
self.set_to_parent()
    def visit_enumerated_list(self, node):
        # Choose the list style: block-quote variants reuse their style
        # family; otherwise it depends on the enumeration type
        # (arabic, loweralpha, ...).
        el1 = self.current_element
        if self.blockstyle == self.rststyle('blockquote'):
            el2 = SubElement(el1, 'text:list', attrib={
                'text:style-name': self.rststyle('blockquote-enumlist'),
                })
            self.list_style_stack.append(self.rststyle('blockquote-enumitem'))
        elif self.blockstyle == self.rststyle('highlights'):
            el2 = SubElement(el1, 'text:list', attrib={
                'text:style-name': self.rststyle('highlights-enumlist'),
                })
            self.list_style_stack.append(self.rststyle('highlights-enumitem'))
        elif self.blockstyle == self.rststyle('epigraph'):
            el2 = SubElement(el1, 'text:list', attrib={
                'text:style-name': self.rststyle('epigraph-enumlist'),
                })
            self.list_style_stack.append(self.rststyle('epigraph-enumitem'))
        else:
            liststylename = 'enumlist-%s' % (node.get('enumtype', 'arabic'), )
            el2 = SubElement(el1, 'text:list', attrib={
                'text:style-name': self.rststyle(liststylename),
                })
            self.list_style_stack.append(self.rststyle('enumitem'))
        self.set_current_element(el2)
    def depart_enumerated_list(self, node):
        self.set_to_parent()
        self.list_style_stack.pop()
    def visit_list_item(self, node):
        # If we are in a "bumped" list level, then wrap this
        # list in an outer lists in order to increase the
        # indentation level.
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                # Native TOC: items become styled contents-N paragraphs.
                self.paragraph_style_stack.append(
                    self.rststyle('contents-%d' % (self.list_level, )))
            else:
                el1 = self.append_child('text:list-item')
                self.set_current_element(el1)
        else:
            el1 = self.append_child('text:list-item')
            el3 = el1
            if len(self.bumped_list_level_stack) > 0:
                level_obj = self.bumped_list_level_stack[-1]
                if level_obj.get_sibling():
                    level_obj.set_nested(False)
                    # Wrap in extra text:list/list-item pairs, one per
                    # bumped level, to deepen the indentation.
                    for level_obj1 in self.bumped_list_level_stack:
                        for idx in range(level_obj1.get_level()):
                            el2 = self.append_child('text:list', parent=el3)
                            el3 = self.append_child(
                                'text:list-item', parent=el2)
            self.paragraph_style_stack.append(self.list_style_stack[-1])
            self.set_current_element(el3)
    def depart_list_item(self, node):
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                self.paragraph_style_stack.pop()
            else:
                self.set_to_parent()
        else:
            if len(self.bumped_list_level_stack) > 0:
                level_obj = self.bumped_list_level_stack[-1]
                if level_obj.get_sibling():
                    level_obj.set_nested(True)
                    # Unwind the extra wrappers added in visit_list_item.
                    for level_obj1 in self.bumped_list_level_stack:
                        for idx in range(level_obj1.get_level()):
                            self.set_to_parent()
                            self.set_to_parent()
            self.paragraph_style_stack.pop()
            self.set_to_parent()
    def visit_header(self, node):
        # Flag so generated content is routed to the page header.
        self.in_header = True
    def depart_header(self, node):
        self.in_header = False
    def visit_footer(self, node):
        # Flag so generated content is routed to the page footer.
        self.in_footer = True
    def depart_footer(self, node):
        self.in_footer = False
    def visit_field(self, node):
        pass
    def depart_field(self, node):
        pass
    def visit_field_list(self, node):
        pass
    def depart_field_list(self, node):
        pass
    def visit_field_name(self, node):
        # Field name rendered bold in its own paragraph.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = node.astext()
    def depart_field_name(self, node):
        pass
    def visit_field_body(self, node):
        # Field bodies are indented under their name.
        self.paragraph_style_stack.append(self.rststyle('blockindent'))
    def depart_field_body(self, node):
        self.paragraph_style_stack.pop()
    def visit_figure(self, node):
        # Figure handling happens in visit_image / generate_figure.
        pass
    def depart_figure(self, node):
        pass
    def visit_footnote(self, node):
        # Build the note body off-tree; process_footnotes() later copies
        # it into the first back-reference.
        self.footnote_level += 1
        self.save_footnote_current = self.current_element
        el1 = Element('text:note-body')
        self.current_element = el1
        self.footnote_list.append((node, el1))
        if isinstance(node, docutils.nodes.citation):
            self.paragraph_style_stack.append(self.rststyle('citation'))
        else:
            self.paragraph_style_stack.append(self.rststyle('footnote'))
    def depart_footnote(self, node):
        self.paragraph_style_stack.pop()
        self.current_element = self.save_footnote_current
        self.footnote_level -= 1
footnote_chars = [
'*', '**', '***',
'++', '+++',
'##', '###',
'@@', '@@@',
]
    def visit_footnote_reference(self, node):
        # Only emit a text:note at the top level (not inside another
        # footnote body).
        if self.footnote_level <= 0:
            id = node.attributes['ids'][0]
            refid = node.attributes.get('refid')
            if refid is None:
                refid = ''
            if self.settings.endnotes_end_doc:
                note_class = 'endnote'
            else:
                note_class = 'footnote'
            el1 = self.append_child('text:note', attrib={
                'text:id': '%s' % (refid, ),
                'text:note-class': note_class,
                })
            note_auto = str(node.attributes.get('auto', 1))
            if isinstance(node, docutils.nodes.citation_reference):
                citation = '[%s]' % node.astext()
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': citation,
                    })
                el2.text = citation
            elif note_auto == '1':
                # Auto-numbered footnote: label is the number text.
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': node.astext(),
                    })
                el2.text = node.astext()
            elif note_auto == '*':
                # Symbol footnote: cycle through footnote_chars.
                if self.footnote_chars_idx >= len(
                    ODFTranslator.footnote_chars):
                    self.footnote_chars_idx = 0
                footnote_char = ODFTranslator.footnote_chars[
                    self.footnote_chars_idx]
                self.footnote_chars_idx += 1
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': footnote_char,
                    })
                el2.text = footnote_char
            # Remember the note element so its body can be attached in
            # process_footnotes().
            self.footnote_ref_dict[id] = el1
        raise nodes.SkipChildren()
    def depart_footnote_reference(self, node):
        pass
    def visit_citation(self, node):
        # Remember the citation's first id; visit_label uses it for the
        # reference mark.
        for id in node.attributes['ids']:
            self.citation_id = id
            break
        self.paragraph_style_stack.append(self.rststyle('blockindent'))
        self.bumped_list_level_stack.append(ListLevel(1))
    def depart_citation(self, node):
        self.citation_id = None
        self.paragraph_style_stack.pop()
        self.bumped_list_level_stack.pop()
    def visit_citation_reference(self, node):
        if self.settings.create_links:
            # Emit a live cross-reference to the citation's mark.
            id = node.attributes['refid']
            el = self.append_child('text:reference-ref', attrib={
                'text:ref-name': '%s' % (id, ),
                'text:reference-format': 'text',
                })
            el.text = '['
            self.set_current_element(el)
        elif self.current_element.text is None:
            self.current_element.text = '['
        else:
            self.current_element.text += '['
    def depart_citation_reference(self, node):
        self.current_element.text += ']'
        if self.settings.create_links:
            self.set_to_parent()
    def visit_label(self, node):
        if isinstance(node.parent, docutils.nodes.footnote):
            # Footnote labels are generated by the note-citation element.
            raise nodes.SkipChildren()
        elif self.citation_id is not None:
            # Citation label rendered as "[label]" with an optional
            # reference mark spanning it.
            el = self.append_p('textbody')
            self.set_current_element(el)
            el.text = '['
            if self.settings.create_links:
                el1 = self.append_child('text:reference-mark-start', attrib={
                    'text:name': '%s' % (self.citation_id, ),
                    })
    def depart_label(self, node):
        if isinstance(node.parent, docutils.nodes.footnote):
            pass
        elif self.citation_id is not None:
            self.current_element.text += ']'
            if self.settings.create_links:
                el = self.append_child('text:reference-mark-end', attrib={
                    'text:name': '%s' % (self.citation_id, ),
                    })
                self.set_to_parent()
    def visit_generated(self, node):
        # Generated text (e.g. section numbers) needs no special markup.
        pass
    def depart_generated(self, node):
        pass
def check_file_exists(self, path):
if os.path.exists(path):
return 1
else:
return 0
    def visit_image(self, node):
        """Register the image for embedding in the package and emit the
        drawing elements: a framed figure when the parent is a figure
        node, a plain image otherwise."""
        # Capture the image file.
        if 'uri' in node.attributes:
            source = node.attributes['uri']
            if not self.check_file_exists(source):
                self.document.reporter.warning(
                    'Cannot find image file %s.' % (source, ))
                return
        else:
            return
        if source in self.image_dict:
            filename, destination = self.image_dict[source]
        else:
            self.image_count += 1
            filename = os.path.split(source)[1]
            destination = 'Pictures/1%08x%s' % (self.image_count, filename, )
            spec = (os.path.abspath(source), destination,)
            self.embedded_file_list.append(spec)
            # NOTE(review): stores the source path (not the basename
            # computed above) as the first tuple element;
            # get_image_scaled_width_height opens that path directly.
            self.image_dict[source] = (source, destination,)
        # Is this a figure (containing an image) or just a plain image?
        if self.in_paragraph:
            el1 = self.current_element
        else:
            el1 = SubElement(self.current_element, 'text:p',
                attrib={'text:style-name': self.rststyle('textbody')})
        el2 = el1
        if isinstance(node.parent, docutils.nodes.figure):
            el3, el4, caption = self.generate_figure(node, source,
                destination, el2)
            attrib = {
                'draw:blue': '0%',
                'draw:color-inversion': 'false',
                'draw:color-mode': 'standard',
                'draw:contrast': '0%',
                'draw:gamma': '100%',
                'draw:green': '0%',
                'draw:image-opacity': '100%',
                'draw:luminance': '0%',
                'draw:red': '0%',
                'fo:border': 'none',
                'fo:clip': 'rect(0in 0in 0in 0in)',
                'fo:margin-bottom': '0in',
                'fo:margin-left': '0in',
                'fo:margin-right': '0in',
                'fo:margin-top': '0in',
                'fo:padding': '0in',
                'style:horizontal-pos': 'from-left',
                'style:horizontal-rel': 'paragraph-content',
                'style:mirror': 'none',
                'style:run-through': 'foreground',
                'style:shadow': 'none',
                'style:vertical-pos': 'from-top',
                'style:vertical-rel': 'paragraph-content',
                'style:wrap': 'none',
                }
            el5, width = self.generate_image(node, source, destination,
                el4, attrib)
            if caption is not None:
                el5.tail = caption
        else:   #if isinstance(node.parent, docutils.nodes.image):
            el3 = self.generate_image(node, source, destination, el2)
    def depart_image(self, node):
        pass
def get_image_width_height(self, node, attr):
size = None
if attr in node.attributes:
size = node.attributes[attr]
unit = size[-2:]
if unit.isalpha():
size = size[:-2]
else:
unit = 'px'
try:
size = float(size)
except ValueError, e:
self.document.reporter.warning(
'Invalid %s for image: "%s"' % (
attr, node.attributes[attr]))
size = [size, unit]
return size
def get_image_scale(self, node):
if 'scale' in node.attributes:
try:
scale = int(node.attributes['scale'])
if scale < 1: # or scale > 100:
self.document.reporter.warning(
'scale out of range (%s), using 1.' % (scale, ))
scale = 1
scale = scale * 0.01
except ValueError, e:
self.document.reporter.warning(
'Invalid scale for image: "%s"' % (
node.attributes['scale'], ))
else:
scale = 1.0
return scale
def get_image_scaled_width_height(self, node, source):
scale = self.get_image_scale(node)
width = self.get_image_width_height(node, 'width')
height = self.get_image_width_height(node, 'height')
dpi = (72, 72)
if Image is not None and source in self.image_dict:
filename, destination = self.image_dict[source]
imageobj = Image.open(filename, 'r')
dpi = imageobj.info.get('dpi', dpi)
# dpi information can be (xdpi, ydpi) or xydpi
try: iter(dpi)
except: dpi = (dpi, dpi)
else:
imageobj = None
if width is None or height is None:
if imageobj is None:
raise RuntimeError(
'image size not fully specified and PIL not installed')
if width is None: width = [imageobj.size[0], 'px']
if height is None: height = [imageobj.size[1], 'px']
width[0] *= scale
height[0] *= scale
if width[1] == 'px': width = [width[0] / dpi[0], 'in']
if height[1] == 'px': height = [height[0] / dpi[1], 'in']
width[0] = str(width[0])
height[0] = str(height[0])
return ''.join(width), ''.join(height)
    def generate_figure(self, node, source, destination, current_element):
        """Create the draw:frame / draw:text-box wrapper for a figure,
        plus automatic caption and frame styles.

        Returns (frame_element, caption_paragraph, caption_text).
        """
        caption = None
        width, height = self.get_image_scaled_width_height(node, source)
        # The caption is a sibling of the image under the figure node.
        for node1 in node.parent.children:
            if node1.tagname == 'caption':
                caption = node1.astext()
        self.image_style_count += 1
        #
        # Add the style for the caption.
        if caption is not None:
            attrib = {
                'style:class': 'extra',
                'style:family': 'paragraph',
                'style:name': 'Caption',
                'style:parent-style-name': 'Standard',
                }
            el1 = SubElement(self.automatic_styles, 'style:style',
                attrib=attrib, nsdict=SNSD)
            attrib = {
                'fo:margin-bottom': '0.0835in',
                'fo:margin-top': '0.0835in',
                'text:line-number': '0',
                'text:number-lines': 'false',
                }
            el2 = SubElement(el1, 'style:paragraph-properties',
                attrib=attrib, nsdict=SNSD)
            attrib = {
                'fo:font-size': '12pt',
                'fo:font-style': 'italic',
                'style:font-name': 'Times',
                'style:font-name-complex': 'Lucidasans1',
                'style:font-size-asian': '12pt',
                'style:font-size-complex': '12pt',
                'style:font-style-asian': 'italic',
                'style:font-style-complex': 'italic',
                }
            el2 = SubElement(el1, 'style:text-properties',
                attrib=attrib, nsdict=SNSD)
        style_name = 'rstframestyle%d' % self.image_style_count
        # Add the styles
        attrib = {
            'style:name': style_name,
            'style:family': 'graphic',
            'style:parent-style-name': 'Frame',
            }
        el1 = SubElement(self.automatic_styles,
            'style:style', attrib=attrib, nsdict=SNSD)
        halign = 'center'
        valign = 'top'
        if 'align' in node.attributes:
            align = node.attributes['align'].split()
            for val in align:
                if val in ('left', 'center', 'right'):
                    halign = val
                elif val in ('top', 'middle', 'bottom'):
                    valign = val
        attrib = {
            'fo:margin-left': '0cm',
            'fo:margin-right': '0cm',
            'fo:margin-top': '0cm',
            'fo:margin-bottom': '0cm',
            # 'style:wrap': 'dynamic', #vds
            'style:number-wrapped-paragraphs': 'no-limit',
            'style:vertical-pos': valign,
            'style:vertical-rel': 'paragraph',
            'style:horizontal-pos': halign,
            'style:horizontal-rel': 'paragraph',
            'fo:padding': '0cm',
            'fo:border': 'none',
            }
        # A "wrap" class on the figure allows text to flow around it.
        wrap = False
        classes = node.parent.attributes.get('classes')
        if classes and 'wrap' in classes:
            wrap = True
        if wrap:
            attrib['style:wrap'] = 'dynamic'
        else:
            attrib['style:wrap'] = 'none'
        el2 = SubElement(el1,
            'style:graphic-properties', attrib=attrib, nsdict=SNSD)
        attrib = {
            'draw:style-name': style_name,
            'draw:name': 'Frame1',
            'text:anchor-type': 'paragraph',
            'draw:z-index': '1',
            }
        attrib['svg:width'] = width
        # dbg
        #attrib['svg:height'] = height
        el3 = SubElement(current_element, 'draw:frame', attrib=attrib)
        attrib = {}
        el4 = SubElement(el3, 'draw:text-box', attrib=attrib)
        attrib = {
            'text:style-name': self.rststyle('caption'),
            }
        el5 = SubElement(el4, 'text:p', attrib=attrib)
        return el3, el5, caption
    def generate_image(self, node, source, destination, current_element,
        frame_attrs=None):
        """Create the draw:frame / draw:image elements for an image plus
        an automatic graphic style.

        Returns (frame_element, width_string).
        """
        width, height = self.get_image_scaled_width_height(node, source)
        self.image_style_count += 1
        style_name = 'rstframestyle%d' % self.image_style_count
        # Add the style.
        attrib = {
            'style:name': style_name,
            'style:family': 'graphic',
            'style:parent-style-name': 'Graphics',
            }
        el1 = SubElement(self.automatic_styles,
            'style:style', attrib=attrib, nsdict=SNSD)
        halign = None
        valign = None
        if 'align' in node.attributes:
            align = node.attributes['align'].split()
            for val in align:
                if val in ('left', 'center', 'right'):
                    halign = val
                elif val in ('top', 'middle', 'bottom'):
                    valign = val
        if frame_attrs is None:
            attrib = {
                'style:vertical-pos': 'top',
                'style:vertical-rel': 'paragraph',
                'style:horizontal-rel': 'paragraph',
                'style:mirror': 'none',
                'fo:clip': 'rect(0cm 0cm 0cm 0cm)',
                'draw:luminance': '0%',
                'draw:contrast': '0%',
                'draw:red': '0%',
                'draw:green': '0%',
                'draw:blue': '0%',
                'draw:gamma': '100%',
                'draw:color-inversion': 'false',
                'draw:image-opacity': '100%',
                'draw:color-mode': 'standard',
                }
        else:
            attrib = frame_attrs
        if halign is not None:
            attrib['style:horizontal-pos'] = halign
        if valign is not None:
            attrib['style:vertical-pos'] = valign
        # If there is a classes/wrap directive or we are
        # inside a table, add a no-wrap style.
        wrap = False
        classes = node.attributes.get('classes')
        if classes and 'wrap' in classes:
            wrap = True
        if wrap:
            attrib['style:wrap'] = 'dynamic'
        else:
            attrib['style:wrap'] = 'none'
        # If we are inside a table, add a no-wrap style.
        if self.is_in_table(node):
            attrib['style:wrap'] = 'none'
        el2 = SubElement(el1,
            'style:graphic-properties', attrib=attrib, nsdict=SNSD)
        # Add the content.
        #el = SubElement(current_element, 'text:p',
        #    attrib={'text:style-name': self.rststyle('textbody')})
        attrib={
            'draw:style-name': style_name,
            'draw:name': 'graphics2',
            'draw:z-index': '1',
            }
        # Inline images anchor as a character; others to the paragraph.
        if isinstance(node.parent, nodes.TextElement):
            attrib['text:anchor-type'] = 'as-char' #vds
        else:
            attrib['text:anchor-type'] = 'paragraph'
        attrib['svg:width'] = width
        attrib['svg:height'] = height
        el1 = SubElement(current_element, 'draw:frame', attrib=attrib)
        el2 = SubElement(el1, 'draw:image', attrib={
            'xlink:href': '%s' % (destination, ),
            'xlink:type': 'simple',
            'xlink:show': 'embed',
            'xlink:actuate': 'onLoad',
            })
        return el1, width
def is_in_table(self, node):
node1 = node.parent
while node1:
if isinstance(node1, docutils.nodes.entry):
return True
node1 = node1.parent
return False
    def visit_legend(self, node):
        # Currently, the legend receives *no* special treatment.
        pass
    def depart_legend(self, node):
        pass
    def visit_line_block(self, node):
        # Track nesting: each level gets its own 'lineblockN' indent style.
        self.line_indent_level += 1
        self.line_block_level += 1
    def depart_line_block(self, node):
        # Close the outermost line block with an empty trailing paragraph.
        if self.line_block_level <= 1:
            el1 = SubElement(self.current_element, 'text:p', attrib={
                'text:style-name': self.rststyle('lineblock1'),
                })
        self.line_indent_level -= 1
        self.line_block_level -= 1
    def visit_line(self, node):
        # One text:p per line, styled by the current indent level.
        style = 'lineblock%d' % self.line_indent_level
        el1 = SubElement(self.current_element, 'text:p', attrib={
            'text:style-name': self.rststyle(style),
            })
        self.current_element = el1
    def depart_line(self, node):
        self.set_to_parent()
    def visit_literal(self, node):
        # Inline literal text gets the 'inlineliteral' character style.
        el = SubElement(self.current_element, 'text:span',
            attrib={'text:style-name': self.rststyle('inlineliteral')})
        self.set_current_element(el)
    def depart_literal(self, node):
        self.set_to_parent()
def _calculate_code_block_padding(self, line):
count = 0
matchobj = SPACES_PATTERN.match(line)
if matchobj:
pad = matchobj.group()
count = len(pad)
else:
matchobj = TABS_PATTERN.match(line)
if matchobj:
pad = matchobj.group()
count = len(pad) * 8
return count
    def _add_syntax_highlighting(self, insource, language):
        """Return *insource* marked up by pygments for *language*.

        LaTeX/TeX sources use a dedicated formatter; everything else
        uses the generic program-code formatter.  Both map pygments
        style names onto rst ODF styles through self.rststyle.
        """
        lexer = pygments.lexers.get_lexer_by_name(language, stripall=True)
        if language in ('latex', 'tex'):
            fmtr = OdtPygmentsLaTeXFormatter(lambda name, parameters=():
                self.rststyle(name, parameters),
                escape_function=escape_cdata)
        else:
            fmtr = OdtPygmentsProgFormatter(lambda name, parameters=():
                self.rststyle(name, parameters),
                escape_function=escape_cdata)
        outsource = pygments.highlight(insource, lexer, fmtr)
        return outsource
def fill_line(self, line):
line = FILL_PAT1.sub(self.fill_func1, line)
line = FILL_PAT2.sub(self.fill_func2, line)
return line
def fill_func1(self, matchobj):
spaces = matchobj.group(0)
repl = '<text:s text:c="%d"/>' % (len(spaces), )
return repl
def fill_func2(self, matchobj):
spaces = matchobj.group(0)
repl = ' <text:s text:c="%d"/>' % (len(spaces) - 1, )
return repl
    def visit_literal_block(self, node):
        """Render a literal (code) block, optionally syntax highlighted.

        The block is assembled as an XML string (so pygments markup can
        be embedded verbatim), parsed, and its children appended to the
        current element.
        """
        wrapper1 = '<text:p text:style-name="%s">%%s</text:p>' % (
            self.rststyle('codeblock'), )
        source = node.astext()
        if (pygments and
            self.settings.add_syntax_highlighting
            #and
            #node.get('hilight', False)
            ):
            language = node.get('language', 'python')
            source = self._add_syntax_highlighting(source, language)
        else:
            source = escape_cdata(source)
        lines = source.split('\n')
        lines1 = ['<wrappertag1 xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">']
        my_lines = []
        for my_line in lines:
            my_line = self.fill_line(my_line)
            # NOTE(review): upstream docutils replaces the entity "&#10;"
            # here; this literal space looks like a mojibake/extraction
            # artifact -- verify against the canonical source.
            my_line = my_line.replace(" ", "\n")
            my_lines.append(my_line)
        my_lines_str = '<text:line-break/>'.join(my_lines)
        my_lines_str2 = wrapper1 % (my_lines_str, )
        lines1.append(my_lines_str2)
        lines1.append('</wrappertag1>')
        s1 = ''.join(lines1)
        # lxml accepts unicode; the stdlib ElementTree parser wants bytes.
        if WhichElementTree != "lxml":
            s1 = s1.encode("utf-8")
        el1 = etree.fromstring(s1)
        children = el1.getchildren()
        for child in children:
            self.current_element.append(child)
    def depart_literal_block(self, node):
        pass
    # Doctest blocks are rendered exactly like literal blocks.
    visit_doctest_block = visit_literal_block
    depart_doctest_block = depart_literal_block
    def visit_meta(self, node):
        """Record a meta directive's name/content pair for document metadata."""
        name = node.attributes.get('name')
        content = node.attributes.get('content')
        if name is not None and content is not None:
            self.meta_dict[name] = content
    def depart_meta(self, node):
        pass
    def visit_option_list(self, node):
        """Render an option list as a two-column table.

        The table/column/cell automatic styles are generated only once
        per document (guarded by optiontablestyles_generated); a header
        row labels the columns "Option" and "Description".
        """
        table_name = 'tableoption'
        #
        # Generate automatic styles
        if not self.optiontablestyles_generated:
            self.optiontablestyles_generated = True
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle(table_name),
                'style:family': 'table'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-properties', attrib={
                'style:width': '17.59cm',
                'table:align': 'left',
                'style:shadow': 'none'}, nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle('%s.%%c' % table_name, ( 'A', )),
                'style:family': 'table-column'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-column-properties', attrib={
                'style:column-width': '4.999cm'}, nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle('%s.%%c' % table_name, ( 'B', )),
                'style:family': 'table-column'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-column-properties', attrib={
                'style:column-width': '12.587cm'}, nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle(
                    '%s.%%c%%d' % table_name, ( 'A', 1, )),
                'style:family': 'table-cell'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-cell-properties', attrib={
                'fo:background-color': 'transparent',
                'fo:padding': '0.097cm',
                'fo:border-left': '0.035cm solid #000000',
                'fo:border-right': 'none',
                'fo:border-top': '0.035cm solid #000000',
                'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
            el2 = SubElement(el1, 'style:background-image', nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle(
                    '%s.%%c%%d' % table_name, ( 'B', 1, )),
                'style:family': 'table-cell'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-cell-properties', attrib={
                'fo:padding': '0.097cm',
                'fo:border': '0.035cm solid #000000'}, nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle(
                    '%s.%%c%%d' % table_name, ( 'A', 2, )),
                'style:family': 'table-cell'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-cell-properties', attrib={
                'fo:padding': '0.097cm',
                'fo:border-left': '0.035cm solid #000000',
                'fo:border-right': 'none',
                'fo:border-top': 'none',
                'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
            el = SubElement(self.automatic_styles, 'style:style', attrib={
                'style:name': self.rststyle(
                    '%s.%%c%%d' % table_name, ( 'B', 2, )),
                'style:family': 'table-cell'}, nsdict=SNSD)
            el1 = SubElement(el, 'style:table-cell-properties', attrib={
                'fo:padding': '0.097cm',
                'fo:border-left': '0.035cm solid #000000',
                'fo:border-right': '0.035cm solid #000000',
                'fo:border-top': 'none',
                'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
        #
        # Generate table data
        el = self.append_child('table:table', attrib={
            'table:name': self.rststyle(table_name),
            'table:style-name': self.rststyle(table_name),
            })
        el1 = SubElement(el, 'table:table-column', attrib={
            'table:style-name': self.rststyle(
                '%s.%%c' % table_name, ( 'A', ))})
        el1 = SubElement(el, 'table:table-column', attrib={
            'table:style-name': self.rststyle(
                '%s.%%c' % table_name, ( 'B', ))})
        el1 = SubElement(el, 'table:table-header-rows')
        el2 = SubElement(el1, 'table:table-row')
        el3 = SubElement(el2, 'table:table-cell', attrib={
            'table:style-name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'A', 1, )),
            'office:value-type': 'string'})
        el4 = SubElement(el3, 'text:p', attrib={
            'text:style-name': 'Table_20_Heading'})
        el4.text= 'Option'
        el3 = SubElement(el2, 'table:table-cell', attrib={
            'table:style-name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'B', 1, )),
            'office:value-type': 'string'})
        el4 = SubElement(el3, 'text:p', attrib={
            'text:style-name': 'Table_20_Heading'})
        el4.text= 'Description'
        self.set_current_element(el)
    def depart_option_list(self, node):
        self.set_to_parent()
    def visit_option_list_item(self, node):
        # Each option/description pair is one table row.
        el = self.append_child('table:table-row')
        self.set_current_element(el)
    def depart_option_list_item(self, node):
        self.set_to_parent()
    def visit_option_group(self, node):
        el = self.append_child('table:table-cell', attrib={
            'table:style-name': 'Table%d.A2' % self.table_count,
            'office:value-type': 'string',
            })
        self.set_current_element(el)
    def depart_option_group(self, node):
        self.set_to_parent()
    def visit_option(self, node):
        # Emit the full option text at once; the string/argument
        # children below are visited but contribute nothing extra.
        el = self.append_child('text:p', attrib={
            'text:style-name': 'Table_20_Contents'})
        el.text = node.astext()
    def depart_option(self, node):
        pass
    def visit_option_string(self, node):
        pass
    def depart_option_string(self, node):
        pass
    def visit_option_argument(self, node):
        pass
    def depart_option_argument(self, node):
        pass
    def visit_description(self, node):
        # The whole description is flattened into one cell's paragraph.
        el = self.append_child('table:table-cell', attrib={
            'table:style-name': 'Table%d.B2' % self.table_count,
            'office:value-type': 'string',
            })
        el1 = SubElement(el, 'text:p', attrib={
            'text:style-name': 'Table_20_Contents'})
        el1.text = node.astext()
        raise nodes.SkipChildren()
    def depart_description(self, node):
        pass
    def visit_paragraph(self, node):
        """Open a text:p styled from the top of the paragraph style stack."""
        self.in_paragraph = True
        if self.in_header:
            el = self.append_p('header')
        elif self.in_footer:
            el = self.append_p('footer')
        else:
            style_name = self.paragraph_style_stack[-1]
            el = self.append_child('text:p',
                attrib={'text:style-name': style_name})
        self.append_pending_ids(el)
        self.set_current_element(el)
    def depart_paragraph(self, node):
        self.in_paragraph = False
        self.set_to_parent()
        # Header/footer paragraphs are moved out of the body tree into
        # the pending header/footer content lists.
        if self.in_header:
            self.header_content.append(
                self.current_element.getchildren()[-1])
            self.current_element.remove(
                self.current_element.getchildren()[-1])
        elif self.in_footer:
            self.footer_content.append(
                self.current_element.getchildren()[-1])
            self.current_element.remove(
                self.current_element.getchildren()[-1])
    def visit_problematic(self, node):
        pass
    def depart_problematic(self, node):
        pass
    def visit_raw(self, node):
        """Insert raw content, but only when its format list includes 'odt'.

        The raw string is parsed inside a wrapper element declaring the
        content namespaces; only the first parsed child is inserted,
        and only into the body (not headers/footers).
        """
        if 'format' in node.attributes:
            formats = node.attributes['format']
            formatlist = formats.split()
            if 'odt' in formatlist:
                rawstr = node.astext()
                attrstr = ' '.join(['%s="%s"' % (k, v, )
                    for k,v in CONTENT_NAMESPACE_ATTRIB.items()])
                contentstr = '<stuff %s>%s</stuff>' % (attrstr, rawstr, )
                if WhichElementTree != "lxml":
                    contentstr = contentstr.encode("utf-8")
                content = etree.fromstring(contentstr)
                elements = content.getchildren()
                if len(elements) > 0:
                    el1 = elements[0]
                    # Raw content is dropped in page headers/footers.
                    if self.in_header:
                        pass
                    elif self.in_footer:
                        pass
                    else:
                        self.current_element.append(el1)
        raise nodes.SkipChildren()
    def depart_raw(self, node):
        if self.in_header:
            pass
        elif self.in_footer:
            pass
        else:
            pass
def visit_reference(self, node):
text = node.astext()
if self.settings.create_links:
if node.has_key('refuri'):
href = node['refuri']
if ( self.settings.cloak_email_addresses
and href.startswith('mailto:')):
href = self.cloak_mailto(href)
el = self.append_child('text:a', attrib={
'xlink:href': '%s' % href,
'xlink:type': 'simple',
})
self.set_current_element(el)
elif node.has_key('refid'):
if self.settings.create_links:
href = node['refid']
el = self.append_child('text:reference-ref', attrib={
'text:ref-name': '%s' % href,
'text:reference-format': 'text',
})
else:
self.document.reporter.warning(
'References must have "refuri" or "refid" attribute.')
if (self.in_table_of_contents and
len(node.children) >= 1 and
isinstance(node.children[0], docutils.nodes.generated)):
node.remove(node.children[0])
    def depart_reference(self, node):
        # Only hyperlinks (refuri) pushed a new current element.
        if self.settings.create_links:
            if node.has_key('refuri'):
                self.set_to_parent()
    def visit_rubric(self, node):
        """Render a rubric as a heading; a classes value overrides the style."""
        style_name = self.rststyle('rubric')
        classes = node.get('classes')
        if classes:
            class1 = classes[0]
            if class1:
                style_name = class1
        el = SubElement(self.current_element, 'text:h', attrib = {
            #'text:outline-level': '%d' % section_level,
            #'text:style-name': 'Heading_20_%d' % section_level,
            'text:style-name': style_name,
            })
        text = node.astext()
        el.text = self.encode(text)
    def depart_rubric(self, node):
        pass
    def visit_section(self, node, move_ids=1):
        """Open a text:section (when enabled) and track nesting depth."""
        self.section_level += 1
        self.section_count += 1
        if self.settings.create_sections:
            el = self.append_child('text:section', attrib={
                'text:name': 'Section%d' % self.section_count,
                'text:style-name': 'Sect%d' % self.section_level,
                })
            self.set_current_element(el)
    def depart_section(self, node):
        self.section_level -= 1
        if self.settings.create_sections:
            self.set_to_parent()
    def visit_strong(self, node):
        el = SubElement(self.current_element, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        self.set_current_element(el)
    def depart_strong(self, node):
        self.set_to_parent()
    def visit_substitution_definition(self, node):
        # Substitution definitions generate no output themselves.
        raise nodes.SkipChildren()
    def depart_substitution_definition(self, node):
        pass
    def visit_system_message(self, node):
        pass
    def depart_system_message(self, node):
        pass
    def visit_table(self, node):
        """Create the table's automatic styles and its table:table element.

        A single cell style (...A1) is shared by all cells.  Column
        widths accumulate in self.table_width and are written back onto
        the table style by depart_table.
        """
        self.table_count += 1
        table_name = '%s%%d' % TableStylePrefix
        el1 = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s' % table_name, ( self.table_count, )),
            'style:family': 'table',
            }, nsdict=SNSD)
        el1_1 = SubElement(el1, 'style:table-properties', attrib={
            #'style:width': '17.59cm',
            'table:align': 'margins',
            'fo:margin-top': '0in',
            'fo:margin-bottom': '0.10in',
            }, nsdict=SNSD)
        # We use a single cell style for all cells in this table.
        # That's probably not correct, but seems to work.
        el2 = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s.%%c%%d' % table_name, ( self.table_count, 'A', 1, )),
            'style:family': 'table-cell',
            }, nsdict=SNSD)
        line_style1 = '0.%03dcm solid #000000' % (
            self.settings.table_border_thickness, )
        el2_1 = SubElement(el2, 'style:table-cell-properties', attrib={
            'fo:padding': '0.049cm',
            'fo:border-left': line_style1,
            'fo:border-right': line_style1,
            'fo:border-top': line_style1,
            'fo:border-bottom': line_style1,
            }, nsdict=SNSD)
        # Emit a title paragraph above the table if the table has one.
        title = None
        for child in node.children:
            if child.tagname == 'title':
                title = child.astext()
                break
        if title is not None:
            el3 = self.append_p('table-title', title)
        else:
            pass
        el4 = SubElement(self.current_element, 'table:table', attrib={
            'table:name': self.rststyle(
                '%s' % table_name, ( self.table_count, )),
            'table:style-name': self.rststyle(
                '%s' % table_name, ( self.table_count, )),
            })
        self.set_current_element(el4)
        self.current_table_style = el1
        self.table_width = 0
    def depart_table(self, node):
        # Write the accumulated column width onto the table style.
        attribkey = add_ns('style:width', nsdict=SNSD)
        attribval = '%dcm' % self.table_width
        self.current_table_style.attrib[attribkey] = attribval
        self.set_to_parent()
    def visit_tgroup(self, node):
        # Reset the column counter; visit_colspec counts up from 'A'.
        self.column_count = ord('A') - 1
    def depart_tgroup(self, node):
        pass
    def visit_colspec(self, node):
        """Create a column style and a table:table-column for this colspec."""
        self.column_count += 1
        colspec_name = self.rststyle(
            '%s%%d.%%s' % TableStylePrefix,
            (self.table_count, chr(self.column_count), )
            )
        # NOTE(review): 'colwidth' is a relative measure in docutils but
        # is emitted directly as centimeters here -- verify scaling.
        colwidth = node['colwidth']
        el1 = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': colspec_name,
            'style:family': 'table-column',
            }, nsdict=SNSD)
        el1_1 = SubElement(el1, 'style:table-column-properties', attrib={
            'style:column-width': '%dcm' % colwidth }, nsdict=SNSD)
        el2 = self.append_child('table:table-column', attrib={
            'table:style-name': colspec_name,
            })
        self.table_width += colwidth
    def depart_colspec(self, node):
        pass
    def visit_thead(self, node):
        el = self.append_child('table:table-header-rows')
        self.set_current_element(el)
        self.in_thead = True
        # Header cells use the table heading paragraph style.
        self.paragraph_style_stack.append('Table_20_Heading')
    def depart_thead(self, node):
        self.set_to_parent()
        self.in_thead = False
        self.paragraph_style_stack.pop()
    def visit_row(self, node):
        self.column_count = ord('A') - 1
        el = self.append_child('table:table-row')
        self.set_current_element(el)
    def depart_row(self, node):
        self.set_to_parent()
    def visit_entry(self, node):
        """Open a table cell, handling column/row spans (morecols/morerows)."""
        self.column_count += 1
        cellspec_name = self.rststyle(
            '%s%%d.%%c%%d' % TableStylePrefix,
            (self.table_count, 'A', 1, )
            )
        attrib={
            'table:style-name': cellspec_name,
            'office:value-type': 'string',
            }
        morecols = node.get('morecols', 0)
        if morecols > 0:
            attrib['table:number-columns-spanned'] = '%d' % (morecols + 1,)
            self.column_count += morecols
        morerows = node.get('morerows', 0)
        if morerows > 0:
            attrib['table:number-rows-spanned'] = '%d' % (morerows + 1,)
        el1 = self.append_child('table:table-cell', attrib=attrib)
        self.set_current_element(el1)
    def depart_entry(self, node):
        self.set_to_parent()
    def visit_tbody(self, node):
        pass
    def depart_tbody(self, node):
        pass
    def visit_target(self, node):
        #
        # I don't know how to implement targets in ODF.
        # How do we create a target in oowriter? A cross-reference?
        # Both branches are deliberate no-ops for now.
        if not (node.has_key('refuri') or node.has_key('refid')
                or node.has_key('refname')):
            pass
        else:
            pass
    def depart_target(self, node):
        pass
    def visit_title(self, node, move_ids=1, title_type='title'):
        """Render a section heading (text:h) or the document title/subtitle."""
        if isinstance(node.parent, docutils.nodes.section):
            section_level = self.section_level
            # Only heading styles 1-7 exist; clamp deeper levels and warn.
            if section_level > 7:
                self.document.reporter.warning(
                    'Heading/section levels greater than 7 not supported.')
                self.document.reporter.warning(
                    ' Reducing to heading level 7 for heading: "%s"' % (
                        node.astext(), ))
                section_level = 7
            el1 = self.append_child('text:h', attrib = {
                'text:outline-level': '%d' % section_level,
                #'text:style-name': 'Heading_20_%d' % section_level,
                'text:style-name': self.rststyle(
                    'heading%d', (section_level, )),
                })
            self.append_pending_ids(el1)
            self.set_current_element(el1)
        elif isinstance(node.parent, docutils.nodes.document):
            # text = self.settings.title
            #else:
            # text = node.astext()
            el1 = SubElement(self.current_element, 'text:p', attrib = {
                'text:style-name': self.rststyle(title_type),
                })
            self.append_pending_ids(el1)
            text = node.astext()
            self.title = text
            self.found_doc_title = True
            self.set_current_element(el1)
    def depart_title(self, node):
        if (isinstance(node.parent, docutils.nodes.section) or
            isinstance(node.parent, docutils.nodes.document)):
            self.set_to_parent()
    def visit_subtitle(self, node, move_ids=1):
        # A subtitle is just a title rendered with the 'subtitle' style.
        self.visit_title(node, move_ids, title_type='subtitle')
    def depart_subtitle(self, node):
        self.depart_title(node)
    def visit_title_reference(self, node):
        el = self.append_child('text:span', attrib={
            'text:style-name': self.rststyle('quotation')})
        el.text = self.encode(node.astext())
        raise nodes.SkipChildren()
    def depart_title_reference(self, node):
        pass
def generate_table_of_content_entry_template(self, el1):
for idx in range(1, 11):
el2 = SubElement(el1,
'text:table-of-content-entry-template',
attrib={
'text:outline-level': "%d" % (idx, ),
'text:style-name': self.rststyle('contents-%d' % (idx, )),
})
el3 = SubElement(el2, 'text:index-entry-chapter')
el3 = SubElement(el2, 'text:index-entry-text')
el3 = SubElement(el2, 'text:index-entry-tab-stop', attrib={
'style:leader-char': ".",
'style:type': "right",
})
el3 = SubElement(el2, 'text:index-entry-page-number')
    def visit_topic(self, node):
        """Handle 'contents' (table of contents) and 'abstract' topics."""
        if 'classes' in node.attributes:
            if 'contents' in node.attributes['classes']:
                if self.settings.generate_oowriter_toc:
                    # Build a real, refreshable oowriter TOC structure.
                    el1 = self.append_child('text:table-of-content', attrib={
                        'text:name': 'Table of Contents1',
                        'text:protected': 'true',
                        'text:style-name': 'Sect1',
                        })
                    el2 = SubElement(el1,
                        'text:table-of-content-source',
                        attrib={
                            'text:outline-level': '10',
                        })
                    el3 =SubElement(el2, 'text:index-title-template', attrib={
                        'text:style-name': 'Contents_20_Heading',
                        })
                    el3.text = 'Table of Contents'
                    self.generate_table_of_content_entry_template(el2)
                    el4 = SubElement(el1, 'text:index-body')
                    el5 = SubElement(el4, 'text:index-title')
                    el6 = SubElement(el5, 'text:p', attrib={
                        'text:style-name': self.rststyle('contents-heading'),
                        })
                    el6.text = 'Table of Contents'
                    # Redirect output into the index body until depart_topic.
                    self.save_current_element = self.current_element
                    self.table_of_content_index_body = el4
                    self.set_current_element(el4)
                else:
                    # Plain-text rendering of the TOC.
                    el = self.append_p('horizontalline')
                    el = self.append_p('centeredtextbody')
                    el1 = SubElement(el, 'text:span',
                        attrib={'text:style-name': self.rststyle('strong')})
                    el1.text = 'Contents'
                self.in_table_of_contents = True
            elif 'abstract' in node.attributes['classes']:
                el = self.append_p('horizontalline')
                el = self.append_p('centeredtextbody')
                el1 = SubElement(el, 'text:span',
                    attrib={'text:style-name': self.rststyle('strong')})
                el1.text = 'Abstract'
    def depart_topic(self, node):
        if 'classes' in node.attributes:
            if 'contents' in node.attributes['classes']:
                if self.settings.generate_oowriter_toc:
                    self.update_toc_page_numbers(
                        self.table_of_content_index_body)
                    self.set_current_element(self.save_current_element)
                else:
                    el = self.append_p('horizontalline')
                self.in_table_of_contents = False
    def update_toc_page_numbers(self, el):
        """Append placeholder page numbers to the generated TOC entries."""
        collection = []
        self.update_toc_collect(el, 0, collection)
        self.update_toc_add_numbers(collection)
    def update_toc_collect(self, el, level, collection):
        # Depth-first walk gathering (level, element) pairs; the
        # text:index-body subtree is skipped.
        collection.append((level, el))
        level += 1
        for child_el in el.getchildren():
            if child_el.tag != 'text:index-body':
                self.update_toc_collect(child_el, level, collection)
    def update_toc_add_numbers(self, collection):
        # '9999' is a placeholder; the office application recomputes the
        # real page numbers when the TOC is refreshed.
        for level, el1 in collection:
            if (el1.tag == 'text:p' and
                el1.text != 'Table of Contents'):
                el2 = SubElement(el1, 'text:tab')
                el2.tail = '9999'
    def visit_transition(self, node):
        el = self.append_p('horizontalline')
    def depart_transition(self, node):
        pass
    #
    # Admonitions
    #
    def visit_warning(self, node):
        self.generate_admonition(node, 'warning')
    def depart_warning(self, node):
        # generate_admonition pushed a body style; pop it here.  All
        # other admonition depart handlers alias this method.
        self.paragraph_style_stack.pop()
    def visit_attention(self, node):
        self.generate_admonition(node, 'attention')
    depart_attention = depart_warning
    def visit_caution(self, node):
        self.generate_admonition(node, 'caution')
    depart_caution = depart_warning
    def visit_danger(self, node):
        self.generate_admonition(node, 'danger')
    depart_danger = depart_warning
    def visit_error(self, node):
        self.generate_admonition(node, 'error')
    depart_error = depart_warning
    def visit_hint(self, node):
        self.generate_admonition(node, 'hint')
    depart_hint = depart_warning
    def visit_important(self, node):
        self.generate_admonition(node, 'important')
    depart_important = depart_warning
    def visit_note(self, node):
        self.generate_admonition(node, 'note')
    depart_note = depart_warning
    def visit_tip(self, node):
        self.generate_admonition(node, 'tip')
    depart_tip = depart_warning
    def visit_admonition(self, node):
        #import pdb; pdb.set_trace()
        # Generic admonitions take their title from a title child or,
        # failing that, from the first classes value.
        title = None
        for child in node.children:
            if child.tagname == 'title':
                title = child.astext()
        if title is None:
            classes1 = node.get('classes')
            if classes1:
                title = classes1[0]
        self.generate_admonition(node, 'generic', title)
    depart_admonition = depart_warning
    def generate_admonition(self, node, label, title=None):
        """Emit the admonition header paragraph and push its body style."""
        el1 = SubElement(self.current_element, 'text:p', attrib = {
            'text:style-name': self.rststyle('admon-%s-hdr', ( label, )),
            })
        if title:
            el1.text = title
        else:
            el1.text = '%s!' % (label.capitalize(), )
        s1 = self.rststyle('admon-%s-body', ( label, ))
        self.paragraph_style_stack.append(s1)
    #
    # Roles (e.g. subscript, superscript, strong, ...
    #
    def visit_subscript(self, node):
        # NOTE(review): hard-coded style names here bypass self.rststyle,
        # unlike the sibling visitors -- verify that is intentional.
        el = self.append_child('text:span', attrib={
            'text:style-name': 'rststyle-subscript',
            })
        self.set_current_element(el)
    def depart_subscript(self, node):
        self.set_to_parent()
    def visit_superscript(self, node):
        el = self.append_child('text:span', attrib={
            'text:style-name': 'rststyle-superscript',
            })
        self.set_current_element(el)
    def depart_superscript(self, node):
        self.set_to_parent()
# A specialized reader: when hyperlink creation is disabled, the
# DanglingReferences transform must not run (it would reject the
# now-unlinked references).
class Reader(standalone.Reader):
    def get_transforms(self):
        """Return the standard transforms, minus DanglingReferences
        when links are not being created."""
        transforms = standalone.Reader.get_transforms(self)
        if self.settings.create_links:
            return transforms
        kept = []
        for transform in transforms:
            if transform is not references.DanglingReferences:
                kept.append(transform)
        return kept
| Python |
# $Id: __init__.py 6153 2009-10-05 13:37:10Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import time
import re
try:
import Image # check for the Python Imaging Library
except ImportError:
Image = None
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.transforms import writer_aux
class Writer(writers.Writer):
    """Writer for XHTML 1.0 Transitional output (see module docstring)."""
    supported = ('html', 'html4css1', 'xhtml')
    """Formats this writer supports."""
    default_stylesheet = 'html4css1.css'
    # Paths are made relative to the current working directory so they
    # display sensibly in --help output.
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))
    default_template = 'template.txt'
    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))
    # Command-line/config options specific to the HTML writer.
    settings_spec = (
        'HTML-Specific Options',
        None,
        (('Specify the template file (UTF-8 encoded). Default is "%s".'
          % default_template_path,
          ['--template'],
          {'default': default_template_path, 'metavar': '<file>'}),
         ('Specify comma separated list of stylesheet URLs. '
          'Overrides previous --stylesheet and --stylesheet-path settings.',
          ['--stylesheet'],
          {'metavar': '<URL>', 'overrides': 'stylesheet_path'}),
         ('Specify comma separated list of stylesheet paths. '
          'With --link-stylesheet, '
          'the path is rewritten relative to the output HTML file. '
          'Default: "%s"' % default_stylesheet_path,
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet',
           'default': default_stylesheet_path}),
         ('Embed the stylesheet(s) in the output HTML file. The stylesheet '
          'files must be accessible during processing. This is the default.',
          ['--embed-stylesheet'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Link to the stylesheet(s) in the output HTML file. '
          'Default: embed stylesheets.',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Specify the initial header level. Default is 1 for "<h1>". '
          'Does not affect document title & subtitle (see --no-doc-title).',
          ['--initial-header-level'],
          {'choices': '1 2 3 4 5 6'.split(), 'default': '1',
           'metavar': '<level>'}),
         ('Specify the maximum width (in characters) for one-column field '
          'names. Longer field names will span an entire row of the table '
          'used to render the field list. Default is 14 characters. '
          'Use 0 for "no limit".',
          ['--field-name-limit'],
          {'default': 14, 'metavar': '<level>',
           'validator': frontend.validate_nonnegative_int}),
         ('Specify the maximum width (in characters) for options in option '
          'lists. Longer options will span an entire row of the table used '
          'to render the option list. Default is 14 characters. '
          'Use 0 for "no limit".',
          ['--option-limit'],
          {'default': 14, 'metavar': '<level>',
           'validator': frontend.validate_nonnegative_int}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default is "brackets".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'brackets',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Remove extra vertical whitespace between items of "simple" bullet '
          'lists and enumerated lists. Default: enabled.',
          ['--compact-lists'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compact simple bullet and enumerated lists.',
          ['--no-compact-lists'],
          {'dest': 'compact_lists', 'action': 'store_false'}),
         ('Remove extra vertical whitespace between items of simple field '
          'lists. Default: enabled.',
          ['--compact-field-lists'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compact simple field lists.',
          ['--no-compact-field-lists'],
          {'dest': 'compact_field_lists', 'action': 'store_false'}),
         ('Added to standard table classes. '
          'Defined styles: "borderless". Default: ""',
          ['--table-style'],
          {'default': ''}),
         ('Omit the XML declaration. Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Obfuscate email addresses to confuse harvesters while still '
          'keeping email links usable with standards-compliant browsers.',
          ['--cloak-email-addresses'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),))
    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
    relative_path_settings = ('stylesheet_path',)
    config_section = 'html4css1 writer'
    config_section_dependencies = ('writers',)
    # Attributes copied from the visitor after translation; also the
    # keys of the parts dictionary and the template substitutions.
    visitor_attributes = (
        'head_prefix', 'head', 'stylesheet', 'body_prefix',
        'body_pre_docinfo', 'docinfo', 'body', 'body_suffix',
        'title', 'subtitle', 'header', 'footer', 'meta', 'fragment',
        'html_prolog', 'html_head', 'html_title', 'html_subtitle',
        'html_body')
    def get_transforms(self):
        # Convert admonition nodes this writer can't render directly.
        return writers.Writer.get_transforms(self) + [writer_aux.Admonitions]
    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = HTMLTranslator
    def translate(self):
        """Run the visitor over the document, collect the output parts,
        and fill the document template."""
        self.visitor = visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        for attr in self.visitor_attributes:
            setattr(self, attr, getattr(visitor, attr))
        self.output = self.apply_template()
    def apply_template(self):
        # The template is UTF-8 text with %(part)s placeholders.
        template_file = open(self.document.settings.template, 'rb')
        template = unicode(template_file.read(), 'utf-8')
        template_file.close()
        subs = self.interpolation_dict()
        return template % subs
    def interpolation_dict(self):
        """Return the template substitution mapping (parts + metadata)."""
        subs = {}
        settings = self.document.settings
        for attr in self.visitor_attributes:
            subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
        subs['encoding'] = settings.output_encoding
        subs['version'] = docutils.__version__
        return subs
    def assemble_parts(self):
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            self.parts[part] = ''.join(getattr(self, part))
class HTMLTranslator(nodes.NodeVisitor):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
' xml:lang="%s" lang="%s">\n<head>\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
words_and_spaces = re.compile(r'\S+| +|\n')
sollbruchstelle = re.compile(r'.+\W\W.+|[-?].+', re.U) # wrap point inside word
    def __init__(self, document):
        """Set up per-document output accumulators and traversal state."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        self.language = languages.get_language(lcode)
        self.meta = [self.content_type % settings.output_encoding,
                     self.generator % docutils.__version__]
        self.head_prefix = []
        self.html_prolog = []
        if settings.xml_declaration:
            self.head_prefix.append(self.xml_declaration
                                    % settings.output_encoding)
            # encoding not interpolated:
            self.html_prolog.append(self.xml_declaration)
        self.head_prefix.extend([self.doctype,
                                 self.head_prefix_template % (lcode, lcode)])
        self.html_prolog.append(self.doctype)
        self.head = self.meta[:]
        # stylesheets
        styles = utils.get_stylesheet_list(settings)
        if settings.stylesheet_path and not(settings.embed_stylesheet):
            # Rewrite stylesheet paths relative to the output destination.
            styles = [utils.relative_path(settings._destination, sheet)
                      for sheet in styles]
        if settings.embed_stylesheet:
            settings.record_dependencies.add(*styles)
            # NOTE(review): stylesheets are assumed to be UTF-8 encoded.
            self.stylesheet = [self.embedded_stylesheet %
                               unicode(open(sheet).read(), 'utf-8')
                               for sheet in styles]
        else: # link to stylesheets
            self.stylesheet = [self.stylesheet_link % self.encode(stylesheet)
                               for stylesheet in styles]
        self.body_prefix = ['</head>\n<body>\n']
        # document title, subtitle display
        self.body_pre_docinfo = []
        # author, date, etc.
        self.docinfo = []
        self.body = []
        self.fragment = []
        self.body_suffix = ['</body>\n</html>\n']
        self.section_level = 0
        self.initial_header_level = int(settings.initial_header_level)
        # A heterogenous stack used in conjunction with the tree traversal.
        # Make sure that the pops correspond to the pushes:
        self.context = []
        self.topic_classes = []
        self.colspecs = []
        # Compaction flags consulted by should_be_compact_paragraph et al.
        self.compact_p = 1
        self.compact_simple = None
        self.compact_field_list = None
        self.in_docinfo = None
        self.in_sidebar = None
        self.title = []
        self.subtitle = []
        self.header = []
        self.footer = []
        self.html_head = [self.content_type] # charset not interpolated
        self.html_title = []
        self.html_subtitle = []
        self.html_body = []
        # Index into self.body while inside the document title/subtitle.
        self.in_document_title = 0
        self.in_mailto = 0
        self.author_in_authors = None
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text = unicode(text)
return text.translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
ord('@'): u'@', # may thwart some address harvesters
# TODO: convert non-breaking space only if needed?
0xa0: u' '}) # non-breaking space
def cloak_mailto(self, uri):
"""Try to hide a mailto: URL from harvesters."""
# Encode "@" using a URL octet reference (see RFC 1738).
# Further cloaking with HTML entities will be done in the
# `attval` function.
return uri.replace('@', '%40')
def cloak_email(self, addr):
"""Try to hide the link text of a email link from harversters."""
# Surround at-signs and periods with <span> tags. ("@" has
# already been encoded to "@" by the `encode` method.)
addr = addr.replace('@', '<span>@</span>')
addr = addr.replace('.', '<span>.</span>')
return addr
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(' ', text))
if self.in_mailto and self.settings.cloak_email_addresses:
# Cloak at-signs ("%40") and periods with HTML entities.
encoded = encoded.replace('%40', '%40')
encoded = encoded.replace('.', '.')
return encoded
    def starttag(self, node, tagname, suffix='\n', empty=0, **attributes):
        """
        Construct and return a start tag given a node (id & class attributes
        are extracted), tag name, and optional attributes.
        """
        tagname = tagname.lower()
        prefix = []
        atts = {}
        ids = []
        # Normalize keyword attribute names to lowercase.
        for (name, value) in attributes.items():
            atts[name.lower()] = value
        # Merge the node's own classes with any 'class' keyword argument.
        classes = node.get('classes', [])
        if 'class' in atts:
            classes.append(atts['class'])
        if classes:
            atts['class'] = ' '.join(classes)
        assert 'id' not in atts
        ids.extend(node.get('ids', []))
        if 'ids' in atts:
            ids.extend(atts['ids'])
            del atts['ids']
        if ids:
            # First ID goes on the element itself; extras become spans.
            atts['id'] = ids[0]
            for id in ids[1:]:
                # Add empty "span" elements for additional IDs.  Note
                # that we cannot use empty "a" elements because there
                # may be targets inside of references, but nested "a"
                # elements aren't allowed in XHTML (even if they do
                # not all have a "href" attribute).
                if empty:
                    # Empty tag.  Insert target right in front of element.
                    prefix.append('<span id="%s"></span>' % id)
                else:
                    # Non-empty tag.  Place the auxiliary <span> tag
                    # *inside* the element, as the first child.
                    suffix += '<span id="%s"></span>' % id
        # Emit attributes in sorted order for deterministic output.
        attlist = atts.items()
        attlist.sort()
        parts = [tagname]
        for name, value in attlist:
            # value=None was used for boolean attributes without
            # value, but this isn't supported by XHTML.
            assert value is not None
            if isinstance(value, list):
                # List values are space-joined (e.g. multiple classes).
                values = [unicode(v) for v in value]
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(' '.join(values))))
            else:
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(unicode(value))))
        if empty:
            infix = ' /'
        else:
            infix = ''
        return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
    def emptytag(self, node, tagname, suffix='\n', **attributes):
        """Construct and return an XML-compatible empty tag."""
        # Delegates to starttag with empty=1, which emits the " />" infix.
        return self.starttag(node, tagname, suffix, empty=1, **attributes)
def set_class_on_child(self, node, class_, index=0):
"""
Set class `class_` on the visible child no. index of `node`.
Do nothing if node has fewer children than `index`.
"""
children = [n for n in node if not isinstance(n, nodes.Invisible)]
try:
child = children[index]
except IndexError:
return
child['classes'].append(class_)
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
def visit_Text(self, node):
text = node.astext()
encoded = self.encode(text)
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
self.body.append(encoded)
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'acronym', ''))
def depart_acronym(self, node):
self.body.append('</acronym>')
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=None)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
def depart_address(self, node):
self.body.append('\n</pre>\n')
self.depart_docinfo_item()
def visit_admonition(self, node):
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
def depart_admonition(self, node=None):
self.body.append('</div>\n')
attribution_formats = {'dash': ('—', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.context.append(suffix)
self.body.append(
self.starttag(node, 'p', prefix, CLASS='attribution'))
def depart_attribution(self, node):
self.body.append(self.context.pop() + '</p>\n')
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors += 1
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = 0 # initialize counter
def depart_authors(self, node):
self.depart_docinfo_item()
self.author_in_authors = None
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append('</blockquote>\n')
def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return None
else:
return 1
    def is_compactable(self, node):
        # A list is compactable if explicitly marked 'compact', or if the
        # compact-lists setting is on, it is not marked 'open', and either
        # we are already inside a compact context, this is a table of
        # contents, or the list itself passes the simple-list check.
        return ('compact' in node['classes']
                or (self.settings.compact_lists
                    and 'open' not in node['classes']
                    and (self.compact_simple
                         or self.topic_classes == ['contents']
                         or self.check_simple_list(node))))
def visit_bullet_list(self, node):
atts = {}
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
self.body.append(self.starttag(node, 'ul', **atts))
def depart_bullet_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ul>\n')
def visit_caption(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
self.body.append('</p>\n')
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
href = '#' + node['refid']
self.body.append(self.starttag(
node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
def write_colspecs(self):
width = 0
for node in self.colspecs:
width += node['colwidth']
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.colspecs = []
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
"""Escape double-dashes in comment text."""
self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
# Content already processed:
raise nodes.SkipNode
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div', CLASS='compound'))
if len(node) > 1:
node[0]['classes'].append('compound-first')
node[-1]['classes'].append('compound-last')
for child in node[1:-1]:
child['classes'].append('compound-middle')
def depart_compound(self, node):
self.body.append('</div>\n')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div', CLASS='container'))
def depart_container(self, node):
self.body.append('</div>\n')
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=None)
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = 1
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = None
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
    def visit_docinfo_item(self, node, name, meta=1):
        """Open a docinfo table row for `name`; optionally add a meta tag."""
        if meta:
            # Mirror the field into the document <head> as metadata.
            meta_tag = '<meta name="%s" content="%s" />\n' \
                       % (name, self.attval(node.astext()))
            self.add_meta(meta_tag)
        self.body.append(self.starttag(node, 'tr', ''))
        # Label cell uses the language-specific translation of `name`.
        self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
                         % self.language.labels[name])
        if len(node):
            # Mark first/last children so the stylesheet can trim margins.
            if isinstance(node[0], nodes.Element):
                node[0]['classes'].append('first')
            if isinstance(node[-1], nodes.Element):
                node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.head.append('<title>%s</title>\n'
% self.encode(node.get('title', '')))
def depart_document(self, node):
self.fragment.extend(self.body)
self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
self.body_suffix.insert(0, '</div>\n')
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em', ''))
def depart_emphasis(self, node):
self.body.append('</em>')
    def visit_entry(self, node):
        """Open a table cell: <th> for header/stub cells, else <td>."""
        atts = {'class': []}
        if isinstance(node.parent.parent, nodes.thead):
            atts['class'].append('head')
        if node.parent.parent.parent.stubs[node.parent.column]:
            # "stubs" list is an attribute of the tgroup element
            atts['class'].append('stub')
        if atts['class']:
            tagname = 'th'
            atts['class'] = ' '.join(atts['class'])
        else:
            tagname = 'td'
            del atts['class']
        # Track the current column for stub lookup on following entries.
        node.parent.column += 1
        if 'morerows' in node:
            atts['rowspan'] = node['morerows'] + 1
        if 'morecols' in node:
            atts['colspan'] = node['morecols'] + 1
            node.parent.column += node['morecols']
        self.body.append(self.starttag(node, tagname, '', **atts))
        # Closing tag is deferred to depart_entry via the context stack.
        self.context.append('</%s>\n' % tagname.lower())
        if len(node) == 0: # empty cell
            # NOTE(review): upstream docutils emits "&nbsp;" here; a plain
            # space collapses in HTML rendering -- confirm intended filler.
            self.body.append(' ')
        self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
usable.
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = 1
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = 1
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = 0
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
def visit_figure(self, node):
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] += " align-" + node['align']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
self.body.append('</div>\n')
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'div', CLASS='footer'),
'<hr class="footer" />\n']
footer.extend(self.body[start:])
footer.append('\n</div>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
    def footnote_backrefs(self, node):
        """
        Push backlink markup onto the context stack for the label cell.

        Always pushes exactly three entries (popped by visit_label and
        depart_label): backlink prefix, label-close markup, label-open
        markup -- empty strings when backlinks are disabled or absent.
        """
        backlinks = []
        backrefs = node['backrefs']
        if self.settings.footnote_backlinks and backrefs:
            if len(backrefs) == 1:
                # Single backref: the label itself becomes the backlink.
                self.context.append('')
                self.context.append('</a>')
                self.context.append('<a class="fn-backref" href="#%s">'
                                    % backrefs[0])
            else:
                # Multiple backrefs: numbered links after the label.
                i = 1
                for backref in backrefs:
                    backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
                                     % (backref, i))
                    i += 1
                self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
                self.context += ['', '']
        else:
            self.context.append('')
            self.context += ['', '']
        # If the node does not only consist of a label.
        if len(node) > 1:
            # If there are preceding backlinks, we do not set class
            # 'first', because we need to retain the top-margin.
            if not backlinks:
                node[1]['classes'].append('first')
            node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'div', CLASS='header')]
header.extend(self.body[start:])
header.append('\n<hr class="header"/>\n</div>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
    def visit_image(self, node):
        """Emit an <img> tag, handling scale, alignment, and size units."""
        atts = {}
        atts['src'] = node['uri']
        if 'width' in node:
            atts['width'] = node['width']
        if 'height' in node:
            atts['height'] = node['height']
        if 'scale' in node:
            # Scaling needs a base size; if PIL is available and either
            # dimension is missing, read the intrinsic size from the file.
            if Image and not ('width' in node
                              and 'height' in node):
                try:
                    im = Image.open(str(atts['src']))
                except (IOError, # Source image can't be found or opened
                        UnicodeError): # PIL doesn't like Unicode paths.
                    pass
                else:
                    if 'width' not in atts:
                        atts['width'] = str(im.size[0])
                    if 'height' not in atts:
                        atts['height'] = str(im.size[1])
                    del im
            for att_name in 'width', 'height':
                if att_name in atts:
                    # Preserve any unit suffix while scaling the number.
                    match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
                    assert match
                    atts[att_name] = '%s%s' % (
                        float(match.group(1)) * (float(node['scale']) / 100),
                        match.group(2))
        style = []
        for att_name in 'width', 'height':
            if att_name in atts:
                if re.match(r'^[0-9.]+$', atts[att_name]):
                    # Interpret unitless values as pixels.
                    atts[att_name] += 'px'
                # Sizes are emitted as CSS, not width=/height= attributes.
                style.append('%s: %s;' % (att_name, atts[att_name]))
                del atts[att_name]
        if style:
            atts['style'] = ' '.join(style)
        atts['alt'] = node.get('alt', atts['src'])
        if (isinstance(node.parent, nodes.TextElement) or
            (isinstance(node.parent, nodes.reference) and
             not isinstance(node.parent.parent, nodes.TextElement))):
            # Inline context or surrounded by <a>...</a>.
            suffix = ''
        else:
            suffix = '\n'
        if 'classes' in node and 'align-center' in node['classes']:
            node['align'] = 'center'
        if 'align' in node:
            if node['align'] == 'center':
                # "align" attribute is set in surrounding "div" element.
                self.body.append('<div align="center" class="align-center">')
                self.context.append('</div>\n')
                suffix = ''
            else:
                # "align" attribute is set in "img" element.
                atts['align'] = node['align']
                self.context.append('')
                atts['class'] = 'align-%s' % node['align']
        else:
            # Context entry is popped unconditionally in depart_image.
            self.context.append('')
        self.body.append(self.emptytag(node, 'img', suffix, **atts))
def depart_image(self, node):
self.body.append(self.context.pop())
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span', ''))
def depart_inline(self, node):
self.body.append('</span>')
def visit_label(self, node):
# Context added in footnote_backrefs.
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
def visit_legend(self, node):
self.body.append(self.starttag(node, 'div', CLASS='legend'))
def depart_legend(self, node):
self.body.append('</div>\n')
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
if not len(node):
self.body.append('<br />')
def depart_line(self, node):
self.body.append('</div>\n')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append('</div>\n')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
def depart_list_item(self, node):
self.body.append('</li>\n')
def visit_literal(self, node):
"""Process text to prevent tokens from wrapping."""
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.sollbruchstelle.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
self.body.append(' ' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
def visit_meta(self, node):
meta = self.emptytag(node, 'meta', **node.non_default_attributes())
self.add_meta(meta)
def depart_meta(self, node):
pass
def add_meta(self, tag):
self.meta.append(tag)
self.head.append(tag)
def visit_option(self, node):
if self.context[-1]:
self.body.append(', ')
self.body.append(self.starttag(node, 'span', '', CLASS='option'))
def depart_option(self, node):
self.body.append('</span>')
self.context[-1] += 1
def visit_option_argument(self, node):
self.body.append(node.get('delimiter', ' '))
self.body.append(self.starttag(node, 'var', ''))
def depart_option_argument(self, node):
self.body.append('</var>')
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item()
    def should_be_compact_paragraph(self, node):
        """
        Determine if the <p> tags around paragraph ``node`` can be omitted.
        """
        if (isinstance(node.parent, nodes.document) or
            isinstance(node.parent, nodes.compound)):
            # Never compact paragraphs in document or compound.
            return 0
        for key, value in node.attlist():
            if (node.is_not_default(key) and
                not (key == 'classes' and value in
                     ([], ['first'], ['last'], ['first', 'last']))):
                # Attribute which needs to survive.
                return 0
        # `first` is 0 or 1, used as a slice start to skip a leading label.
        first = isinstance(node.parent[0], nodes.label) # skip label
        for child in node.parent.children[first:]:
            # only first paragraph can be compact
            if isinstance(child, nodes.Invisible):
                continue
            if child is node:
                break
            return 0
        # Count visible, non-label siblings (including this node).
        parent_length = len([n for n in node.parent if not isinstance(
            n, (nodes.Invisible, nodes.label))])
        if ( self.compact_simple
             or self.compact_field_list
             or self.compact_p and parent_length == 1):
            return 1
        return 0
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_problematic(self, node):
if node.hasattr('refid'):
self.body.append('<a href="#%s">' % node['refid'])
self.context.append('</a>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))
def depart_problematic(self, node):
self.body.append('</span>')
self.body.append(self.context.pop())
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
if node['classes']:
self.body.append(self.starttag(node, t, suffix=''))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
atts = {'class': 'reference'}
if 'refuri' in node:
atts['href'] = node['refuri']
if ( self.settings.cloak_email_addresses
and atts['href'].startswith('mailto:')):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
atts['class'] += ' external'
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
atts['class'] += ' internal'
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
self.body.append(self.starttag(node, 'a', '', **atts))
def depart_reference(self, node):
self.body.append('</a>')
if not isinstance(node.parent, nodes.TextElement):
self.body.append('\n')
self.in_mailto = 0
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision', meta=None)
def depart_revision(self, node):
self.depart_docinfo_item()
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0
def depart_row(self, node):
self.body.append('</tr>\n')
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
def depart_rubric(self, node):
self.body.append('</p>\n')
def visit_section(self, node):
self.section_level += 1
self.body.append(
self.starttag(node, 'div', CLASS='section'))
def depart_section(self, node):
self.section_level -= 1
self.body.append('</div>\n')
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = 1
def depart_sidebar(self, node):
self.body.append('</div>\n')
self.in_sidebar = None
def visit_status(self, node):
self.visit_docinfo_item(node, 'status', meta=None)
def depart_status(self, node):
self.depart_docinfo_item()
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong', ''))
def depart_strong(self, node):
self.body.append('</strong>')
def visit_subscript(self, node):
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
self.body.append('</sub>')
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
self.body.append('</sup>')
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
    def depart_system_message(self, node):
        """Close the system-message <div>."""
        self.body.append('</div>\n')
def visit_table(self, node):
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="1"))
    def depart_table(self, node):
        """Close the <table> element."""
        self.body.append('</table>\n')
def visit_target(self, node):
if not ('refuri' in node or 'refid' in node
or 'refname' in node):
self.body.append(self.starttag(node, 'span', '', CLASS='target'))
self.context.append('</span>')
else:
self.context.append('')
    def depart_target(self, node):
        # Emit whatever closer visit_target pushed ('</span>' or '').
        self.body.append(self.context.pop())
    def visit_tbody(self, node):
        """Open the <tbody>, first flushing column specs and closing the
        colgroup if a <thead> has not already done so."""
        self.write_colspecs()
        self.body.append(self.context.pop()) # '</colgroup>\n' or ''
        self.body.append(self.starttag(node, 'tbody', valign='top'))
    def depart_tbody(self, node):
        """Close the <tbody> element."""
        self.body.append('</tbody>\n')
    def visit_term(self, node):
        """Open a definition-list term (<dt>)."""
        self.body.append(self.starttag(node, 'dt', ''))
    def depart_term(self, node):
        """
        Leave the end tag to `self.visit_definition()`, in case there's a
        classifier.
        """
        # Intentionally no output here: the matching </dt> is emitted by
        # the definition/classifier visitor.
        pass
    def visit_tgroup(self, node):
        """Open the table's <colgroup>; its closer is deferred via the
        context stack."""
        # Mozilla needs <colgroup>:
        self.body.append(self.starttag(node, 'colgroup'))
        # Appended by thead or tbody:
        self.context.append('</colgroup>\n')
        # Track which columns are header stubs (set by colspec handling).
        node.stubs = []
    def depart_tgroup(self, node):
        # Nothing to close: </colgroup> was already emitted by
        # visit_thead/visit_tbody via the context stack.
        pass
    def visit_thead(self, node):
        """Open the <thead>, closing the pending <colgroup> first."""
        self.write_colspecs()
        self.body.append(self.context.pop()) # '</colgroup>\n'
        # There may or may not be a <thead>; this is for <tbody> to use:
        self.context.append('')
        self.body.append(self.starttag(node, 'thead', valign='bottom'))
    def depart_thead(self, node):
        """Close the <thead> element."""
        self.body.append('</thead>\n')
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0
close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
self.body.append(
self.starttag(node, 'p', '', CLASS='topic-title first'))
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
elif isinstance(node.parent, nodes.table):
self.body.append(
self.starttag(node, 'caption', ''))
close_tag = '</caption>\n'
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
close_tag = '</h1>\n'
self.in_document_title = len(self.body)
else:
assert isinstance(node.parent, nodes.section)
h_level = self.section_level + self.initial_header_level - 1
atts = {}
if (len(node.parent) >= 2 and
isinstance(node.parent[1], nodes.subtitle)):
atts['CLASS'] = 'with-subtitle'
self.body.append(
self.starttag(node, 'h%s' % h_level, '', **atts))
atts = {}
if node.hasattr('refid'):
atts['class'] = 'toc-backref'
atts['href'] = '#' + node['refid']
if atts:
self.body.append(self.starttag({}, 'a', '', **atts))
close_tag = '</a></h%s>\n' % (h_level)
else:
close_tag = '</h%s>\n' % (h_level)
self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.title = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_title.extend(self.body)
del self.body[:]
    def visit_title_reference(self, node):
        """Open a <cite> element for a title reference."""
        self.body.append(self.starttag(node, 'cite', ''))
    def depart_title_reference(self, node):
        """Close the <cite> element."""
        self.body.append('</cite>')
    def visit_topic(self, node):
        """Open a topic <div> and record its classes for nested handling."""
        self.body.append(self.starttag(node, 'div', CLASS='topic'))
        self.topic_classes = node['classes']
    def depart_topic(self, node):
        """Close the topic <div> and clear the recorded classes."""
        self.body.append('</div>\n')
        self.topic_classes = []
    def visit_transition(self, node):
        """Render a transition as a horizontal rule (<hr>)."""
        self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
    def depart_transition(self, node):
        # <hr> is an empty tag; nothing to close.
        pass
    def visit_version(self, node):
        """Render the 'version' bibliographic field (no meta tag)."""
        self.visit_docinfo_item(node, 'version', meta=None)
    def depart_version(self, node):
        """Close the 'version' docinfo item."""
        self.depart_docinfo_item()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):

    """
    Raise `nodes.NodeFound` if non-simple list item is encountered.

    Here "simple" means a list item containing nothing other than a single
    paragraph, a simple list, or a paragraph followed by a simple list.
    """

    def default_visit(self, node):
        # Any node type not explicitly allowed below makes the list
        # non-simple.
        raise nodes.NodeFound

    def visit_bullet_list(self, node):
        # Nested lists are allowed; keep traversing their items.
        pass

    def visit_enumerated_list(self, node):
        pass

    def visit_list_item(self, node):
        # Consider only visible children.  (Idiom fix: build the list
        # with a comprehension instead of a manual append loop.)
        children = [child for child in node.children
                    if not isinstance(child, nodes.Invisible)]
        # A trailing simple list after a leading paragraph is allowed;
        # drop it before counting.
        if (children and isinstance(children[0], nodes.paragraph)
            and (isinstance(children[-1], nodes.bullet_list)
                 or isinstance(children[-1], nodes.enumerated_list))):
            children.pop()
        if len(children) <= 1:
            return
        else:
            raise nodes.NodeFound

    def visit_paragraph(self, node):
        # A single paragraph is fine; no need to inspect its contents.
        raise nodes.SkipNode

    def invisible_visit(self, node):
        """Invisible nodes should be ignored."""
        raise nodes.SkipNode

    visit_comment = invisible_visit
    visit_substitution_definition = invisible_visit
    visit_target = invisible_visit
    visit_pending = invisible_visit
| Python |
# $Id: null.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A do-nothing Writer.
"""
from docutils import writers
class Writer(writers.UnfilteredWriter):

    # A do-nothing writer: translate() produces no output.

    supported = ('null',)
    """Formats this writer supports."""

    # Configuration file section for this writer's settings.
    config_section = 'null writer'
    config_section_dependencies = ('writers',)

    def translate(self):
        # Intentionally empty: the null writer emits nothing.
        pass
| Python |
# $Id$
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This file has been placed in the public domain.
# This is a mapping of Unicode characters to LaTeX equivalents.
# The information has been extracted from
# <http://www.w3.org/2003/entities/xml/unicode.xml>, written by
# David Carlisle and Sebastian Rahtz.
#
# The extraction has been done by the "create_unimap.py" script
# located at <http://docutils.sf.net/tools/dev/create_unimap.py>.
unicode_map = {u'\xa0': '$~$',
u'\xa1': '{\\textexclamdown}',
u'\xa2': '{\\textcent}',
u'\xa3': '{\\textsterling}',
u'\xa4': '{\\textcurrency}',
u'\xa5': '{\\textyen}',
u'\xa6': '{\\textbrokenbar}',
u'\xa7': '{\\textsection}',
u'\xa8': '{\\textasciidieresis}',
u'\xa9': '{\\textcopyright}',
u'\xaa': '{\\textordfeminine}',
u'\xab': '{\\guillemotleft}',
u'\xac': '$\\lnot$',
u'\xad': '$\\-$',
u'\xae': '{\\textregistered}',
u'\xaf': '{\\textasciimacron}',
u'\xb0': '{\\textdegree}',
u'\xb1': '$\\pm$',
u'\xb2': '${^2}$',
u'\xb3': '${^3}$',
u'\xb4': '{\\textasciiacute}',
u'\xb5': '$\\mathrm{\\mu}$',
u'\xb6': '{\\textparagraph}',
u'\xb7': '$\\cdot$',
u'\xb8': '{\\c{}}',
u'\xb9': '${^1}$',
u'\xba': '{\\textordmasculine}',
u'\xbb': '{\\guillemotright}',
u'\xbc': '{\\textonequarter}',
u'\xbd': '{\\textonehalf}',
u'\xbe': '{\\textthreequarters}',
u'\xbf': '{\\textquestiondown}',
u'\xc0': '{\\`{A}}',
u'\xc1': "{\\'{A}}",
u'\xc2': '{\\^{A}}',
u'\xc3': '{\\~{A}}',
u'\xc4': '{\\"{A}}',
u'\xc5': '{\\AA}',
u'\xc6': '{\\AE}',
u'\xc7': '{\\c{C}}',
u'\xc8': '{\\`{E}}',
u'\xc9': "{\\'{E}}",
u'\xca': '{\\^{E}}',
u'\xcb': '{\\"{E}}',
u'\xcc': '{\\`{I}}',
u'\xcd': "{\\'{I}}",
u'\xce': '{\\^{I}}',
u'\xcf': '{\\"{I}}',
u'\xd0': '{\\DH}',
u'\xd1': '{\\~{N}}',
u'\xd2': '{\\`{O}}',
u'\xd3': "{\\'{O}}",
u'\xd4': '{\\^{O}}',
u'\xd5': '{\\~{O}}',
u'\xd6': '{\\"{O}}',
u'\xd7': '{\\texttimes}',
u'\xd8': '{\\O}',
u'\xd9': '{\\`{U}}',
u'\xda': "{\\'{U}}",
u'\xdb': '{\\^{U}}',
u'\xdc': '{\\"{U}}',
u'\xdd': "{\\'{Y}}",
u'\xde': '{\\TH}',
u'\xdf': '{\\ss}',
u'\xe0': '{\\`{a}}',
u'\xe1': "{\\'{a}}",
u'\xe2': '{\\^{a}}',
u'\xe3': '{\\~{a}}',
u'\xe4': '{\\"{a}}',
u'\xe5': '{\\aa}',
u'\xe6': '{\\ae}',
u'\xe7': '{\\c{c}}',
u'\xe8': '{\\`{e}}',
u'\xe9': "{\\'{e}}",
u'\xea': '{\\^{e}}',
u'\xeb': '{\\"{e}}',
u'\xec': '{\\`{\\i}}',
u'\xed': "{\\'{\\i}}",
u'\xee': '{\\^{\\i}}',
u'\xef': '{\\"{\\i}}',
u'\xf0': '{\\dh}',
u'\xf1': '{\\~{n}}',
u'\xf2': '{\\`{o}}',
u'\xf3': "{\\'{o}}",
u'\xf4': '{\\^{o}}',
u'\xf5': '{\\~{o}}',
u'\xf6': '{\\"{o}}',
u'\xf7': '$\\div$',
u'\xf8': '{\\o}',
u'\xf9': '{\\`{u}}',
u'\xfa': "{\\'{u}}",
u'\xfb': '{\\^{u}}',
u'\xfc': '{\\"{u}}',
u'\xfd': "{\\'{y}}",
u'\xfe': '{\\th}',
u'\xff': '{\\"{y}}',
u'\u0100': '{\\={A}}',
u'\u0101': '{\\={a}}',
u'\u0102': '{\\u{A}}',
u'\u0103': '{\\u{a}}',
u'\u0104': '{\\k{A}}',
u'\u0105': '{\\k{a}}',
u'\u0106': "{\\'{C}}",
u'\u0107': "{\\'{c}}",
u'\u0108': '{\\^{C}}',
u'\u0109': '{\\^{c}}',
u'\u010a': '{\\.{C}}',
u'\u010b': '{\\.{c}}',
u'\u010c': '{\\v{C}}',
u'\u010d': '{\\v{c}}',
u'\u010e': '{\\v{D}}',
u'\u010f': '{\\v{d}}',
u'\u0110': '{\\DJ}',
u'\u0111': '{\\dj}',
u'\u0112': '{\\={E}}',
u'\u0113': '{\\={e}}',
u'\u0114': '{\\u{E}}',
u'\u0115': '{\\u{e}}',
u'\u0116': '{\\.{E}}',
u'\u0117': '{\\.{e}}',
u'\u0118': '{\\k{E}}',
u'\u0119': '{\\k{e}}',
u'\u011a': '{\\v{E}}',
u'\u011b': '{\\v{e}}',
u'\u011c': '{\\^{G}}',
u'\u011d': '{\\^{g}}',
u'\u011e': '{\\u{G}}',
u'\u011f': '{\\u{g}}',
u'\u0120': '{\\.{G}}',
u'\u0121': '{\\.{g}}',
u'\u0122': '{\\c{G}}',
u'\u0123': '{\\c{g}}',
u'\u0124': '{\\^{H}}',
u'\u0125': '{\\^{h}}',
u'\u0126': '{{\\fontencoding{LELA}\\selectfont\\char40}}',
u'\u0127': '$\\Elzxh$',
u'\u0128': '{\\~{I}}',
u'\u0129': '{\\~{\\i}}',
u'\u012a': '{\\={I}}',
u'\u012b': '{\\={\\i}}',
u'\u012c': '{\\u{I}}',
u'\u012d': '{\\u{\\i}}',
u'\u012e': '{\\k{I}}',
u'\u012f': '{\\k{i}}',
u'\u0130': '{\\.{I}}',
u'\u0131': '{\\i}',
u'\u0132': '{IJ}',
u'\u0133': '{ij}',
u'\u0134': '{\\^{J}}',
u'\u0135': '{\\^{\\j}}',
u'\u0136': '{\\c{K}}',
u'\u0137': '{\\c{k}}',
u'\u0138': '{{\\fontencoding{LELA}\\selectfont\\char91}}',
u'\u0139': "{\\'{L}}",
u'\u013a': "{\\'{l}}",
u'\u013b': '{\\c{L}}',
u'\u013c': '{\\c{l}}',
u'\u013d': '{\\v{L}}',
u'\u013e': '{\\v{l}}',
u'\u013f': '{{\\fontencoding{LELA}\\selectfont\\char201}}',
u'\u0140': '{{\\fontencoding{LELA}\\selectfont\\char202}}',
u'\u0141': '{\\L}',
u'\u0142': '{\\l}',
u'\u0143': "{\\'{N}}",
u'\u0144': "{\\'{n}}",
u'\u0145': '{\\c{N}}',
u'\u0146': '{\\c{n}}',
u'\u0147': '{\\v{N}}',
u'\u0148': '{\\v{n}}',
u'\u0149': "{'n}",
u'\u014a': '{\\NG}',
u'\u014b': '{\\ng}',
u'\u014c': '{\\={O}}',
u'\u014d': '{\\={o}}',
u'\u014e': '{\\u{O}}',
u'\u014f': '{\\u{o}}',
u'\u0150': '{\\H{O}}',
u'\u0151': '{\\H{o}}',
u'\u0152': '{\\OE}',
u'\u0153': '{\\oe}',
u'\u0154': "{\\'{R}}",
u'\u0155': "{\\'{r}}",
u'\u0156': '{\\c{R}}',
u'\u0157': '{\\c{r}}',
u'\u0158': '{\\v{R}}',
u'\u0159': '{\\v{r}}',
u'\u015a': "{\\'{S}}",
u'\u015b': "{\\'{s}}",
u'\u015c': '{\\^{S}}',
u'\u015d': '{\\^{s}}',
u'\u015e': '{\\c{S}}',
u'\u015f': '{\\c{s}}',
u'\u0160': '{\\v{S}}',
u'\u0161': '{\\v{s}}',
u'\u0162': '{\\c{T}}',
u'\u0163': '{\\c{t}}',
u'\u0164': '{\\v{T}}',
u'\u0165': '{\\v{t}}',
u'\u0166': '{{\\fontencoding{LELA}\\selectfont\\char47}}',
u'\u0167': '{{\\fontencoding{LELA}\\selectfont\\char63}}',
u'\u0168': '{\\~{U}}',
u'\u0169': '{\\~{u}}',
u'\u016a': '{\\={U}}',
u'\u016b': '{\\={u}}',
u'\u016c': '{\\u{U}}',
u'\u016d': '{\\u{u}}',
u'\u016e': '{\\r{U}}',
u'\u016f': '{\\r{u}}',
u'\u0170': '{\\H{U}}',
u'\u0171': '{\\H{u}}',
u'\u0172': '{\\k{U}}',
u'\u0173': '{\\k{u}}',
u'\u0174': '{\\^{W}}',
u'\u0175': '{\\^{w}}',
u'\u0176': '{\\^{Y}}',
u'\u0177': '{\\^{y}}',
u'\u0178': '{\\"{Y}}',
u'\u0179': "{\\'{Z}}",
u'\u017a': "{\\'{z}}",
u'\u017b': '{\\.{Z}}',
u'\u017c': '{\\.{z}}',
u'\u017d': '{\\v{Z}}',
u'\u017e': '{\\v{z}}',
u'\u0192': '$f$',
u'\u0195': '{\\texthvlig}',
u'\u019e': '{\\textnrleg}',
u'\u01aa': '$\\eth$',
u'\u01ba': '{{\\fontencoding{LELA}\\selectfont\\char195}}',
u'\u01c2': '{\\textdoublepipe}',
u'\u01f5': "{\\'{g}}",
u'\u0250': '$\\Elztrna$',
u'\u0252': '$\\Elztrnsa$',
u'\u0254': '$\\Elzopeno$',
u'\u0256': '$\\Elzrtld$',
u'\u0258': '{{\\fontencoding{LEIP}\\selectfont\\char61}}',
u'\u0259': '$\\Elzschwa$',
u'\u025b': '$\\varepsilon$',
u'\u0261': '{g}',
u'\u0263': '$\\Elzpgamma$',
u'\u0264': '$\\Elzpbgam$',
u'\u0265': '$\\Elztrnh$',
u'\u026c': '$\\Elzbtdl$',
u'\u026d': '$\\Elzrtll$',
u'\u026f': '$\\Elztrnm$',
u'\u0270': '$\\Elztrnmlr$',
u'\u0271': '$\\Elzltlmr$',
u'\u0272': '{\\Elzltln}',
u'\u0273': '$\\Elzrtln$',
u'\u0277': '$\\Elzclomeg$',
u'\u0278': '{\\textphi}',
u'\u0279': '$\\Elztrnr$',
u'\u027a': '$\\Elztrnrl$',
u'\u027b': '$\\Elzrttrnr$',
u'\u027c': '$\\Elzrl$',
u'\u027d': '$\\Elzrtlr$',
u'\u027e': '$\\Elzfhr$',
u'\u027f': '{{\\fontencoding{LEIP}\\selectfont\\char202}}',
u'\u0282': '$\\Elzrtls$',
u'\u0283': '$\\Elzesh$',
u'\u0287': '$\\Elztrnt$',
u'\u0288': '$\\Elzrtlt$',
u'\u028a': '$\\Elzpupsil$',
u'\u028b': '$\\Elzpscrv$',
u'\u028c': '$\\Elzinvv$',
u'\u028d': '$\\Elzinvw$',
u'\u028e': '$\\Elztrny$',
u'\u0290': '$\\Elzrtlz$',
u'\u0292': '$\\Elzyogh$',
u'\u0294': '$\\Elzglst$',
u'\u0295': '$\\Elzreglst$',
u'\u0296': '$\\Elzinglst$',
u'\u029e': '{\\textturnk}',
u'\u02a4': '$\\Elzdyogh$',
u'\u02a7': '$\\Elztesh$',
u'\u02bc': "{'}",
u'\u02c7': '{\\textasciicaron}',
u'\u02c8': '$\\Elzverts$',
u'\u02cc': '$\\Elzverti$',
u'\u02d0': '$\\Elzlmrk$',
u'\u02d1': '$\\Elzhlmrk$',
u'\u02d2': '$\\Elzsbrhr$',
u'\u02d3': '$\\Elzsblhr$',
u'\u02d4': '$\\Elzrais$',
u'\u02d5': '$\\Elzlow$',
u'\u02d8': '{\\textasciibreve}',
u'\u02d9': '{\\textperiodcentered}',
u'\u02da': '{\\r{}}',
u'\u02db': '{\\k{}}',
u'\u02dc': '{\\texttildelow}',
u'\u02dd': '{\\H{}}',
u'\u02e5': '{\\tone{55}}',
u'\u02e6': '{\\tone{44}}',
u'\u02e7': '{\\tone{33}}',
u'\u02e8': '{\\tone{22}}',
u'\u02e9': '{\\tone{11}}',
u'\u0300': '{\\`}',
u'\u0301': "{\\'}",
u'\u0302': '{\\^}',
u'\u0303': '{\\~}',
u'\u0304': '{\\=}',
u'\u0306': '{\\u}',
u'\u0307': '{\\.}',
u'\u0308': '{\\"}',
u'\u030a': '{\\r}',
u'\u030b': '{\\H}',
u'\u030c': '{\\v}',
u'\u030f': '{\\cyrchar\\C}',
u'\u0311': '{{\\fontencoding{LECO}\\selectfont\\char177}}',
u'\u0318': '{{\\fontencoding{LECO}\\selectfont\\char184}}',
u'\u0319': '{{\\fontencoding{LECO}\\selectfont\\char185}}',
u'\u0321': '$\\Elzpalh$',
u'\u0322': '{\\Elzrh}',
u'\u0327': '{\\c}',
u'\u0328': '{\\k}',
u'\u032a': '$\\Elzsbbrg$',
u'\u032b': '{{\\fontencoding{LECO}\\selectfont\\char203}}',
u'\u032f': '{{\\fontencoding{LECO}\\selectfont\\char207}}',
u'\u0335': '{\\Elzxl}',
u'\u0336': '{\\Elzbar}',
u'\u0337': '{{\\fontencoding{LECO}\\selectfont\\char215}}',
u'\u0338': '{{\\fontencoding{LECO}\\selectfont\\char216}}',
u'\u033a': '{{\\fontencoding{LECO}\\selectfont\\char218}}',
u'\u033b': '{{\\fontencoding{LECO}\\selectfont\\char219}}',
u'\u033c': '{{\\fontencoding{LECO}\\selectfont\\char220}}',
u'\u033d': '{{\\fontencoding{LECO}\\selectfont\\char221}}',
u'\u0361': '{{\\fontencoding{LECO}\\selectfont\\char225}}',
u'\u0386': "{\\'{A}}",
u'\u0388': "{\\'{E}}",
u'\u0389': "{\\'{H}}",
u'\u038a': "{\\'{}{I}}",
u'\u038c': "{\\'{}O}",
u'\u038e': "$\\mathrm{'Y}$",
u'\u038f': "$\\mathrm{'\\Omega}$",
u'\u0390': '$\\acute{\\ddot{\\iota}}$',
u'\u0391': '$\\Alpha$',
u'\u0392': '$\\Beta$',
u'\u0393': '$\\Gamma$',
u'\u0394': '$\\Delta$',
u'\u0395': '$\\Epsilon$',
u'\u0396': '$\\Zeta$',
u'\u0397': '$\\Eta$',
u'\u0398': '$\\Theta$',
u'\u0399': '$\\Iota$',
u'\u039a': '$\\Kappa$',
u'\u039b': '$\\Lambda$',
u'\u039c': '$M$',
u'\u039d': '$N$',
u'\u039e': '$\\Xi$',
u'\u039f': '$O$',
u'\u03a0': '$\\Pi$',
u'\u03a1': '$\\Rho$',
u'\u03a3': '$\\Sigma$',
u'\u03a4': '$\\Tau$',
u'\u03a5': '$\\Upsilon$',
u'\u03a6': '$\\Phi$',
u'\u03a7': '$\\Chi$',
u'\u03a8': '$\\Psi$',
u'\u03a9': '$\\Omega$',
u'\u03aa': '$\\mathrm{\\ddot{I}}$',
u'\u03ab': '$\\mathrm{\\ddot{Y}}$',
u'\u03ac': "{\\'{$\\alpha$}}",
u'\u03ad': '$\\acute{\\epsilon}$',
u'\u03ae': '$\\acute{\\eta}$',
u'\u03af': '$\\acute{\\iota}$',
u'\u03b0': '$\\acute{\\ddot{\\upsilon}}$',
u'\u03b1': '$\\alpha$',
u'\u03b2': '$\\beta$',
u'\u03b3': '$\\gamma$',
u'\u03b4': '$\\delta$',
u'\u03b5': '$\\epsilon$',
u'\u03b6': '$\\zeta$',
u'\u03b7': '$\\eta$',
u'\u03b8': '{\\texttheta}',
u'\u03b9': '$\\iota$',
u'\u03ba': '$\\kappa$',
u'\u03bb': '$\\lambda$',
u'\u03bc': '$\\mu$',
u'\u03bd': '$\\nu$',
u'\u03be': '$\\xi$',
u'\u03bf': '$o$',
u'\u03c0': '$\\pi$',
u'\u03c1': '$\\rho$',
u'\u03c2': '$\\varsigma$',
u'\u03c3': '$\\sigma$',
u'\u03c4': '$\\tau$',
u'\u03c5': '$\\upsilon$',
u'\u03c6': '$\\varphi$',
u'\u03c7': '$\\chi$',
u'\u03c8': '$\\psi$',
u'\u03c9': '$\\omega$',
u'\u03ca': '$\\ddot{\\iota}$',
u'\u03cb': '$\\ddot{\\upsilon}$',
u'\u03cc': "{\\'{o}}",
u'\u03cd': '$\\acute{\\upsilon}$',
u'\u03ce': '$\\acute{\\omega}$',
u'\u03d0': '{\\Pisymbol{ppi022}{87}}',
u'\u03d1': '{\\textvartheta}',
u'\u03d2': '$\\Upsilon$',
u'\u03d5': '$\\phi$',
u'\u03d6': '$\\varpi$',
u'\u03da': '$\\Stigma$',
u'\u03dc': '$\\Digamma$',
u'\u03dd': '$\\digamma$',
u'\u03de': '$\\Koppa$',
u'\u03e0': '$\\Sampi$',
u'\u03f0': '$\\varkappa$',
u'\u03f1': '$\\varrho$',
u'\u03f4': '{\\textTheta}',
u'\u03f6': '$\\backepsilon$',
u'\u0401': '{\\cyrchar\\CYRYO}',
u'\u0402': '{\\cyrchar\\CYRDJE}',
u'\u0403': "{\\cyrchar{\\'\\CYRG}}",
u'\u0404': '{\\cyrchar\\CYRIE}',
u'\u0405': '{\\cyrchar\\CYRDZE}',
u'\u0406': '{\\cyrchar\\CYRII}',
u'\u0407': '{\\cyrchar\\CYRYI}',
u'\u0408': '{\\cyrchar\\CYRJE}',
u'\u0409': '{\\cyrchar\\CYRLJE}',
u'\u040a': '{\\cyrchar\\CYRNJE}',
u'\u040b': '{\\cyrchar\\CYRTSHE}',
u'\u040c': "{\\cyrchar{\\'\\CYRK}}",
u'\u040e': '{\\cyrchar\\CYRUSHRT}',
u'\u040f': '{\\cyrchar\\CYRDZHE}',
u'\u0410': '{\\cyrchar\\CYRA}',
u'\u0411': '{\\cyrchar\\CYRB}',
u'\u0412': '{\\cyrchar\\CYRV}',
u'\u0413': '{\\cyrchar\\CYRG}',
u'\u0414': '{\\cyrchar\\CYRD}',
u'\u0415': '{\\cyrchar\\CYRE}',
u'\u0416': '{\\cyrchar\\CYRZH}',
u'\u0417': '{\\cyrchar\\CYRZ}',
u'\u0418': '{\\cyrchar\\CYRI}',
u'\u0419': '{\\cyrchar\\CYRISHRT}',
u'\u041a': '{\\cyrchar\\CYRK}',
u'\u041b': '{\\cyrchar\\CYRL}',
u'\u041c': '{\\cyrchar\\CYRM}',
u'\u041d': '{\\cyrchar\\CYRN}',
u'\u041e': '{\\cyrchar\\CYRO}',
u'\u041f': '{\\cyrchar\\CYRP}',
u'\u0420': '{\\cyrchar\\CYRR}',
u'\u0421': '{\\cyrchar\\CYRS}',
u'\u0422': '{\\cyrchar\\CYRT}',
u'\u0423': '{\\cyrchar\\CYRU}',
u'\u0424': '{\\cyrchar\\CYRF}',
u'\u0425': '{\\cyrchar\\CYRH}',
u'\u0426': '{\\cyrchar\\CYRC}',
u'\u0427': '{\\cyrchar\\CYRCH}',
u'\u0428': '{\\cyrchar\\CYRSH}',
u'\u0429': '{\\cyrchar\\CYRSHCH}',
u'\u042a': '{\\cyrchar\\CYRHRDSN}',
u'\u042b': '{\\cyrchar\\CYRERY}',
u'\u042c': '{\\cyrchar\\CYRSFTSN}',
u'\u042d': '{\\cyrchar\\CYREREV}',
u'\u042e': '{\\cyrchar\\CYRYU}',
u'\u042f': '{\\cyrchar\\CYRYA}',
u'\u0430': '{\\cyrchar\\cyra}',
u'\u0431': '{\\cyrchar\\cyrb}',
u'\u0432': '{\\cyrchar\\cyrv}',
u'\u0433': '{\\cyrchar\\cyrg}',
u'\u0434': '{\\cyrchar\\cyrd}',
u'\u0435': '{\\cyrchar\\cyre}',
u'\u0436': '{\\cyrchar\\cyrzh}',
u'\u0437': '{\\cyrchar\\cyrz}',
u'\u0438': '{\\cyrchar\\cyri}',
u'\u0439': '{\\cyrchar\\cyrishrt}',
u'\u043a': '{\\cyrchar\\cyrk}',
u'\u043b': '{\\cyrchar\\cyrl}',
u'\u043c': '{\\cyrchar\\cyrm}',
u'\u043d': '{\\cyrchar\\cyrn}',
u'\u043e': '{\\cyrchar\\cyro}',
u'\u043f': '{\\cyrchar\\cyrp}',
u'\u0440': '{\\cyrchar\\cyrr}',
u'\u0441': '{\\cyrchar\\cyrs}',
u'\u0442': '{\\cyrchar\\cyrt}',
u'\u0443': '{\\cyrchar\\cyru}',
u'\u0444': '{\\cyrchar\\cyrf}',
u'\u0445': '{\\cyrchar\\cyrh}',
u'\u0446': '{\\cyrchar\\cyrc}',
u'\u0447': '{\\cyrchar\\cyrch}',
u'\u0448': '{\\cyrchar\\cyrsh}',
u'\u0449': '{\\cyrchar\\cyrshch}',
u'\u044a': '{\\cyrchar\\cyrhrdsn}',
u'\u044b': '{\\cyrchar\\cyrery}',
u'\u044c': '{\\cyrchar\\cyrsftsn}',
u'\u044d': '{\\cyrchar\\cyrerev}',
u'\u044e': '{\\cyrchar\\cyryu}',
u'\u044f': '{\\cyrchar\\cyrya}',
u'\u0451': '{\\cyrchar\\cyryo}',
u'\u0452': '{\\cyrchar\\cyrdje}',
u'\u0453': "{\\cyrchar{\\'\\cyrg}}",
u'\u0454': '{\\cyrchar\\cyrie}',
u'\u0455': '{\\cyrchar\\cyrdze}',
u'\u0456': '{\\cyrchar\\cyrii}',
u'\u0457': '{\\cyrchar\\cyryi}',
u'\u0458': '{\\cyrchar\\cyrje}',
u'\u0459': '{\\cyrchar\\cyrlje}',
u'\u045a': '{\\cyrchar\\cyrnje}',
u'\u045b': '{\\cyrchar\\cyrtshe}',
u'\u045c': "{\\cyrchar{\\'\\cyrk}}",
u'\u045e': '{\\cyrchar\\cyrushrt}',
u'\u045f': '{\\cyrchar\\cyrdzhe}',
u'\u0460': '{\\cyrchar\\CYROMEGA}',
u'\u0461': '{\\cyrchar\\cyromega}',
u'\u0462': '{\\cyrchar\\CYRYAT}',
u'\u0464': '{\\cyrchar\\CYRIOTE}',
u'\u0465': '{\\cyrchar\\cyriote}',
u'\u0466': '{\\cyrchar\\CYRLYUS}',
u'\u0467': '{\\cyrchar\\cyrlyus}',
u'\u0468': '{\\cyrchar\\CYRIOTLYUS}',
u'\u0469': '{\\cyrchar\\cyriotlyus}',
u'\u046a': '{\\cyrchar\\CYRBYUS}',
u'\u046c': '{\\cyrchar\\CYRIOTBYUS}',
u'\u046d': '{\\cyrchar\\cyriotbyus}',
u'\u046e': '{\\cyrchar\\CYRKSI}',
u'\u046f': '{\\cyrchar\\cyrksi}',
u'\u0470': '{\\cyrchar\\CYRPSI}',
u'\u0471': '{\\cyrchar\\cyrpsi}',
u'\u0472': '{\\cyrchar\\CYRFITA}',
u'\u0474': '{\\cyrchar\\CYRIZH}',
u'\u0478': '{\\cyrchar\\CYRUK}',
u'\u0479': '{\\cyrchar\\cyruk}',
u'\u047a': '{\\cyrchar\\CYROMEGARND}',
u'\u047b': '{\\cyrchar\\cyromegarnd}',
u'\u047c': '{\\cyrchar\\CYROMEGATITLO}',
u'\u047d': '{\\cyrchar\\cyromegatitlo}',
u'\u047e': '{\\cyrchar\\CYROT}',
u'\u047f': '{\\cyrchar\\cyrot}',
u'\u0480': '{\\cyrchar\\CYRKOPPA}',
u'\u0481': '{\\cyrchar\\cyrkoppa}',
u'\u0482': '{\\cyrchar\\cyrthousands}',
u'\u0488': '{\\cyrchar\\cyrhundredthousands}',
u'\u0489': '{\\cyrchar\\cyrmillions}',
u'\u048c': '{\\cyrchar\\CYRSEMISFTSN}',
u'\u048d': '{\\cyrchar\\cyrsemisftsn}',
u'\u048e': '{\\cyrchar\\CYRRTICK}',
u'\u048f': '{\\cyrchar\\cyrrtick}',
u'\u0490': '{\\cyrchar\\CYRGUP}',
u'\u0491': '{\\cyrchar\\cyrgup}',
u'\u0492': '{\\cyrchar\\CYRGHCRS}',
u'\u0493': '{\\cyrchar\\cyrghcrs}',
u'\u0494': '{\\cyrchar\\CYRGHK}',
u'\u0495': '{\\cyrchar\\cyrghk}',
u'\u0496': '{\\cyrchar\\CYRZHDSC}',
u'\u0497': '{\\cyrchar\\cyrzhdsc}',
u'\u0498': '{\\cyrchar\\CYRZDSC}',
u'\u0499': '{\\cyrchar\\cyrzdsc}',
u'\u049a': '{\\cyrchar\\CYRKDSC}',
u'\u049b': '{\\cyrchar\\cyrkdsc}',
u'\u049c': '{\\cyrchar\\CYRKVCRS}',
u'\u049d': '{\\cyrchar\\cyrkvcrs}',
u'\u049e': '{\\cyrchar\\CYRKHCRS}',
u'\u049f': '{\\cyrchar\\cyrkhcrs}',
u'\u04a0': '{\\cyrchar\\CYRKBEAK}',
u'\u04a1': '{\\cyrchar\\cyrkbeak}',
u'\u04a2': '{\\cyrchar\\CYRNDSC}',
u'\u04a3': '{\\cyrchar\\cyrndsc}',
u'\u04a4': '{\\cyrchar\\CYRNG}',
u'\u04a5': '{\\cyrchar\\cyrng}',
u'\u04a6': '{\\cyrchar\\CYRPHK}',
u'\u04a7': '{\\cyrchar\\cyrphk}',
u'\u04a8': '{\\cyrchar\\CYRABHHA}',
u'\u04a9': '{\\cyrchar\\cyrabhha}',
u'\u04aa': '{\\cyrchar\\CYRSDSC}',
u'\u04ab': '{\\cyrchar\\cyrsdsc}',
u'\u04ac': '{\\cyrchar\\CYRTDSC}',
u'\u04ad': '{\\cyrchar\\cyrtdsc}',
u'\u04ae': '{\\cyrchar\\CYRY}',
u'\u04af': '{\\cyrchar\\cyry}',
u'\u04b0': '{\\cyrchar\\CYRYHCRS}',
u'\u04b1': '{\\cyrchar\\cyryhcrs}',
u'\u04b2': '{\\cyrchar\\CYRHDSC}',
u'\u04b3': '{\\cyrchar\\cyrhdsc}',
u'\u04b4': '{\\cyrchar\\CYRTETSE}',
u'\u04b5': '{\\cyrchar\\cyrtetse}',
u'\u04b6': '{\\cyrchar\\CYRCHRDSC}',
u'\u04b7': '{\\cyrchar\\cyrchrdsc}',
u'\u04b8': '{\\cyrchar\\CYRCHVCRS}',
u'\u04b9': '{\\cyrchar\\cyrchvcrs}',
u'\u04ba': '{\\cyrchar\\CYRSHHA}',
u'\u04bb': '{\\cyrchar\\cyrshha}',
u'\u04bc': '{\\cyrchar\\CYRABHCH}',
u'\u04bd': '{\\cyrchar\\cyrabhch}',
u'\u04be': '{\\cyrchar\\CYRABHCHDSC}',
u'\u04bf': '{\\cyrchar\\cyrabhchdsc}',
u'\u04c0': '{\\cyrchar\\CYRpalochka}',
u'\u04c3': '{\\cyrchar\\CYRKHK}',
u'\u04c4': '{\\cyrchar\\cyrkhk}',
u'\u04c7': '{\\cyrchar\\CYRNHK}',
u'\u04c8': '{\\cyrchar\\cyrnhk}',
u'\u04cb': '{\\cyrchar\\CYRCHLDSC}',
u'\u04cc': '{\\cyrchar\\cyrchldsc}',
u'\u04d4': '{\\cyrchar\\CYRAE}',
u'\u04d5': '{\\cyrchar\\cyrae}',
u'\u04d8': '{\\cyrchar\\CYRSCHWA}',
u'\u04d9': '{\\cyrchar\\cyrschwa}',
u'\u04e0': '{\\cyrchar\\CYRABHDZE}',
u'\u04e1': '{\\cyrchar\\cyrabhdze}',
u'\u04e8': '{\\cyrchar\\CYROTLD}',
u'\u04e9': '{\\cyrchar\\cyrotld}',
u'\u2002': '{\\hspace{0.6em}}',
u'\u2003': '{\\hspace{1em}}',
u'\u2004': '{\\hspace{0.33em}}',
u'\u2005': '{\\hspace{0.25em}}',
u'\u2006': '{\\hspace{0.166em}}',
u'\u2007': '{\\hphantom{0}}',
u'\u2008': '{\\hphantom{,}}',
u'\u2009': '{\\hspace{0.167em}}',
u'\u200a': '$\\mkern1mu$',
u'\u2010': '{-}',
u'\u2013': '{\\textendash}',
u'\u2014': '{\\textemdash}',
u'\u2015': '{\\rule{1em}{1pt}}',
u'\u2016': '$\\Vert$',
u'\u2018': '{`}',
u'\u2019': "{'}",
u'\u201a': '{,}',
u'\u201b': '$\\Elzreapos$',
u'\u201c': '{\\textquotedblleft}',
u'\u201d': '{\\textquotedblright}',
u'\u201e': '{,,}',
u'\u2020': '{\\textdagger}',
u'\u2021': '{\\textdaggerdbl}',
u'\u2022': '{\\textbullet}',
u'\u2024': '{.}',
u'\u2025': '{..}',
u'\u2026': '{\\ldots}',
u'\u2030': '{\\textperthousand}',
u'\u2031': '{\\textpertenthousand}',
u'\u2032': "${'}$",
u'\u2033': "${''}$",
u'\u2034': "${'''}$",
u'\u2035': '$\\backprime$',
u'\u2039': '{\\guilsinglleft}',
u'\u203a': '{\\guilsinglright}',
u'\u2057': "$''''$",
u'\u205f': '{\\mkern4mu}',
u'\u2060': '{\\nolinebreak}',
u'\u20a7': '{\\ensuremath{\\Elzpes}}',
u'\u20ac': '{\\mbox{\\texteuro}}',
u'\u20db': '$\\dddot$',
u'\u20dc': '$\\ddddot$',
u'\u2102': '$\\mathbb{C}$',
u'\u210a': '{\\mathscr{g}}',
u'\u210b': '$\\mathscr{H}$',
u'\u210c': '$\\mathfrak{H}$',
u'\u210d': '$\\mathbb{H}$',
u'\u210f': '$\\hslash$',
u'\u2110': '$\\mathscr{I}$',
u'\u2111': '$\\mathfrak{I}$',
u'\u2112': '$\\mathscr{L}$',
u'\u2113': '$\\mathscr{l}$',
u'\u2115': '$\\mathbb{N}$',
u'\u2116': '{\\cyrchar\\textnumero}',
u'\u2118': '$\\wp$',
u'\u2119': '$\\mathbb{P}$',
u'\u211a': '$\\mathbb{Q}$',
u'\u211b': '$\\mathscr{R}$',
u'\u211c': '$\\mathfrak{R}$',
u'\u211d': '$\\mathbb{R}$',
u'\u211e': '$\\Elzxrat$',
u'\u2122': '{\\texttrademark}',
u'\u2124': '$\\mathbb{Z}$',
u'\u2126': '$\\Omega$',
u'\u2127': '$\\mho$',
u'\u2128': '$\\mathfrak{Z}$',
u'\u2129': '$\\ElsevierGlyph{2129}$',
u'\u212b': '{\\AA}',
u'\u212c': '$\\mathscr{B}$',
u'\u212d': '$\\mathfrak{C}$',
u'\u212f': '$\\mathscr{e}$',
u'\u2130': '$\\mathscr{E}$',
u'\u2131': '$\\mathscr{F}$',
u'\u2133': '$\\mathscr{M}$',
u'\u2134': '$\\mathscr{o}$',
u'\u2135': '$\\aleph$',
u'\u2136': '$\\beth$',
u'\u2137': '$\\gimel$',
u'\u2138': '$\\daleth$',
u'\u2153': '$\\textfrac{1}{3}$',
u'\u2154': '$\\textfrac{2}{3}$',
u'\u2155': '$\\textfrac{1}{5}$',
u'\u2156': '$\\textfrac{2}{5}$',
u'\u2157': '$\\textfrac{3}{5}$',
u'\u2158': '$\\textfrac{4}{5}$',
u'\u2159': '$\\textfrac{1}{6}$',
u'\u215a': '$\\textfrac{5}{6}$',
u'\u215b': '$\\textfrac{1}{8}$',
u'\u215c': '$\\textfrac{3}{8}$',
u'\u215d': '$\\textfrac{5}{8}$',
u'\u215e': '$\\textfrac{7}{8}$',
u'\u2190': '$\\leftarrow$',
u'\u2191': '$\\uparrow$',
u'\u2192': '$\\rightarrow$',
u'\u2193': '$\\downarrow$',
u'\u2194': '$\\leftrightarrow$',
u'\u2195': '$\\updownarrow$',
u'\u2196': '$\\nwarrow$',
u'\u2197': '$\\nearrow$',
u'\u2198': '$\\searrow$',
u'\u2199': '$\\swarrow$',
u'\u219a': '$\\nleftarrow$',
u'\u219b': '$\\nrightarrow$',
u'\u219c': '$\\arrowwaveright$',
u'\u219d': '$\\arrowwaveright$',
u'\u219e': '$\\twoheadleftarrow$',
u'\u21a0': '$\\twoheadrightarrow$',
u'\u21a2': '$\\leftarrowtail$',
u'\u21a3': '$\\rightarrowtail$',
u'\u21a6': '$\\mapsto$',
u'\u21a9': '$\\hookleftarrow$',
u'\u21aa': '$\\hookrightarrow$',
u'\u21ab': '$\\looparrowleft$',
u'\u21ac': '$\\looparrowright$',
u'\u21ad': '$\\leftrightsquigarrow$',
u'\u21ae': '$\\nleftrightarrow$',
u'\u21b0': '$\\Lsh$',
u'\u21b1': '$\\Rsh$',
u'\u21b3': '$\\ElsevierGlyph{21B3}$',
u'\u21b6': '$\\curvearrowleft$',
u'\u21b7': '$\\curvearrowright$',
u'\u21ba': '$\\circlearrowleft$',
u'\u21bb': '$\\circlearrowright$',
u'\u21bc': '$\\leftharpoonup$',
u'\u21bd': '$\\leftharpoondown$',
u'\u21be': '$\\upharpoonright$',
u'\u21bf': '$\\upharpoonleft$',
u'\u21c0': '$\\rightharpoonup$',
u'\u21c1': '$\\rightharpoondown$',
u'\u21c2': '$\\downharpoonright$',
u'\u21c3': '$\\downharpoonleft$',
u'\u21c4': '$\\rightleftarrows$',
u'\u21c5': '$\\dblarrowupdown$',
u'\u21c6': '$\\leftrightarrows$',
u'\u21c7': '$\\leftleftarrows$',
u'\u21c8': '$\\upuparrows$',
u'\u21c9': '$\\rightrightarrows$',
u'\u21ca': '$\\downdownarrows$',
u'\u21cb': '$\\leftrightharpoons$',
u'\u21cc': '$\\rightleftharpoons$',
u'\u21cd': '$\\nLeftarrow$',
u'\u21ce': '$\\nLeftrightarrow$',
u'\u21cf': '$\\nRightarrow$',
u'\u21d0': '$\\Leftarrow$',
u'\u21d1': '$\\Uparrow$',
u'\u21d2': '$\\Rightarrow$',
u'\u21d3': '$\\Downarrow$',
u'\u21d4': '$\\Leftrightarrow$',
u'\u21d5': '$\\Updownarrow$',
u'\u21da': '$\\Lleftarrow$',
u'\u21db': '$\\Rrightarrow$',
u'\u21dd': '$\\rightsquigarrow$',
u'\u21f5': '$\\DownArrowUpArrow$',
u'\u2200': '$\\forall$',
u'\u2201': '$\\complement$',
u'\u2202': '$\\partial$',
u'\u2203': '$\\exists$',
u'\u2204': '$\\nexists$',
u'\u2205': '$\\varnothing$',
u'\u2207': '$\\nabla$',
u'\u2208': '$\\in$',
u'\u2209': '$\\not\\in$',
u'\u220b': '$\\ni$',
u'\u220c': '$\\not\\ni$',
u'\u220f': '$\\prod$',
u'\u2210': '$\\coprod$',
u'\u2211': '$\\sum$',
u'\u2212': '{-}',
u'\u2213': '$\\mp$',
u'\u2214': '$\\dotplus$',
u'\u2216': '$\\setminus$',
u'\u2217': '${_\\ast}$',
u'\u2218': '$\\circ$',
u'\u2219': '$\\bullet$',
u'\u221a': '$\\surd$',
u'\u221d': '$\\propto$',
u'\u221e': '$\\infty$',
u'\u221f': '$\\rightangle$',
u'\u2220': '$\\angle$',
u'\u2221': '$\\measuredangle$',
u'\u2222': '$\\sphericalangle$',
u'\u2223': '$\\mid$',
u'\u2224': '$\\nmid$',
u'\u2225': '$\\parallel$',
u'\u2226': '$\\nparallel$',
u'\u2227': '$\\wedge$',
u'\u2228': '$\\vee$',
u'\u2229': '$\\cap$',
u'\u222a': '$\\cup$',
u'\u222b': '$\\int$',
u'\u222c': '$\\int\\!\\int$',
u'\u222d': '$\\int\\!\\int\\!\\int$',
u'\u222e': '$\\oint$',
u'\u222f': '$\\surfintegral$',
u'\u2230': '$\\volintegral$',
u'\u2231': '$\\clwintegral$',
u'\u2232': '$\\ElsevierGlyph{2232}$',
u'\u2233': '$\\ElsevierGlyph{2233}$',
u'\u2234': '$\\therefore$',
u'\u2235': '$\\because$',
u'\u2237': '$\\Colon$',
u'\u2238': '$\\ElsevierGlyph{2238}$',
u'\u223a': '$\\mathbin{{:}\\!\\!{-}\\!\\!{:}}$',
u'\u223b': '$\\homothetic$',
u'\u223c': '$\\sim$',
u'\u223d': '$\\backsim$',
u'\u223e': '$\\lazysinv$',
u'\u2240': '$\\wr$',
u'\u2241': '$\\not\\sim$',
u'\u2242': '$\\ElsevierGlyph{2242}$',
u'\u2243': '$\\simeq$',
u'\u2244': '$\\not\\simeq$',
u'\u2245': '$\\cong$',
u'\u2246': '$\\approxnotequal$',
u'\u2247': '$\\not\\cong$',
u'\u2248': '$\\approx$',
u'\u2249': '$\\not\\approx$',
u'\u224a': '$\\approxeq$',
u'\u224b': '$\\tildetrpl$',
u'\u224c': '$\\allequal$',
u'\u224d': '$\\asymp$',
u'\u224e': '$\\Bumpeq$',
u'\u224f': '$\\bumpeq$',
u'\u2250': '$\\doteq$',
u'\u2251': '$\\doteqdot$',
u'\u2252': '$\\fallingdotseq$',
u'\u2253': '$\\risingdotseq$',
u'\u2254': '{:=}',
u'\u2255': '$=:$',
u'\u2256': '$\\eqcirc$',
u'\u2257': '$\\circeq$',
u'\u2259': '$\\estimates$',
u'\u225a': '$\\ElsevierGlyph{225A}$',
u'\u225b': '$\\starequal$',
u'\u225c': '$\\triangleq$',
u'\u225f': '$\\ElsevierGlyph{225F}$',
u'\u2260': '$\\not =$',
u'\u2261': '$\\equiv$',
u'\u2262': '$\\not\\equiv$',
u'\u2264': '$\\leq$',
u'\u2265': '$\\geq$',
u'\u2266': '$\\leqq$',
u'\u2267': '$\\geqq$',
u'\u2268': '$\\lneqq$',
u'\u2269': '$\\gneqq$',
u'\u226a': '$\\ll$',
u'\u226b': '$\\gg$',
u'\u226c': '$\\between$',
u'\u226d': '$\\not\\kern-0.3em\\times$',
u'\u226e': '$\\not<$',
u'\u226f': '$\\not>$',
u'\u2270': '$\\not\\leq$',
u'\u2271': '$\\not\\geq$',
u'\u2272': '$\\lessequivlnt$',
u'\u2273': '$\\greaterequivlnt$',
u'\u2274': '$\\ElsevierGlyph{2274}$',
u'\u2275': '$\\ElsevierGlyph{2275}$',
u'\u2276': '$\\lessgtr$',
u'\u2277': '$\\gtrless$',
u'\u2278': '$\\notlessgreater$',
u'\u2279': '$\\notgreaterless$',
u'\u227a': '$\\prec$',
u'\u227b': '$\\succ$',
u'\u227c': '$\\preccurlyeq$',
u'\u227d': '$\\succcurlyeq$',
u'\u227e': '$\\precapprox$',
u'\u227f': '$\\succapprox$',
u'\u2280': '$\\not\\prec$',
u'\u2281': '$\\not\\succ$',
u'\u2282': '$\\subset$',
u'\u2283': '$\\supset$',
u'\u2284': '$\\not\\subset$',
u'\u2285': '$\\not\\supset$',
u'\u2286': '$\\subseteq$',
u'\u2287': '$\\supseteq$',
u'\u2288': '$\\not\\subseteq$',
u'\u2289': '$\\not\\supseteq$',
u'\u228a': '$\\subsetneq$',
u'\u228b': '$\\supsetneq$',
u'\u228e': '$\\uplus$',
u'\u228f': '$\\sqsubset$',
u'\u2290': '$\\sqsupset$',
u'\u2291': '$\\sqsubseteq$',
u'\u2292': '$\\sqsupseteq$',
u'\u2293': '$\\sqcap$',
u'\u2294': '$\\sqcup$',
u'\u2295': '$\\oplus$',
u'\u2296': '$\\ominus$',
u'\u2297': '$\\otimes$',
u'\u2298': '$\\oslash$',
u'\u2299': '$\\odot$',
u'\u229a': '$\\circledcirc$',
u'\u229b': '$\\circledast$',
u'\u229d': '$\\circleddash$',
u'\u229e': '$\\boxplus$',
u'\u229f': '$\\boxminus$',
u'\u22a0': '$\\boxtimes$',
u'\u22a1': '$\\boxdot$',
u'\u22a2': '$\\vdash$',
u'\u22a3': '$\\dashv$',
u'\u22a4': '$\\top$',
u'\u22a5': '$\\perp$',
u'\u22a7': '$\\truestate$',
u'\u22a8': '$\\forcesextra$',
u'\u22a9': '$\\Vdash$',
u'\u22aa': '$\\Vvdash$',
u'\u22ab': '$\\VDash$',
u'\u22ac': '$\\nvdash$',
u'\u22ad': '$\\nvDash$',
u'\u22ae': '$\\nVdash$',
u'\u22af': '$\\nVDash$',
u'\u22b2': '$\\vartriangleleft$',
u'\u22b3': '$\\vartriangleright$',
u'\u22b4': '$\\trianglelefteq$',
u'\u22b5': '$\\trianglerighteq$',
u'\u22b6': '$\\original$',
u'\u22b7': '$\\image$',
u'\u22b8': '$\\multimap$',
u'\u22b9': '$\\hermitconjmatrix$',
u'\u22ba': '$\\intercal$',
u'\u22bb': '$\\veebar$',
u'\u22be': '$\\rightanglearc$',
u'\u22c0': '$\\ElsevierGlyph{22C0}$',
u'\u22c1': '$\\ElsevierGlyph{22C1}$',
u'\u22c2': '$\\bigcap$',
u'\u22c3': '$\\bigcup$',
u'\u22c4': '$\\diamond$',
u'\u22c5': '$\\cdot$',
u'\u22c6': '$\\star$',
u'\u22c7': '$\\divideontimes$',
u'\u22c8': '$\\bowtie$',
u'\u22c9': '$\\ltimes$',
u'\u22ca': '$\\rtimes$',
u'\u22cb': '$\\leftthreetimes$',
u'\u22cc': '$\\rightthreetimes$',
u'\u22cd': '$\\backsimeq$',
u'\u22ce': '$\\curlyvee$',
u'\u22cf': '$\\curlywedge$',
u'\u22d0': '$\\Subset$',
u'\u22d1': '$\\Supset$',
u'\u22d2': '$\\Cap$',
u'\u22d3': '$\\Cup$',
u'\u22d4': '$\\pitchfork$',
u'\u22d6': '$\\lessdot$',
u'\u22d7': '$\\gtrdot$',
u'\u22d8': '$\\verymuchless$',
u'\u22d9': '$\\verymuchgreater$',
u'\u22da': '$\\lesseqgtr$',
u'\u22db': '$\\gtreqless$',
u'\u22de': '$\\curlyeqprec$',
u'\u22df': '$\\curlyeqsucc$',
u'\u22e2': '$\\not\\sqsubseteq$',
u'\u22e3': '$\\not\\sqsupseteq$',
u'\u22e5': '$\\Elzsqspne$',
u'\u22e6': '$\\lnsim$',
u'\u22e7': '$\\gnsim$',
u'\u22e8': '$\\precedesnotsimilar$',
u'\u22e9': '$\\succnsim$',
u'\u22ea': '$\\ntriangleleft$',
u'\u22eb': '$\\ntriangleright$',
u'\u22ec': '$\\ntrianglelefteq$',
u'\u22ed': '$\\ntrianglerighteq$',
u'\u22ee': '$\\vdots$',
u'\u22ef': '$\\cdots$',
u'\u22f0': '$\\upslopeellipsis$',
u'\u22f1': '$\\downslopeellipsis$',
u'\u2305': '{\\barwedge}',
u'\u2306': '$\\perspcorrespond$',
u'\u2308': '$\\lceil$',
u'\u2309': '$\\rceil$',
u'\u230a': '$\\lfloor$',
u'\u230b': '$\\rfloor$',
u'\u2315': '$\\recorder$',
u'\u2316': '$\\mathchar"2208$',
u'\u231c': '$\\ulcorner$',
u'\u231d': '$\\urcorner$',
u'\u231e': '$\\llcorner$',
u'\u231f': '$\\lrcorner$',
u'\u2322': '$\\frown$',
u'\u2323': '$\\smile$',
u'\u2329': '$\\langle$',
u'\u232a': '$\\rangle$',
u'\u233d': '$\\ElsevierGlyph{E838}$',
u'\u23a3': '$\\Elzdlcorn$',
u'\u23b0': '$\\lmoustache$',
u'\u23b1': '$\\rmoustache$',
u'\u2423': '{\\textvisiblespace}',
u'\u2460': '{\\ding{172}}',
u'\u2461': '{\\ding{173}}',
u'\u2462': '{\\ding{174}}',
u'\u2463': '{\\ding{175}}',
u'\u2464': '{\\ding{176}}',
u'\u2465': '{\\ding{177}}',
u'\u2466': '{\\ding{178}}',
u'\u2467': '{\\ding{179}}',
u'\u2468': '{\\ding{180}}',
u'\u2469': '{\\ding{181}}',
u'\u24c8': '$\\circledS$',
u'\u2506': '$\\Elzdshfnc$',
u'\u2519': '$\\Elzsqfnw$',
u'\u2571': '$\\diagup$',
u'\u25a0': '{\\ding{110}}',
u'\u25a1': '$\\square$',
u'\u25aa': '$\\blacksquare$',
u'\u25ad': '$\\fbox{~~}$',
u'\u25af': '$\\Elzvrecto$',
u'\u25b1': '$\\ElsevierGlyph{E381}$',
u'\u25b2': '{\\ding{115}}',
u'\u25b3': '$\\bigtriangleup$',
u'\u25b4': '$\\blacktriangle$',
u'\u25b5': '$\\vartriangle$',
u'\u25b8': '$\\blacktriangleright$',
u'\u25b9': '$\\triangleright$',
u'\u25bc': '{\\ding{116}}',
u'\u25bd': '$\\bigtriangledown$',
u'\u25be': '$\\blacktriangledown$',
u'\u25bf': '$\\triangledown$',
u'\u25c2': '$\\blacktriangleleft$',
u'\u25c3': '$\\triangleleft$',
u'\u25c6': '{\\ding{117}}',
u'\u25ca': '$\\lozenge$',
u'\u25cb': '$\\bigcirc$',
u'\u25cf': '{\\ding{108}}',
u'\u25d0': '$\\Elzcirfl$',
u'\u25d1': '$\\Elzcirfr$',
u'\u25d2': '$\\Elzcirfb$',
u'\u25d7': '{\\ding{119}}',
u'\u25d8': '$\\Elzrvbull$',
u'\u25e7': '$\\Elzsqfl$',
u'\u25e8': '$\\Elzsqfr$',
u'\u25ea': '$\\Elzsqfse$',
u'\u25ef': '$\\bigcirc$',
u'\u2605': '{\\ding{72}}',
u'\u2606': '{\\ding{73}}',
u'\u260e': '{\\ding{37}}',
u'\u261b': '{\\ding{42}}',
u'\u261e': '{\\ding{43}}',
u'\u263e': '{\\rightmoon}',
u'\u263f': '{\\mercury}',
u'\u2640': '{\\venus}',
u'\u2642': '{\\male}',
u'\u2643': '{\\jupiter}',
u'\u2644': '{\\saturn}',
u'\u2645': '{\\uranus}',
u'\u2646': '{\\neptune}',
u'\u2647': '{\\pluto}',
u'\u2648': '{\\aries}',
u'\u2649': '{\\taurus}',
u'\u264a': '{\\gemini}',
u'\u264b': '{\\cancer}',
u'\u264c': '{\\leo}',
u'\u264d': '{\\virgo}',
u'\u264e': '{\\libra}',
u'\u264f': '{\\scorpio}',
u'\u2650': '{\\sagittarius}',
u'\u2651': '{\\capricornus}',
u'\u2652': '{\\aquarius}',
u'\u2653': '{\\pisces}',
u'\u2660': '{\\ding{171}}',
u'\u2662': '$\\diamond$',
u'\u2663': '{\\ding{168}}',
u'\u2665': '{\\ding{170}}',
u'\u2666': '{\\ding{169}}',
u'\u2669': '{\\quarternote}',
u'\u266a': '{\\eighthnote}',
u'\u266d': '$\\flat$',
u'\u266e': '$\\natural$',
u'\u266f': '$\\sharp$',
u'\u2701': '{\\ding{33}}',
u'\u2702': '{\\ding{34}}',
u'\u2703': '{\\ding{35}}',
u'\u2704': '{\\ding{36}}',
u'\u2706': '{\\ding{38}}',
u'\u2707': '{\\ding{39}}',
u'\u2708': '{\\ding{40}}',
u'\u2709': '{\\ding{41}}',
u'\u270c': '{\\ding{44}}',
u'\u270d': '{\\ding{45}}',
u'\u270e': '{\\ding{46}}',
u'\u270f': '{\\ding{47}}',
u'\u2710': '{\\ding{48}}',
u'\u2711': '{\\ding{49}}',
u'\u2712': '{\\ding{50}}',
u'\u2713': '{\\ding{51}}',
u'\u2714': '{\\ding{52}}',
u'\u2715': '{\\ding{53}}',
u'\u2716': '{\\ding{54}}',
u'\u2717': '{\\ding{55}}',
u'\u2718': '{\\ding{56}}',
u'\u2719': '{\\ding{57}}',
u'\u271a': '{\\ding{58}}',
u'\u271b': '{\\ding{59}}',
u'\u271c': '{\\ding{60}}',
u'\u271d': '{\\ding{61}}',
u'\u271e': '{\\ding{62}}',
u'\u271f': '{\\ding{63}}',
u'\u2720': '{\\ding{64}}',
u'\u2721': '{\\ding{65}}',
u'\u2722': '{\\ding{66}}',
u'\u2723': '{\\ding{67}}',
u'\u2724': '{\\ding{68}}',
u'\u2725': '{\\ding{69}}',
u'\u2726': '{\\ding{70}}',
u'\u2727': '{\\ding{71}}',
u'\u2729': '{\\ding{73}}',
u'\u272a': '{\\ding{74}}',
u'\u272b': '{\\ding{75}}',
u'\u272c': '{\\ding{76}}',
u'\u272d': '{\\ding{77}}',
u'\u272e': '{\\ding{78}}',
u'\u272f': '{\\ding{79}}',
u'\u2730': '{\\ding{80}}',
u'\u2731': '{\\ding{81}}',
u'\u2732': '{\\ding{82}}',
u'\u2733': '{\\ding{83}}',
u'\u2734': '{\\ding{84}}',
u'\u2735': '{\\ding{85}}',
u'\u2736': '{\\ding{86}}',
u'\u2737': '{\\ding{87}}',
u'\u2738': '{\\ding{88}}',
u'\u2739': '{\\ding{89}}',
u'\u273a': '{\\ding{90}}',
u'\u273b': '{\\ding{91}}',
u'\u273c': '{\\ding{92}}',
u'\u273d': '{\\ding{93}}',
u'\u273e': '{\\ding{94}}',
u'\u273f': '{\\ding{95}}',
u'\u2740': '{\\ding{96}}',
u'\u2741': '{\\ding{97}}',
u'\u2742': '{\\ding{98}}',
u'\u2743': '{\\ding{99}}',
u'\u2744': '{\\ding{100}}',
u'\u2745': '{\\ding{101}}',
u'\u2746': '{\\ding{102}}',
u'\u2747': '{\\ding{103}}',
u'\u2748': '{\\ding{104}}',
u'\u2749': '{\\ding{105}}',
u'\u274a': '{\\ding{106}}',
u'\u274b': '{\\ding{107}}',
u'\u274d': '{\\ding{109}}',
u'\u274f': '{\\ding{111}}',
u'\u2750': '{\\ding{112}}',
u'\u2751': '{\\ding{113}}',
u'\u2752': '{\\ding{114}}',
u'\u2756': '{\\ding{118}}',
u'\u2758': '{\\ding{120}}',
u'\u2759': '{\\ding{121}}',
u'\u275a': '{\\ding{122}}',
u'\u275b': '{\\ding{123}}',
u'\u275c': '{\\ding{124}}',
u'\u275d': '{\\ding{125}}',
u'\u275e': '{\\ding{126}}',
u'\u2761': '{\\ding{161}}',
u'\u2762': '{\\ding{162}}',
u'\u2763': '{\\ding{163}}',
u'\u2764': '{\\ding{164}}',
u'\u2765': '{\\ding{165}}',
u'\u2766': '{\\ding{166}}',
u'\u2767': '{\\ding{167}}',
u'\u2776': '{\\ding{182}}',
u'\u2777': '{\\ding{183}}',
u'\u2778': '{\\ding{184}}',
u'\u2779': '{\\ding{185}}',
u'\u277a': '{\\ding{186}}',
u'\u277b': '{\\ding{187}}',
u'\u277c': '{\\ding{188}}',
u'\u277d': '{\\ding{189}}',
u'\u277e': '{\\ding{190}}',
u'\u277f': '{\\ding{191}}',
u'\u2780': '{\\ding{192}}',
u'\u2781': '{\\ding{193}}',
u'\u2782': '{\\ding{194}}',
u'\u2783': '{\\ding{195}}',
u'\u2784': '{\\ding{196}}',
u'\u2785': '{\\ding{197}}',
u'\u2786': '{\\ding{198}}',
u'\u2787': '{\\ding{199}}',
u'\u2788': '{\\ding{200}}',
u'\u2789': '{\\ding{201}}',
u'\u278a': '{\\ding{202}}',
u'\u278b': '{\\ding{203}}',
u'\u278c': '{\\ding{204}}',
u'\u278d': '{\\ding{205}}',
u'\u278e': '{\\ding{206}}',
u'\u278f': '{\\ding{207}}',
u'\u2790': '{\\ding{208}}',
u'\u2791': '{\\ding{209}}',
u'\u2792': '{\\ding{210}}',
u'\u2793': '{\\ding{211}}',
u'\u2794': '{\\ding{212}}',
u'\u2798': '{\\ding{216}}',
u'\u2799': '{\\ding{217}}',
u'\u279a': '{\\ding{218}}',
u'\u279b': '{\\ding{219}}',
u'\u279c': '{\\ding{220}}',
u'\u279d': '{\\ding{221}}',
u'\u279e': '{\\ding{222}}',
u'\u279f': '{\\ding{223}}',
u'\u27a0': '{\\ding{224}}',
u'\u27a1': '{\\ding{225}}',
u'\u27a2': '{\\ding{226}}',
u'\u27a3': '{\\ding{227}}',
u'\u27a4': '{\\ding{228}}',
u'\u27a5': '{\\ding{229}}',
u'\u27a6': '{\\ding{230}}',
u'\u27a7': '{\\ding{231}}',
u'\u27a8': '{\\ding{232}}',
u'\u27a9': '{\\ding{233}}',
u'\u27aa': '{\\ding{234}}',
u'\u27ab': '{\\ding{235}}',
u'\u27ac': '{\\ding{236}}',
u'\u27ad': '{\\ding{237}}',
u'\u27ae': '{\\ding{238}}',
u'\u27af': '{\\ding{239}}',
u'\u27b1': '{\\ding{241}}',
u'\u27b2': '{\\ding{242}}',
u'\u27b3': '{\\ding{243}}',
u'\u27b4': '{\\ding{244}}',
u'\u27b5': '{\\ding{245}}',
u'\u27b6': '{\\ding{246}}',
u'\u27b7': '{\\ding{247}}',
u'\u27b8': '{\\ding{248}}',
u'\u27b9': '{\\ding{249}}',
u'\u27ba': '{\\ding{250}}',
u'\u27bb': '{\\ding{251}}',
u'\u27bc': '{\\ding{252}}',
u'\u27bd': '{\\ding{253}}',
u'\u27be': '{\\ding{254}}',
u'\u27f5': '$\\longleftarrow$',
u'\u27f6': '$\\longrightarrow$',
u'\u27f7': '$\\longleftrightarrow$',
u'\u27f8': '$\\Longleftarrow$',
u'\u27f9': '$\\Longrightarrow$',
u'\u27fa': '$\\Longleftrightarrow$',
u'\u27fc': '$\\longmapsto$',
u'\u27ff': '$\\sim\\joinrel\\leadsto$',
u'\u2905': '$\\ElsevierGlyph{E212}$',
u'\u2912': '$\\UpArrowBar$',
u'\u2913': '$\\DownArrowBar$',
u'\u2923': '$\\ElsevierGlyph{E20C}$',
u'\u2924': '$\\ElsevierGlyph{E20D}$',
u'\u2925': '$\\ElsevierGlyph{E20B}$',
u'\u2926': '$\\ElsevierGlyph{E20A}$',
u'\u2927': '$\\ElsevierGlyph{E211}$',
u'\u2928': '$\\ElsevierGlyph{E20E}$',
u'\u2929': '$\\ElsevierGlyph{E20F}$',
u'\u292a': '$\\ElsevierGlyph{E210}$',
u'\u2933': '$\\ElsevierGlyph{E21C}$',
u'\u2936': '$\\ElsevierGlyph{E21A}$',
u'\u2937': '$\\ElsevierGlyph{E219}$',
u'\u2940': '$\\Elolarr$',
u'\u2941': '$\\Elorarr$',
u'\u2942': '$\\ElzRlarr$',
u'\u2944': '$\\ElzrLarr$',
u'\u2947': '$\\Elzrarrx$',
u'\u294e': '$\\LeftRightVector$',
u'\u294f': '$\\RightUpDownVector$',
u'\u2950': '$\\DownLeftRightVector$',
u'\u2951': '$\\LeftUpDownVector$',
u'\u2952': '$\\LeftVectorBar$',
u'\u2953': '$\\RightVectorBar$',
u'\u2954': '$\\RightUpVectorBar$',
u'\u2955': '$\\RightDownVectorBar$',
u'\u2956': '$\\DownLeftVectorBar$',
u'\u2957': '$\\DownRightVectorBar$',
u'\u2958': '$\\LeftUpVectorBar$',
u'\u2959': '$\\LeftDownVectorBar$',
u'\u295a': '$\\LeftTeeVector$',
u'\u295b': '$\\RightTeeVector$',
u'\u295c': '$\\RightUpTeeVector$',
u'\u295d': '$\\RightDownTeeVector$',
u'\u295e': '$\\DownLeftTeeVector$',
u'\u295f': '$\\DownRightTeeVector$',
u'\u2960': '$\\LeftUpTeeVector$',
u'\u2961': '$\\LeftDownTeeVector$',
u'\u296e': '$\\UpEquilibrium$',
u'\u296f': '$\\ReverseUpEquilibrium$',
u'\u2970': '$\\RoundImplies$',
u'\u297c': '$\\ElsevierGlyph{E214}$',
u'\u297d': '$\\ElsevierGlyph{E215}$',
u'\u2980': '$\\Elztfnc$',
u'\u2985': '$\\ElsevierGlyph{3018}$',
u'\u2986': '$\\Elroang$',
u'\u2993': '$<\\kern-0.58em($',
u'\u2994': '$\\ElsevierGlyph{E291}$',
u'\u2999': '$\\Elzddfnc$',
u'\u299c': '$\\Angle$',
u'\u29a0': '$\\Elzlpargt$',
u'\u29b5': '$\\ElsevierGlyph{E260}$',
u'\u29b6': '$\\ElsevierGlyph{E61B}$',
u'\u29ca': '$\\ElzLap$',
u'\u29cb': '$\\Elzdefas$',
u'\u29cf': '$\\LeftTriangleBar$',
u'\u29d0': '$\\RightTriangleBar$',
u'\u29dc': '$\\ElsevierGlyph{E372}$',
u'\u29eb': '$\\blacklozenge$',
u'\u29f4': '$\\RuleDelayed$',
u'\u2a04': '$\\Elxuplus$',
u'\u2a05': '$\\ElzThr$',
u'\u2a06': '$\\Elxsqcup$',
u'\u2a07': '$\\ElzInf$',
u'\u2a08': '$\\ElzSup$',
u'\u2a0d': '$\\ElzCint$',
u'\u2a0f': '$\\clockoint$',
u'\u2a10': '$\\ElsevierGlyph{E395}$',
u'\u2a16': '$\\sqrint$',
u'\u2a25': '$\\ElsevierGlyph{E25A}$',
u'\u2a2a': '$\\ElsevierGlyph{E25B}$',
u'\u2a2d': '$\\ElsevierGlyph{E25C}$',
u'\u2a2e': '$\\ElsevierGlyph{E25D}$',
u'\u2a2f': '$\\ElzTimes$',
u'\u2a34': '$\\ElsevierGlyph{E25E}$',
u'\u2a35': '$\\ElsevierGlyph{E25E}$',
u'\u2a3c': '$\\ElsevierGlyph{E259}$',
u'\u2a3f': '$\\amalg$',
u'\u2a53': '$\\ElzAnd$',
u'\u2a54': '$\\ElzOr$',
u'\u2a55': '$\\ElsevierGlyph{E36E}$',
u'\u2a56': '$\\ElOr$',
u'\u2a5e': '$\\perspcorrespond$',
u'\u2a5f': '$\\Elzminhat$',
u'\u2a63': '$\\ElsevierGlyph{225A}$',
u'\u2a6e': '$\\stackrel{*}{=}$',
u'\u2a75': '$\\Equal$',
u'\u2a7d': '$\\leqslant$',
u'\u2a7e': '$\\geqslant$',
u'\u2a85': '$\\lessapprox$',
u'\u2a86': '$\\gtrapprox$',
u'\u2a87': '$\\lneq$',
u'\u2a88': '$\\gneq$',
u'\u2a89': '$\\lnapprox$',
u'\u2a8a': '$\\gnapprox$',
u'\u2a8b': '$\\lesseqqgtr$',
u'\u2a8c': '$\\gtreqqless$',
u'\u2a95': '$\\eqslantless$',
u'\u2a96': '$\\eqslantgtr$',
u'\u2a9d': '$\\Pisymbol{ppi020}{117}$',
u'\u2a9e': '$\\Pisymbol{ppi020}{105}$',
u'\u2aa1': '$\\NestedLessLess$',
u'\u2aa2': '$\\NestedGreaterGreater$',
u'\u2aaf': '$\\preceq$',
u'\u2ab0': '$\\succeq$',
u'\u2ab5': '$\\precneqq$',
u'\u2ab6': '$\\succneqq$',
u'\u2ab7': '$\\precapprox$',
u'\u2ab8': '$\\succapprox$',
u'\u2ab9': '$\\precnapprox$',
u'\u2aba': '$\\succnapprox$',
u'\u2ac5': '$\\subseteqq$',
u'\u2ac6': '$\\supseteqq$',
u'\u2acb': '$\\subsetneqq$',
u'\u2acc': '$\\supsetneqq$',
u'\u2aeb': '$\\ElsevierGlyph{E30D}$',
u'\u2af6': '$\\Elztdcol$',
u'\u2afd': '${{/}\\!\\!{/}}$',
u'\u300a': '$\\ElsevierGlyph{300A}$',
u'\u300b': '$\\ElsevierGlyph{300B}$',
u'\u3018': '$\\ElsevierGlyph{3018}$',
u'\u3019': '$\\ElsevierGlyph{3019}$',
u'\u301a': '$\\openbracketleft$',
u'\u301b': '$\\openbracketright$',
u'\ufb00': '{ff}',
u'\ufb01': '{fi}',
u'\ufb02': '{fl}',
u'\ufb03': '{ffi}',
u'\ufb04': '{ffl}',
u'\U0001d400': '$\\mathbf{A}$',
u'\U0001d401': '$\\mathbf{B}$',
u'\U0001d402': '$\\mathbf{C}$',
u'\U0001d403': '$\\mathbf{D}$',
u'\U0001d404': '$\\mathbf{E}$',
u'\U0001d405': '$\\mathbf{F}$',
u'\U0001d406': '$\\mathbf{G}$',
u'\U0001d407': '$\\mathbf{H}$',
u'\U0001d408': '$\\mathbf{I}$',
u'\U0001d409': '$\\mathbf{J}$',
u'\U0001d40a': '$\\mathbf{K}$',
u'\U0001d40b': '$\\mathbf{L}$',
u'\U0001d40c': '$\\mathbf{M}$',
u'\U0001d40d': '$\\mathbf{N}$',
u'\U0001d40e': '$\\mathbf{O}$',
u'\U0001d40f': '$\\mathbf{P}$',
u'\U0001d410': '$\\mathbf{Q}$',
u'\U0001d411': '$\\mathbf{R}$',
u'\U0001d412': '$\\mathbf{S}$',
u'\U0001d413': '$\\mathbf{T}$',
u'\U0001d414': '$\\mathbf{U}$',
u'\U0001d415': '$\\mathbf{V}$',
u'\U0001d416': '$\\mathbf{W}$',
u'\U0001d417': '$\\mathbf{X}$',
u'\U0001d418': '$\\mathbf{Y}$',
u'\U0001d419': '$\\mathbf{Z}$',
u'\U0001d41a': '$\\mathbf{a}$',
u'\U0001d41b': '$\\mathbf{b}$',
u'\U0001d41c': '$\\mathbf{c}$',
u'\U0001d41d': '$\\mathbf{d}$',
u'\U0001d41e': '$\\mathbf{e}$',
u'\U0001d41f': '$\\mathbf{f}$',
u'\U0001d420': '$\\mathbf{g}$',
u'\U0001d421': '$\\mathbf{h}$',
u'\U0001d422': '$\\mathbf{i}$',
u'\U0001d423': '$\\mathbf{j}$',
u'\U0001d424': '$\\mathbf{k}$',
u'\U0001d425': '$\\mathbf{l}$',
u'\U0001d426': '$\\mathbf{m}$',
u'\U0001d427': '$\\mathbf{n}$',
u'\U0001d428': '$\\mathbf{o}$',
u'\U0001d429': '$\\mathbf{p}$',
u'\U0001d42a': '$\\mathbf{q}$',
u'\U0001d42b': '$\\mathbf{r}$',
u'\U0001d42c': '$\\mathbf{s}$',
u'\U0001d42d': '$\\mathbf{t}$',
u'\U0001d42e': '$\\mathbf{u}$',
u'\U0001d42f': '$\\mathbf{v}$',
u'\U0001d430': '$\\mathbf{w}$',
u'\U0001d431': '$\\mathbf{x}$',
u'\U0001d432': '$\\mathbf{y}$',
u'\U0001d433': '$\\mathbf{z}$',
u'\U0001d434': '$\\mathsl{A}$',
u'\U0001d435': '$\\mathsl{B}$',
u'\U0001d436': '$\\mathsl{C}$',
u'\U0001d437': '$\\mathsl{D}$',
u'\U0001d438': '$\\mathsl{E}$',
u'\U0001d439': '$\\mathsl{F}$',
u'\U0001d43a': '$\\mathsl{G}$',
u'\U0001d43b': '$\\mathsl{H}$',
u'\U0001d43c': '$\\mathsl{I}$',
u'\U0001d43d': '$\\mathsl{J}$',
u'\U0001d43e': '$\\mathsl{K}$',
u'\U0001d43f': '$\\mathsl{L}$',
u'\U0001d440': '$\\mathsl{M}$',
u'\U0001d441': '$\\mathsl{N}$',
u'\U0001d442': '$\\mathsl{O}$',
u'\U0001d443': '$\\mathsl{P}$',
u'\U0001d444': '$\\mathsl{Q}$',
u'\U0001d445': '$\\mathsl{R}$',
u'\U0001d446': '$\\mathsl{S}$',
u'\U0001d447': '$\\mathsl{T}$',
u'\U0001d448': '$\\mathsl{U}$',
u'\U0001d449': '$\\mathsl{V}$',
u'\U0001d44a': '$\\mathsl{W}$',
u'\U0001d44b': '$\\mathsl{X}$',
u'\U0001d44c': '$\\mathsl{Y}$',
u'\U0001d44d': '$\\mathsl{Z}$',
u'\U0001d44e': '$\\mathsl{a}$',
u'\U0001d44f': '$\\mathsl{b}$',
u'\U0001d450': '$\\mathsl{c}$',
u'\U0001d451': '$\\mathsl{d}$',
u'\U0001d452': '$\\mathsl{e}$',
u'\U0001d453': '$\\mathsl{f}$',
u'\U0001d454': '$\\mathsl{g}$',
u'\U0001d456': '$\\mathsl{i}$',
u'\U0001d457': '$\\mathsl{j}$',
u'\U0001d458': '$\\mathsl{k}$',
u'\U0001d459': '$\\mathsl{l}$',
u'\U0001d45a': '$\\mathsl{m}$',
u'\U0001d45b': '$\\mathsl{n}$',
u'\U0001d45c': '$\\mathsl{o}$',
u'\U0001d45d': '$\\mathsl{p}$',
u'\U0001d45e': '$\\mathsl{q}$',
u'\U0001d45f': '$\\mathsl{r}$',
u'\U0001d460': '$\\mathsl{s}$',
u'\U0001d461': '$\\mathsl{t}$',
u'\U0001d462': '$\\mathsl{u}$',
u'\U0001d463': '$\\mathsl{v}$',
u'\U0001d464': '$\\mathsl{w}$',
u'\U0001d465': '$\\mathsl{x}$',
u'\U0001d466': '$\\mathsl{y}$',
u'\U0001d467': '$\\mathsl{z}$',
u'\U0001d468': '$\\mathbit{A}$',
u'\U0001d469': '$\\mathbit{B}$',
u'\U0001d46a': '$\\mathbit{C}$',
u'\U0001d46b': '$\\mathbit{D}$',
u'\U0001d46c': '$\\mathbit{E}$',
u'\U0001d46d': '$\\mathbit{F}$',
u'\U0001d46e': '$\\mathbit{G}$',
u'\U0001d46f': '$\\mathbit{H}$',
u'\U0001d470': '$\\mathbit{I}$',
u'\U0001d471': '$\\mathbit{J}$',
u'\U0001d472': '$\\mathbit{K}$',
u'\U0001d473': '$\\mathbit{L}$',
u'\U0001d474': '$\\mathbit{M}$',
u'\U0001d475': '$\\mathbit{N}$',
u'\U0001d476': '$\\mathbit{O}$',
u'\U0001d477': '$\\mathbit{P}$',
u'\U0001d478': '$\\mathbit{Q}$',
u'\U0001d479': '$\\mathbit{R}$',
u'\U0001d47a': '$\\mathbit{S}$',
u'\U0001d47b': '$\\mathbit{T}$',
u'\U0001d47c': '$\\mathbit{U}$',
u'\U0001d47d': '$\\mathbit{V}$',
u'\U0001d47e': '$\\mathbit{W}$',
u'\U0001d47f': '$\\mathbit{X}$',
u'\U0001d480': '$\\mathbit{Y}$',
u'\U0001d481': '$\\mathbit{Z}$',
u'\U0001d482': '$\\mathbit{a}$',
u'\U0001d483': '$\\mathbit{b}$',
u'\U0001d484': '$\\mathbit{c}$',
u'\U0001d485': '$\\mathbit{d}$',
u'\U0001d486': '$\\mathbit{e}$',
u'\U0001d487': '$\\mathbit{f}$',
u'\U0001d488': '$\\mathbit{g}$',
u'\U0001d489': '$\\mathbit{h}$',
u'\U0001d48a': '$\\mathbit{i}$',
u'\U0001d48b': '$\\mathbit{j}$',
u'\U0001d48c': '$\\mathbit{k}$',
u'\U0001d48d': '$\\mathbit{l}$',
u'\U0001d48e': '$\\mathbit{m}$',
u'\U0001d48f': '$\\mathbit{n}$',
u'\U0001d490': '$\\mathbit{o}$',
u'\U0001d491': '$\\mathbit{p}$',
u'\U0001d492': '$\\mathbit{q}$',
u'\U0001d493': '$\\mathbit{r}$',
u'\U0001d494': '$\\mathbit{s}$',
u'\U0001d495': '$\\mathbit{t}$',
u'\U0001d496': '$\\mathbit{u}$',
u'\U0001d497': '$\\mathbit{v}$',
u'\U0001d498': '$\\mathbit{w}$',
u'\U0001d499': '$\\mathbit{x}$',
u'\U0001d49a': '$\\mathbit{y}$',
u'\U0001d49b': '$\\mathbit{z}$',
u'\U0001d49c': '$\\mathscr{A}$',
u'\U0001d49e': '$\\mathscr{C}$',
u'\U0001d49f': '$\\mathscr{D}$',
u'\U0001d4a2': '$\\mathscr{G}$',
u'\U0001d4a5': '$\\mathscr{J}$',
u'\U0001d4a6': '$\\mathscr{K}$',
u'\U0001d4a9': '$\\mathscr{N}$',
u'\U0001d4aa': '$\\mathscr{O}$',
u'\U0001d4ab': '$\\mathscr{P}$',
u'\U0001d4ac': '$\\mathscr{Q}$',
u'\U0001d4ae': '$\\mathscr{S}$',
u'\U0001d4af': '$\\mathscr{T}$',
u'\U0001d4b0': '$\\mathscr{U}$',
u'\U0001d4b1': '$\\mathscr{V}$',
u'\U0001d4b2': '$\\mathscr{W}$',
u'\U0001d4b3': '$\\mathscr{X}$',
u'\U0001d4b4': '$\\mathscr{Y}$',
u'\U0001d4b5': '$\\mathscr{Z}$',
u'\U0001d4b6': '$\\mathscr{a}$',
u'\U0001d4b7': '$\\mathscr{b}$',
u'\U0001d4b8': '$\\mathscr{c}$',
u'\U0001d4b9': '$\\mathscr{d}$',
u'\U0001d4bb': '$\\mathscr{f}$',
u'\U0001d4bd': '$\\mathscr{h}$',
u'\U0001d4be': '$\\mathscr{i}$',
u'\U0001d4bf': '$\\mathscr{j}$',
u'\U0001d4c0': '$\\mathscr{k}$',
u'\U0001d4c1': '$\\mathscr{l}$',
u'\U0001d4c2': '$\\mathscr{m}$',
u'\U0001d4c3': '$\\mathscr{n}$',
u'\U0001d4c5': '$\\mathscr{p}$',
u'\U0001d4c6': '$\\mathscr{q}$',
u'\U0001d4c7': '$\\mathscr{r}$',
u'\U0001d4c8': '$\\mathscr{s}$',
u'\U0001d4c9': '$\\mathscr{t}$',
u'\U0001d4ca': '$\\mathscr{u}$',
u'\U0001d4cb': '$\\mathscr{v}$',
u'\U0001d4cc': '$\\mathscr{w}$',
u'\U0001d4cd': '$\\mathscr{x}$',
u'\U0001d4ce': '$\\mathscr{y}$',
u'\U0001d4cf': '$\\mathscr{z}$',
u'\U0001d4d0': '$\\mathmit{A}$',
u'\U0001d4d1': '$\\mathmit{B}$',
u'\U0001d4d2': '$\\mathmit{C}$',
u'\U0001d4d3': '$\\mathmit{D}$',
u'\U0001d4d4': '$\\mathmit{E}$',
u'\U0001d4d5': '$\\mathmit{F}$',
u'\U0001d4d6': '$\\mathmit{G}$',
u'\U0001d4d7': '$\\mathmit{H}$',
u'\U0001d4d8': '$\\mathmit{I}$',
u'\U0001d4d9': '$\\mathmit{J}$',
u'\U0001d4da': '$\\mathmit{K}$',
u'\U0001d4db': '$\\mathmit{L}$',
u'\U0001d4dc': '$\\mathmit{M}$',
u'\U0001d4dd': '$\\mathmit{N}$',
u'\U0001d4de': '$\\mathmit{O}$',
u'\U0001d4df': '$\\mathmit{P}$',
u'\U0001d4e0': '$\\mathmit{Q}$',
u'\U0001d4e1': '$\\mathmit{R}$',
u'\U0001d4e2': '$\\mathmit{S}$',
u'\U0001d4e3': '$\\mathmit{T}$',
u'\U0001d4e4': '$\\mathmit{U}$',
u'\U0001d4e5': '$\\mathmit{V}$',
u'\U0001d4e6': '$\\mathmit{W}$',
u'\U0001d4e7': '$\\mathmit{X}$',
u'\U0001d4e8': '$\\mathmit{Y}$',
u'\U0001d4e9': '$\\mathmit{Z}$',
u'\U0001d4ea': '$\\mathmit{a}$',
u'\U0001d4eb': '$\\mathmit{b}$',
u'\U0001d4ec': '$\\mathmit{c}$',
u'\U0001d4ed': '$\\mathmit{d}$',
u'\U0001d4ee': '$\\mathmit{e}$',
u'\U0001d4ef': '$\\mathmit{f}$',
u'\U0001d4f0': '$\\mathmit{g}$',
u'\U0001d4f1': '$\\mathmit{h}$',
u'\U0001d4f2': '$\\mathmit{i}$',
u'\U0001d4f3': '$\\mathmit{j}$',
u'\U0001d4f4': '$\\mathmit{k}$',
u'\U0001d4f5': '$\\mathmit{l}$',
u'\U0001d4f6': '$\\mathmit{m}$',
u'\U0001d4f7': '$\\mathmit{n}$',
u'\U0001d4f8': '$\\mathmit{o}$',
u'\U0001d4f9': '$\\mathmit{p}$',
u'\U0001d4fa': '$\\mathmit{q}$',
u'\U0001d4fb': '$\\mathmit{r}$',
u'\U0001d4fc': '$\\mathmit{s}$',
u'\U0001d4fd': '$\\mathmit{t}$',
u'\U0001d4fe': '$\\mathmit{u}$',
u'\U0001d4ff': '$\\mathmit{v}$',
u'\U0001d500': '$\\mathmit{w}$',
u'\U0001d501': '$\\mathmit{x}$',
u'\U0001d502': '$\\mathmit{y}$',
u'\U0001d503': '$\\mathmit{z}$',
u'\U0001d504': '$\\mathfrak{A}$',
u'\U0001d505': '$\\mathfrak{B}$',
u'\U0001d507': '$\\mathfrak{D}$',
u'\U0001d508': '$\\mathfrak{E}$',
u'\U0001d509': '$\\mathfrak{F}$',
u'\U0001d50a': '$\\mathfrak{G}$',
u'\U0001d50d': '$\\mathfrak{J}$',
u'\U0001d50e': '$\\mathfrak{K}$',
u'\U0001d50f': '$\\mathfrak{L}$',
u'\U0001d510': '$\\mathfrak{M}$',
u'\U0001d511': '$\\mathfrak{N}$',
u'\U0001d512': '$\\mathfrak{O}$',
u'\U0001d513': '$\\mathfrak{P}$',
u'\U0001d514': '$\\mathfrak{Q}$',
u'\U0001d516': '$\\mathfrak{S}$',
u'\U0001d517': '$\\mathfrak{T}$',
u'\U0001d518': '$\\mathfrak{U}$',
u'\U0001d519': '$\\mathfrak{V}$',
u'\U0001d51a': '$\\mathfrak{W}$',
u'\U0001d51b': '$\\mathfrak{X}$',
u'\U0001d51c': '$\\mathfrak{Y}$',
u'\U0001d51e': '$\\mathfrak{a}$',
u'\U0001d51f': '$\\mathfrak{b}$',
u'\U0001d520': '$\\mathfrak{c}$',
u'\U0001d521': '$\\mathfrak{d}$',
u'\U0001d522': '$\\mathfrak{e}$',
u'\U0001d523': '$\\mathfrak{f}$',
u'\U0001d524': '$\\mathfrak{g}$',
u'\U0001d525': '$\\mathfrak{h}$',
u'\U0001d526': '$\\mathfrak{i}$',
u'\U0001d527': '$\\mathfrak{j}$',
u'\U0001d528': '$\\mathfrak{k}$',
u'\U0001d529': '$\\mathfrak{l}$',
u'\U0001d52a': '$\\mathfrak{m}$',
u'\U0001d52b': '$\\mathfrak{n}$',
u'\U0001d52c': '$\\mathfrak{o}$',
u'\U0001d52d': '$\\mathfrak{p}$',
u'\U0001d52e': '$\\mathfrak{q}$',
u'\U0001d52f': '$\\mathfrak{r}$',
u'\U0001d530': '$\\mathfrak{s}$',
u'\U0001d531': '$\\mathfrak{t}$',
u'\U0001d532': '$\\mathfrak{u}$',
u'\U0001d533': '$\\mathfrak{v}$',
u'\U0001d534': '$\\mathfrak{w}$',
u'\U0001d535': '$\\mathfrak{x}$',
u'\U0001d536': '$\\mathfrak{y}$',
u'\U0001d537': '$\\mathfrak{z}$',
u'\U0001d538': '$\\mathbb{A}$',
u'\U0001d539': '$\\mathbb{B}$',
u'\U0001d53b': '$\\mathbb{D}$',
u'\U0001d53c': '$\\mathbb{E}$',
u'\U0001d53d': '$\\mathbb{F}$',
u'\U0001d53e': '$\\mathbb{G}$',
u'\U0001d540': '$\\mathbb{I}$',
u'\U0001d541': '$\\mathbb{J}$',
u'\U0001d542': '$\\mathbb{K}$',
u'\U0001d543': '$\\mathbb{L}$',
u'\U0001d544': '$\\mathbb{M}$',
u'\U0001d546': '$\\mathbb{O}$',
u'\U0001d54a': '$\\mathbb{S}$',
u'\U0001d54b': '$\\mathbb{T}$',
u'\U0001d54c': '$\\mathbb{U}$',
u'\U0001d54d': '$\\mathbb{V}$',
u'\U0001d54e': '$\\mathbb{W}$',
u'\U0001d54f': '$\\mathbb{X}$',
u'\U0001d550': '$\\mathbb{Y}$',
u'\U0001d552': '$\\mathbb{a}$',
u'\U0001d553': '$\\mathbb{b}$',
u'\U0001d554': '$\\mathbb{c}$',
u'\U0001d555': '$\\mathbb{d}$',
u'\U0001d556': '$\\mathbb{e}$',
u'\U0001d557': '$\\mathbb{f}$',
u'\U0001d558': '$\\mathbb{g}$',
u'\U0001d559': '$\\mathbb{h}$',
u'\U0001d55a': '$\\mathbb{i}$',
u'\U0001d55b': '$\\mathbb{j}$',
u'\U0001d55c': '$\\mathbb{k}$',
u'\U0001d55d': '$\\mathbb{l}$',
u'\U0001d55e': '$\\mathbb{m}$',
u'\U0001d55f': '$\\mathbb{n}$',
u'\U0001d560': '$\\mathbb{o}$',
u'\U0001d561': '$\\mathbb{p}$',
u'\U0001d562': '$\\mathbb{q}$',
u'\U0001d563': '$\\mathbb{r}$',
u'\U0001d564': '$\\mathbb{s}$',
u'\U0001d565': '$\\mathbb{t}$',
u'\U0001d566': '$\\mathbb{u}$',
u'\U0001d567': '$\\mathbb{v}$',
u'\U0001d568': '$\\mathbb{w}$',
u'\U0001d569': '$\\mathbb{x}$',
u'\U0001d56a': '$\\mathbb{y}$',
u'\U0001d56b': '$\\mathbb{z}$',
u'\U0001d56c': '$\\mathslbb{A}$',
u'\U0001d56d': '$\\mathslbb{B}$',
u'\U0001d56e': '$\\mathslbb{C}$',
u'\U0001d56f': '$\\mathslbb{D}$',
u'\U0001d570': '$\\mathslbb{E}$',
u'\U0001d571': '$\\mathslbb{F}$',
u'\U0001d572': '$\\mathslbb{G}$',
u'\U0001d573': '$\\mathslbb{H}$',
u'\U0001d574': '$\\mathslbb{I}$',
u'\U0001d575': '$\\mathslbb{J}$',
u'\U0001d576': '$\\mathslbb{K}$',
u'\U0001d577': '$\\mathslbb{L}$',
u'\U0001d578': '$\\mathslbb{M}$',
u'\U0001d579': '$\\mathslbb{N}$',
u'\U0001d57a': '$\\mathslbb{O}$',
u'\U0001d57b': '$\\mathslbb{P}$',
u'\U0001d57c': '$\\mathslbb{Q}$',
u'\U0001d57d': '$\\mathslbb{R}$',
u'\U0001d57e': '$\\mathslbb{S}$',
u'\U0001d57f': '$\\mathslbb{T}$',
u'\U0001d580': '$\\mathslbb{U}$',
u'\U0001d581': '$\\mathslbb{V}$',
u'\U0001d582': '$\\mathslbb{W}$',
u'\U0001d583': '$\\mathslbb{X}$',
u'\U0001d584': '$\\mathslbb{Y}$',
u'\U0001d585': '$\\mathslbb{Z}$',
u'\U0001d586': '$\\mathslbb{a}$',
u'\U0001d587': '$\\mathslbb{b}$',
u'\U0001d588': '$\\mathslbb{c}$',
u'\U0001d589': '$\\mathslbb{d}$',
u'\U0001d58a': '$\\mathslbb{e}$',
u'\U0001d58b': '$\\mathslbb{f}$',
u'\U0001d58c': '$\\mathslbb{g}$',
u'\U0001d58d': '$\\mathslbb{h}$',
u'\U0001d58e': '$\\mathslbb{i}$',
u'\U0001d58f': '$\\mathslbb{j}$',
u'\U0001d590': '$\\mathslbb{k}$',
u'\U0001d591': '$\\mathslbb{l}$',
u'\U0001d592': '$\\mathslbb{m}$',
u'\U0001d593': '$\\mathslbb{n}$',
u'\U0001d594': '$\\mathslbb{o}$',
u'\U0001d595': '$\\mathslbb{p}$',
u'\U0001d596': '$\\mathslbb{q}$',
u'\U0001d597': '$\\mathslbb{r}$',
u'\U0001d598': '$\\mathslbb{s}$',
u'\U0001d599': '$\\mathslbb{t}$',
u'\U0001d59a': '$\\mathslbb{u}$',
u'\U0001d59b': '$\\mathslbb{v}$',
u'\U0001d59c': '$\\mathslbb{w}$',
u'\U0001d59d': '$\\mathslbb{x}$',
u'\U0001d59e': '$\\mathslbb{y}$',
u'\U0001d59f': '$\\mathslbb{z}$',
u'\U0001d5a0': '$\\mathsf{A}$',
u'\U0001d5a1': '$\\mathsf{B}$',
u'\U0001d5a2': '$\\mathsf{C}$',
u'\U0001d5a3': '$\\mathsf{D}$',
u'\U0001d5a4': '$\\mathsf{E}$',
u'\U0001d5a5': '$\\mathsf{F}$',
u'\U0001d5a6': '$\\mathsf{G}$',
u'\U0001d5a7': '$\\mathsf{H}$',
u'\U0001d5a8': '$\\mathsf{I}$',
u'\U0001d5a9': '$\\mathsf{J}$',
u'\U0001d5aa': '$\\mathsf{K}$',
u'\U0001d5ab': '$\\mathsf{L}$',
u'\U0001d5ac': '$\\mathsf{M}$',
u'\U0001d5ad': '$\\mathsf{N}$',
u'\U0001d5ae': '$\\mathsf{O}$',
u'\U0001d5af': '$\\mathsf{P}$',
u'\U0001d5b0': '$\\mathsf{Q}$',
u'\U0001d5b1': '$\\mathsf{R}$',
u'\U0001d5b2': '$\\mathsf{S}$',
u'\U0001d5b3': '$\\mathsf{T}$',
u'\U0001d5b4': '$\\mathsf{U}$',
u'\U0001d5b5': '$\\mathsf{V}$',
u'\U0001d5b6': '$\\mathsf{W}$',
u'\U0001d5b7': '$\\mathsf{X}$',
u'\U0001d5b8': '$\\mathsf{Y}$',
u'\U0001d5b9': '$\\mathsf{Z}$',
u'\U0001d5ba': '$\\mathsf{a}$',
u'\U0001d5bb': '$\\mathsf{b}$',
u'\U0001d5bc': '$\\mathsf{c}$',
u'\U0001d5bd': '$\\mathsf{d}$',
u'\U0001d5be': '$\\mathsf{e}$',
u'\U0001d5bf': '$\\mathsf{f}$',
u'\U0001d5c0': '$\\mathsf{g}$',
u'\U0001d5c1': '$\\mathsf{h}$',
u'\U0001d5c2': '$\\mathsf{i}$',
u'\U0001d5c3': '$\\mathsf{j}$',
u'\U0001d5c4': '$\\mathsf{k}$',
u'\U0001d5c5': '$\\mathsf{l}$',
u'\U0001d5c6': '$\\mathsf{m}$',
u'\U0001d5c7': '$\\mathsf{n}$',
u'\U0001d5c8': '$\\mathsf{o}$',
u'\U0001d5c9': '$\\mathsf{p}$',
u'\U0001d5ca': '$\\mathsf{q}$',
u'\U0001d5cb': '$\\mathsf{r}$',
u'\U0001d5cc': '$\\mathsf{s}$',
u'\U0001d5cd': '$\\mathsf{t}$',
u'\U0001d5ce': '$\\mathsf{u}$',
u'\U0001d5cf': '$\\mathsf{v}$',
u'\U0001d5d0': '$\\mathsf{w}$',
u'\U0001d5d1': '$\\mathsf{x}$',
u'\U0001d5d2': '$\\mathsf{y}$',
u'\U0001d5d3': '$\\mathsf{z}$',
u'\U0001d5d4': '$\\mathsfbf{A}$',
u'\U0001d5d5': '$\\mathsfbf{B}$',
u'\U0001d5d6': '$\\mathsfbf{C}$',
u'\U0001d5d7': '$\\mathsfbf{D}$',
u'\U0001d5d8': '$\\mathsfbf{E}$',
u'\U0001d5d9': '$\\mathsfbf{F}$',
u'\U0001d5da': '$\\mathsfbf{G}$',
u'\U0001d5db': '$\\mathsfbf{H}$',
u'\U0001d5dc': '$\\mathsfbf{I}$',
u'\U0001d5dd': '$\\mathsfbf{J}$',
u'\U0001d5de': '$\\mathsfbf{K}$',
u'\U0001d5df': '$\\mathsfbf{L}$',
u'\U0001d5e0': '$\\mathsfbf{M}$',
u'\U0001d5e1': '$\\mathsfbf{N}$',
u'\U0001d5e2': '$\\mathsfbf{O}$',
u'\U0001d5e3': '$\\mathsfbf{P}$',
u'\U0001d5e4': '$\\mathsfbf{Q}$',
u'\U0001d5e5': '$\\mathsfbf{R}$',
u'\U0001d5e6': '$\\mathsfbf{S}$',
u'\U0001d5e7': '$\\mathsfbf{T}$',
u'\U0001d5e8': '$\\mathsfbf{U}$',
u'\U0001d5e9': '$\\mathsfbf{V}$',
u'\U0001d5ea': '$\\mathsfbf{W}$',
u'\U0001d5eb': '$\\mathsfbf{X}$',
u'\U0001d5ec': '$\\mathsfbf{Y}$',
u'\U0001d5ed': '$\\mathsfbf{Z}$',
u'\U0001d5ee': '$\\mathsfbf{a}$',
u'\U0001d5ef': '$\\mathsfbf{b}$',
u'\U0001d5f0': '$\\mathsfbf{c}$',
u'\U0001d5f1': '$\\mathsfbf{d}$',
u'\U0001d5f2': '$\\mathsfbf{e}$',
u'\U0001d5f3': '$\\mathsfbf{f}$',
u'\U0001d5f4': '$\\mathsfbf{g}$',
u'\U0001d5f5': '$\\mathsfbf{h}$',
u'\U0001d5f6': '$\\mathsfbf{i}$',
u'\U0001d5f7': '$\\mathsfbf{j}$',
u'\U0001d5f8': '$\\mathsfbf{k}$',
u'\U0001d5f9': '$\\mathsfbf{l}$',
u'\U0001d5fa': '$\\mathsfbf{m}$',
u'\U0001d5fb': '$\\mathsfbf{n}$',
u'\U0001d5fc': '$\\mathsfbf{o}$',
u'\U0001d5fd': '$\\mathsfbf{p}$',
u'\U0001d5fe': '$\\mathsfbf{q}$',
u'\U0001d5ff': '$\\mathsfbf{r}$',
u'\U0001d600': '$\\mathsfbf{s}$',
u'\U0001d601': '$\\mathsfbf{t}$',
u'\U0001d602': '$\\mathsfbf{u}$',
u'\U0001d603': '$\\mathsfbf{v}$',
u'\U0001d604': '$\\mathsfbf{w}$',
u'\U0001d605': '$\\mathsfbf{x}$',
u'\U0001d606': '$\\mathsfbf{y}$',
u'\U0001d607': '$\\mathsfbf{z}$',
u'\U0001d608': '$\\mathsfsl{A}$',
u'\U0001d609': '$\\mathsfsl{B}$',
u'\U0001d60a': '$\\mathsfsl{C}$',
u'\U0001d60b': '$\\mathsfsl{D}$',
u'\U0001d60c': '$\\mathsfsl{E}$',
u'\U0001d60d': '$\\mathsfsl{F}$',
u'\U0001d60e': '$\\mathsfsl{G}$',
u'\U0001d60f': '$\\mathsfsl{H}$',
u'\U0001d610': '$\\mathsfsl{I}$',
u'\U0001d611': '$\\mathsfsl{J}$',
u'\U0001d612': '$\\mathsfsl{K}$',
u'\U0001d613': '$\\mathsfsl{L}$',
u'\U0001d614': '$\\mathsfsl{M}$',
u'\U0001d615': '$\\mathsfsl{N}$',
u'\U0001d616': '$\\mathsfsl{O}$',
u'\U0001d617': '$\\mathsfsl{P}$',
u'\U0001d618': '$\\mathsfsl{Q}$',
u'\U0001d619': '$\\mathsfsl{R}$',
u'\U0001d61a': '$\\mathsfsl{S}$',
u'\U0001d61b': '$\\mathsfsl{T}$',
u'\U0001d61c': '$\\mathsfsl{U}$',
u'\U0001d61d': '$\\mathsfsl{V}$',
u'\U0001d61e': '$\\mathsfsl{W}$',
u'\U0001d61f': '$\\mathsfsl{X}$',
u'\U0001d620': '$\\mathsfsl{Y}$',
u'\U0001d621': '$\\mathsfsl{Z}$',
u'\U0001d622': '$\\mathsfsl{a}$',
u'\U0001d623': '$\\mathsfsl{b}$',
u'\U0001d624': '$\\mathsfsl{c}$',
u'\U0001d625': '$\\mathsfsl{d}$',
u'\U0001d626': '$\\mathsfsl{e}$',
u'\U0001d627': '$\\mathsfsl{f}$',
u'\U0001d628': '$\\mathsfsl{g}$',
u'\U0001d629': '$\\mathsfsl{h}$',
u'\U0001d62a': '$\\mathsfsl{i}$',
u'\U0001d62b': '$\\mathsfsl{j}$',
u'\U0001d62c': '$\\mathsfsl{k}$',
u'\U0001d62d': '$\\mathsfsl{l}$',
u'\U0001d62e': '$\\mathsfsl{m}$',
u'\U0001d62f': '$\\mathsfsl{n}$',
u'\U0001d630': '$\\mathsfsl{o}$',
u'\U0001d631': '$\\mathsfsl{p}$',
u'\U0001d632': '$\\mathsfsl{q}$',
u'\U0001d633': '$\\mathsfsl{r}$',
u'\U0001d634': '$\\mathsfsl{s}$',
u'\U0001d635': '$\\mathsfsl{t}$',
u'\U0001d636': '$\\mathsfsl{u}$',
u'\U0001d637': '$\\mathsfsl{v}$',
u'\U0001d638': '$\\mathsfsl{w}$',
u'\U0001d639': '$\\mathsfsl{x}$',
u'\U0001d63a': '$\\mathsfsl{y}$',
u'\U0001d63b': '$\\mathsfsl{z}$',
u'\U0001d63c': '$\\mathsfbfsl{A}$',
u'\U0001d63d': '$\\mathsfbfsl{B}$',
u'\U0001d63e': '$\\mathsfbfsl{C}$',
u'\U0001d63f': '$\\mathsfbfsl{D}$',
u'\U0001d640': '$\\mathsfbfsl{E}$',
u'\U0001d641': '$\\mathsfbfsl{F}$',
u'\U0001d642': '$\\mathsfbfsl{G}$',
u'\U0001d643': '$\\mathsfbfsl{H}$',
u'\U0001d644': '$\\mathsfbfsl{I}$',
u'\U0001d645': '$\\mathsfbfsl{J}$',
u'\U0001d646': '$\\mathsfbfsl{K}$',
u'\U0001d647': '$\\mathsfbfsl{L}$',
u'\U0001d648': '$\\mathsfbfsl{M}$',
u'\U0001d649': '$\\mathsfbfsl{N}$',
u'\U0001d64a': '$\\mathsfbfsl{O}$',
u'\U0001d64b': '$\\mathsfbfsl{P}$',
u'\U0001d64c': '$\\mathsfbfsl{Q}$',
u'\U0001d64d': '$\\mathsfbfsl{R}$',
u'\U0001d64e': '$\\mathsfbfsl{S}$',
u'\U0001d64f': '$\\mathsfbfsl{T}$',
u'\U0001d650': '$\\mathsfbfsl{U}$',
u'\U0001d651': '$\\mathsfbfsl{V}$',
u'\U0001d652': '$\\mathsfbfsl{W}$',
u'\U0001d653': '$\\mathsfbfsl{X}$',
u'\U0001d654': '$\\mathsfbfsl{Y}$',
u'\U0001d655': '$\\mathsfbfsl{Z}$',
u'\U0001d656': '$\\mathsfbfsl{a}$',
u'\U0001d657': '$\\mathsfbfsl{b}$',
u'\U0001d658': '$\\mathsfbfsl{c}$',
u'\U0001d659': '$\\mathsfbfsl{d}$',
u'\U0001d65a': '$\\mathsfbfsl{e}$',
u'\U0001d65b': '$\\mathsfbfsl{f}$',
u'\U0001d65c': '$\\mathsfbfsl{g}$',
u'\U0001d65d': '$\\mathsfbfsl{h}$',
u'\U0001d65e': '$\\mathsfbfsl{i}$',
u'\U0001d65f': '$\\mathsfbfsl{j}$',
u'\U0001d660': '$\\mathsfbfsl{k}$',
u'\U0001d661': '$\\mathsfbfsl{l}$',
u'\U0001d662': '$\\mathsfbfsl{m}$',
u'\U0001d663': '$\\mathsfbfsl{n}$',
u'\U0001d664': '$\\mathsfbfsl{o}$',
u'\U0001d665': '$\\mathsfbfsl{p}$',
u'\U0001d666': '$\\mathsfbfsl{q}$',
u'\U0001d667': '$\\mathsfbfsl{r}$',
u'\U0001d668': '$\\mathsfbfsl{s}$',
u'\U0001d669': '$\\mathsfbfsl{t}$',
u'\U0001d66a': '$\\mathsfbfsl{u}$',
u'\U0001d66b': '$\\mathsfbfsl{v}$',
u'\U0001d66c': '$\\mathsfbfsl{w}$',
u'\U0001d66d': '$\\mathsfbfsl{x}$',
u'\U0001d66e': '$\\mathsfbfsl{y}$',
u'\U0001d66f': '$\\mathsfbfsl{z}$',
u'\U0001d670': '$\\mathtt{A}$',
u'\U0001d671': '$\\mathtt{B}$',
u'\U0001d672': '$\\mathtt{C}$',
u'\U0001d673': '$\\mathtt{D}$',
u'\U0001d674': '$\\mathtt{E}$',
u'\U0001d675': '$\\mathtt{F}$',
u'\U0001d676': '$\\mathtt{G}$',
u'\U0001d677': '$\\mathtt{H}$',
u'\U0001d678': '$\\mathtt{I}$',
u'\U0001d679': '$\\mathtt{J}$',
u'\U0001d67a': '$\\mathtt{K}$',
u'\U0001d67b': '$\\mathtt{L}$',
u'\U0001d67c': '$\\mathtt{M}$',
u'\U0001d67d': '$\\mathtt{N}$',
u'\U0001d67e': '$\\mathtt{O}$',
u'\U0001d67f': '$\\mathtt{P}$',
u'\U0001d680': '$\\mathtt{Q}$',
u'\U0001d681': '$\\mathtt{R}$',
u'\U0001d682': '$\\mathtt{S}$',
u'\U0001d683': '$\\mathtt{T}$',
u'\U0001d684': '$\\mathtt{U}$',
u'\U0001d685': '$\\mathtt{V}$',
u'\U0001d686': '$\\mathtt{W}$',
u'\U0001d687': '$\\mathtt{X}$',
u'\U0001d688': '$\\mathtt{Y}$',
u'\U0001d689': '$\\mathtt{Z}$',
u'\U0001d68a': '$\\mathtt{a}$',
u'\U0001d68b': '$\\mathtt{b}$',
u'\U0001d68c': '$\\mathtt{c}$',
u'\U0001d68d': '$\\mathtt{d}$',
u'\U0001d68e': '$\\mathtt{e}$',
u'\U0001d68f': '$\\mathtt{f}$',
u'\U0001d690': '$\\mathtt{g}$',
u'\U0001d691': '$\\mathtt{h}$',
u'\U0001d692': '$\\mathtt{i}$',
u'\U0001d693': '$\\mathtt{j}$',
u'\U0001d694': '$\\mathtt{k}$',
u'\U0001d695': '$\\mathtt{l}$',
u'\U0001d696': '$\\mathtt{m}$',
u'\U0001d697': '$\\mathtt{n}$',
u'\U0001d698': '$\\mathtt{o}$',
u'\U0001d699': '$\\mathtt{p}$',
u'\U0001d69a': '$\\mathtt{q}$',
u'\U0001d69b': '$\\mathtt{r}$',
u'\U0001d69c': '$\\mathtt{s}$',
u'\U0001d69d': '$\\mathtt{t}$',
u'\U0001d69e': '$\\mathtt{u}$',
u'\U0001d69f': '$\\mathtt{v}$',
u'\U0001d6a0': '$\\mathtt{w}$',
u'\U0001d6a1': '$\\mathtt{x}$',
u'\U0001d6a2': '$\\mathtt{y}$',
u'\U0001d6a3': '$\\mathtt{z}$',
u'\U0001d6a8': '$\\mathbf{\\Alpha}$',
u'\U0001d6a9': '$\\mathbf{\\Beta}$',
u'\U0001d6aa': '$\\mathbf{\\Gamma}$',
u'\U0001d6ab': '$\\mathbf{\\Delta}$',
u'\U0001d6ac': '$\\mathbf{\\Epsilon}$',
u'\U0001d6ad': '$\\mathbf{\\Zeta}$',
u'\U0001d6ae': '$\\mathbf{\\Eta}$',
u'\U0001d6af': '$\\mathbf{\\Theta}$',
u'\U0001d6b0': '$\\mathbf{\\Iota}$',
u'\U0001d6b1': '$\\mathbf{\\Kappa}$',
u'\U0001d6b2': '$\\mathbf{\\Lambda}$',
u'\U0001d6b3': '$M$',
u'\U0001d6b4': '$N$',
u'\U0001d6b5': '$\\mathbf{\\Xi}$',
u'\U0001d6b6': '$O$',
u'\U0001d6b7': '$\\mathbf{\\Pi}$',
u'\U0001d6b8': '$\\mathbf{\\Rho}$',
u'\U0001d6b9': '{\\mathbf{\\vartheta}}',
u'\U0001d6ba': '$\\mathbf{\\Sigma}$',
u'\U0001d6bb': '$\\mathbf{\\Tau}$',
u'\U0001d6bc': '$\\mathbf{\\Upsilon}$',
u'\U0001d6bd': '$\\mathbf{\\Phi}$',
u'\U0001d6be': '$\\mathbf{\\Chi}$',
u'\U0001d6bf': '$\\mathbf{\\Psi}$',
u'\U0001d6c0': '$\\mathbf{\\Omega}$',
u'\U0001d6c1': '$\\mathbf{\\nabla}$',
u'\U0001d6c2': '$\\mathbf{\\Alpha}$',
u'\U0001d6c3': '$\\mathbf{\\Beta}$',
u'\U0001d6c4': '$\\mathbf{\\Gamma}$',
u'\U0001d6c5': '$\\mathbf{\\Delta}$',
u'\U0001d6c6': '$\\mathbf{\\Epsilon}$',
u'\U0001d6c7': '$\\mathbf{\\Zeta}$',
u'\U0001d6c8': '$\\mathbf{\\Eta}$',
u'\U0001d6c9': '$\\mathbf{\\theta}$',
u'\U0001d6ca': '$\\mathbf{\\Iota}$',
u'\U0001d6cb': '$\\mathbf{\\Kappa}$',
u'\U0001d6cc': '$\\mathbf{\\Lambda}$',
u'\U0001d6cd': '$M$',
u'\U0001d6ce': '$N$',
u'\U0001d6cf': '$\\mathbf{\\Xi}$',
u'\U0001d6d0': '$O$',
u'\U0001d6d1': '$\\mathbf{\\Pi}$',
u'\U0001d6d2': '$\\mathbf{\\Rho}$',
u'\U0001d6d3': '$\\mathbf{\\varsigma}$',
u'\U0001d6d4': '$\\mathbf{\\Sigma}$',
u'\U0001d6d5': '$\\mathbf{\\Tau}$',
u'\U0001d6d6': '$\\mathbf{\\Upsilon}$',
u'\U0001d6d7': '$\\mathbf{\\Phi}$',
u'\U0001d6d8': '$\\mathbf{\\Chi}$',
u'\U0001d6d9': '$\\mathbf{\\Psi}$',
u'\U0001d6da': '$\\mathbf{\\Omega}$',
u'\U0001d6db': '$\\partial$',
u'\U0001d6dc': '$\\in$',
u'\U0001d6dd': '{\\mathbf{\\vartheta}}',
u'\U0001d6de': '{\\mathbf{\\varkappa}}',
u'\U0001d6df': '{\\mathbf{\\phi}}',
u'\U0001d6e0': '{\\mathbf{\\varrho}}',
u'\U0001d6e1': '{\\mathbf{\\varpi}}',
u'\U0001d6e2': '$\\mathsl{\\Alpha}$',
u'\U0001d6e3': '$\\mathsl{\\Beta}$',
u'\U0001d6e4': '$\\mathsl{\\Gamma}$',
u'\U0001d6e5': '$\\mathsl{\\Delta}$',
u'\U0001d6e6': '$\\mathsl{\\Epsilon}$',
u'\U0001d6e7': '$\\mathsl{\\Zeta}$',
u'\U0001d6e8': '$\\mathsl{\\Eta}$',
u'\U0001d6e9': '$\\mathsl{\\Theta}$',
u'\U0001d6ea': '$\\mathsl{\\Iota}$',
u'\U0001d6eb': '$\\mathsl{\\Kappa}$',
u'\U0001d6ec': '$\\mathsl{\\Lambda}$',
u'\U0001d6ed': '$M$',
u'\U0001d6ee': '$N$',
u'\U0001d6ef': '$\\mathsl{\\Xi}$',
u'\U0001d6f0': '$O$',
u'\U0001d6f1': '$\\mathsl{\\Pi}$',
u'\U0001d6f2': '$\\mathsl{\\Rho}$',
u'\U0001d6f3': '{\\mathsl{\\vartheta}}',
u'\U0001d6f4': '$\\mathsl{\\Sigma}$',
u'\U0001d6f5': '$\\mathsl{\\Tau}$',
u'\U0001d6f6': '$\\mathsl{\\Upsilon}$',
u'\U0001d6f7': '$\\mathsl{\\Phi}$',
u'\U0001d6f8': '$\\mathsl{\\Chi}$',
u'\U0001d6f9': '$\\mathsl{\\Psi}$',
u'\U0001d6fa': '$\\mathsl{\\Omega}$',
u'\U0001d6fb': '$\\mathsl{\\nabla}$',
u'\U0001d6fc': '$\\mathsl{\\Alpha}$',
u'\U0001d6fd': '$\\mathsl{\\Beta}$',
u'\U0001d6fe': '$\\mathsl{\\Gamma}$',
u'\U0001d6ff': '$\\mathsl{\\Delta}$',
u'\U0001d700': '$\\mathsl{\\Epsilon}$',
u'\U0001d701': '$\\mathsl{\\Zeta}$',
u'\U0001d702': '$\\mathsl{\\Eta}$',
u'\U0001d703': '$\\mathsl{\\Theta}$',
u'\U0001d704': '$\\mathsl{\\Iota}$',
u'\U0001d705': '$\\mathsl{\\Kappa}$',
u'\U0001d706': '$\\mathsl{\\Lambda}$',
u'\U0001d707': '$M$',
u'\U0001d708': '$N$',
u'\U0001d709': '$\\mathsl{\\Xi}$',
u'\U0001d70a': '$O$',
u'\U0001d70b': '$\\mathsl{\\Pi}$',
u'\U0001d70c': '$\\mathsl{\\Rho}$',
u'\U0001d70d': '$\\mathsl{\\varsigma}$',
u'\U0001d70e': '$\\mathsl{\\Sigma}$',
u'\U0001d70f': '$\\mathsl{\\Tau}$',
u'\U0001d710': '$\\mathsl{\\Upsilon}$',
u'\U0001d711': '$\\mathsl{\\Phi}$',
u'\U0001d712': '$\\mathsl{\\Chi}$',
u'\U0001d713': '$\\mathsl{\\Psi}$',
u'\U0001d714': '$\\mathsl{\\Omega}$',
u'\U0001d715': '$\\partial$',
u'\U0001d716': '$\\in$',
u'\U0001d717': '{\\mathsl{\\vartheta}}',
u'\U0001d718': '{\\mathsl{\\varkappa}}',
u'\U0001d719': '{\\mathsl{\\phi}}',
u'\U0001d71a': '{\\mathsl{\\varrho}}',
u'\U0001d71b': '{\\mathsl{\\varpi}}',
u'\U0001d71c': '$\\mathbit{\\Alpha}$',
u'\U0001d71d': '$\\mathbit{\\Beta}$',
u'\U0001d71e': '$\\mathbit{\\Gamma}$',
u'\U0001d71f': '$\\mathbit{\\Delta}$',
u'\U0001d720': '$\\mathbit{\\Epsilon}$',
u'\U0001d721': '$\\mathbit{\\Zeta}$',
u'\U0001d722': '$\\mathbit{\\Eta}$',
u'\U0001d723': '$\\mathbit{\\Theta}$',
u'\U0001d724': '$\\mathbit{\\Iota}$',
u'\U0001d725': '$\\mathbit{\\Kappa}$',
u'\U0001d726': '$\\mathbit{\\Lambda}$',
u'\U0001d727': '$M$',
u'\U0001d728': '$N$',
u'\U0001d729': '$\\mathbit{\\Xi}$',
u'\U0001d72a': '$O$',
u'\U0001d72b': '$\\mathbit{\\Pi}$',
u'\U0001d72c': '$\\mathbit{\\Rho}$',
u'\U0001d72d': '{\\mathbit{O}}',
u'\U0001d72e': '$\\mathbit{\\Sigma}$',
u'\U0001d72f': '$\\mathbit{\\Tau}$',
u'\U0001d730': '$\\mathbit{\\Upsilon}$',
u'\U0001d731': '$\\mathbit{\\Phi}$',
u'\U0001d732': '$\\mathbit{\\Chi}$',
u'\U0001d733': '$\\mathbit{\\Psi}$',
u'\U0001d734': '$\\mathbit{\\Omega}$',
u'\U0001d735': '$\\mathbit{\\nabla}$',
u'\U0001d736': '$\\mathbit{\\Alpha}$',
u'\U0001d737': '$\\mathbit{\\Beta}$',
u'\U0001d738': '$\\mathbit{\\Gamma}$',
u'\U0001d739': '$\\mathbit{\\Delta}$',
u'\U0001d73a': '$\\mathbit{\\Epsilon}$',
u'\U0001d73b': '$\\mathbit{\\Zeta}$',
u'\U0001d73c': '$\\mathbit{\\Eta}$',
u'\U0001d73d': '$\\mathbit{\\Theta}$',
u'\U0001d73e': '$\\mathbit{\\Iota}$',
u'\U0001d73f': '$\\mathbit{\\Kappa}$',
u'\U0001d740': '$\\mathbit{\\Lambda}$',
u'\U0001d741': '$M$',
u'\U0001d742': '$N$',
u'\U0001d743': '$\\mathbit{\\Xi}$',
u'\U0001d744': '$O$',
u'\U0001d745': '$\\mathbit{\\Pi}$',
u'\U0001d746': '$\\mathbit{\\Rho}$',
u'\U0001d747': '$\\mathbit{\\varsigma}$',
u'\U0001d748': '$\\mathbit{\\Sigma}$',
u'\U0001d749': '$\\mathbit{\\Tau}$',
u'\U0001d74a': '$\\mathbit{\\Upsilon}$',
u'\U0001d74b': '$\\mathbit{\\Phi}$',
u'\U0001d74c': '$\\mathbit{\\Chi}$',
u'\U0001d74d': '$\\mathbit{\\Psi}$',
u'\U0001d74e': '$\\mathbit{\\Omega}$',
u'\U0001d74f': '$\\partial$',
u'\U0001d750': '$\\in$',
u'\U0001d751': '{\\mathbit{\\vartheta}}',
u'\U0001d752': '{\\mathbit{\\varkappa}}',
u'\U0001d753': '{\\mathbit{\\phi}}',
u'\U0001d754': '{\\mathbit{\\varrho}}',
u'\U0001d755': '{\\mathbit{\\varpi}}',
u'\U0001d756': '$\\mathsfbf{\\Alpha}$',
u'\U0001d757': '$\\mathsfbf{\\Beta}$',
u'\U0001d758': '$\\mathsfbf{\\Gamma}$',
u'\U0001d759': '$\\mathsfbf{\\Delta}$',
u'\U0001d75a': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d75b': '$\\mathsfbf{\\Zeta}$',
u'\U0001d75c': '$\\mathsfbf{\\Eta}$',
u'\U0001d75d': '$\\mathsfbf{\\Theta}$',
u'\U0001d75e': '$\\mathsfbf{\\Iota}$',
u'\U0001d75f': '$\\mathsfbf{\\Kappa}$',
u'\U0001d760': '$\\mathsfbf{\\Lambda}$',
u'\U0001d761': '$M$',
u'\U0001d762': '$N$',
u'\U0001d763': '$\\mathsfbf{\\Xi}$',
u'\U0001d764': '$O$',
u'\U0001d765': '$\\mathsfbf{\\Pi}$',
u'\U0001d766': '$\\mathsfbf{\\Rho}$',
u'\U0001d767': '{\\mathsfbf{\\vartheta}}',
u'\U0001d768': '$\\mathsfbf{\\Sigma}$',
u'\U0001d769': '$\\mathsfbf{\\Tau}$',
u'\U0001d76a': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d76b': '$\\mathsfbf{\\Phi}$',
u'\U0001d76c': '$\\mathsfbf{\\Chi}$',
u'\U0001d76d': '$\\mathsfbf{\\Psi}$',
u'\U0001d76e': '$\\mathsfbf{\\Omega}$',
u'\U0001d76f': '$\\mathsfbf{\\nabla}$',
u'\U0001d770': '$\\mathsfbf{\\Alpha}$',
u'\U0001d771': '$\\mathsfbf{\\Beta}$',
u'\U0001d772': '$\\mathsfbf{\\Gamma}$',
u'\U0001d773': '$\\mathsfbf{\\Delta}$',
u'\U0001d774': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d775': '$\\mathsfbf{\\Zeta}$',
u'\U0001d776': '$\\mathsfbf{\\Eta}$',
u'\U0001d777': '$\\mathsfbf{\\Theta}$',
u'\U0001d778': '$\\mathsfbf{\\Iota}$',
u'\U0001d779': '$\\mathsfbf{\\Kappa}$',
u'\U0001d77a': '$\\mathsfbf{\\Lambda}$',
u'\U0001d77b': '$M$',
u'\U0001d77c': '$N$',
u'\U0001d77d': '$\\mathsfbf{\\Xi}$',
u'\U0001d77e': '$O$',
u'\U0001d77f': '$\\mathsfbf{\\Pi}$',
u'\U0001d780': '$\\mathsfbf{\\Rho}$',
u'\U0001d781': '$\\mathsfbf{\\varsigma}$',
u'\U0001d782': '$\\mathsfbf{\\Sigma}$',
u'\U0001d783': '$\\mathsfbf{\\Tau}$',
u'\U0001d784': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d785': '$\\mathsfbf{\\Phi}$',
u'\U0001d786': '$\\mathsfbf{\\Chi}$',
u'\U0001d787': '$\\mathsfbf{\\Psi}$',
u'\U0001d788': '$\\mathsfbf{\\Omega}$',
u'\U0001d789': '$\\partial$',
u'\U0001d78a': '$\\in$',
u'\U0001d78b': '{\\mathsfbf{\\vartheta}}',
u'\U0001d78c': '{\\mathsfbf{\\varkappa}}',
u'\U0001d78d': '{\\mathsfbf{\\phi}}',
u'\U0001d78e': '{\\mathsfbf{\\varrho}}',
u'\U0001d78f': '{\\mathsfbf{\\varpi}}',
u'\U0001d790': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d791': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d792': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d793': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d794': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d795': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d796': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d797': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d798': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d799': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d79a': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d79b': '$M$',
u'\U0001d79c': '$N$',
u'\U0001d79d': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d79e': '$O$',
u'\U0001d79f': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7a0': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7a1': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7a2': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7a3': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7a4': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7a5': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7a6': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7a7': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7a8': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7a9': '$\\mathsfbfsl{\\nabla}$',
u'\U0001d7aa': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d7ab': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d7ac': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d7ad': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d7ae': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d7af': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d7b0': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d7b1': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d7b2': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d7b3': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d7b4': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d7b5': '$M$',
u'\U0001d7b6': '$N$',
u'\U0001d7b7': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d7b8': '$O$',
u'\U0001d7b9': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7ba': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7bb': '$\\mathsfbfsl{\\varsigma}$',
u'\U0001d7bc': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7bd': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7be': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7bf': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7c0': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7c1': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7c2': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7c3': '$\\partial$',
u'\U0001d7c4': '$\\in$',
u'\U0001d7c5': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7c6': '{\\mathsfbfsl{\\varkappa}}',
u'\U0001d7c7': '{\\mathsfbfsl{\\phi}}',
u'\U0001d7c8': '{\\mathsfbfsl{\\varrho}}',
u'\U0001d7c9': '{\\mathsfbfsl{\\varpi}}',
u'\U0001d7ce': '$\\mathbf{0}$',
u'\U0001d7cf': '$\\mathbf{1}$',
u'\U0001d7d0': '$\\mathbf{2}$',
u'\U0001d7d1': '$\\mathbf{3}$',
u'\U0001d7d2': '$\\mathbf{4}$',
u'\U0001d7d3': '$\\mathbf{5}$',
u'\U0001d7d4': '$\\mathbf{6}$',
u'\U0001d7d5': '$\\mathbf{7}$',
u'\U0001d7d6': '$\\mathbf{8}$',
u'\U0001d7d7': '$\\mathbf{9}$',
u'\U0001d7d8': '$\\mathbb{0}$',
u'\U0001d7d9': '$\\mathbb{1}$',
u'\U0001d7da': '$\\mathbb{2}$',
u'\U0001d7db': '$\\mathbb{3}$',
u'\U0001d7dc': '$\\mathbb{4}$',
u'\U0001d7dd': '$\\mathbb{5}$',
u'\U0001d7de': '$\\mathbb{6}$',
u'\U0001d7df': '$\\mathbb{7}$',
u'\U0001d7e0': '$\\mathbb{8}$',
u'\U0001d7e1': '$\\mathbb{9}$',
u'\U0001d7e2': '$\\mathsf{0}$',
u'\U0001d7e3': '$\\mathsf{1}$',
u'\U0001d7e4': '$\\mathsf{2}$',
u'\U0001d7e5': '$\\mathsf{3}$',
u'\U0001d7e6': '$\\mathsf{4}$',
u'\U0001d7e7': '$\\mathsf{5}$',
u'\U0001d7e8': '$\\mathsf{6}$',
u'\U0001d7e9': '$\\mathsf{7}$',
u'\U0001d7ea': '$\\mathsf{8}$',
u'\U0001d7eb': '$\\mathsf{9}$',
u'\U0001d7ec': '$\\mathsfbf{0}$',
u'\U0001d7ed': '$\\mathsfbf{1}$',
u'\U0001d7ee': '$\\mathsfbf{2}$',
u'\U0001d7ef': '$\\mathsfbf{3}$',
u'\U0001d7f0': '$\\mathsfbf{4}$',
u'\U0001d7f1': '$\\mathsfbf{5}$',
u'\U0001d7f2': '$\\mathsfbf{6}$',
u'\U0001d7f3': '$\\mathsfbf{7}$',
u'\U0001d7f4': '$\\mathsfbf{8}$',
u'\U0001d7f5': '$\\mathsfbf{9}$',
u'\U0001d7f6': '$\\mathtt{0}$',
u'\U0001d7f7': '$\\mathtt{1}$',
u'\U0001d7f8': '$\\mathtt{2}$',
u'\U0001d7f9': '$\\mathtt{3}$',
u'\U0001d7fa': '$\\mathtt{4}$',
u'\U0001d7fb': '$\\mathtt{5}$',
u'\U0001d7fc': '$\\mathtt{6}$',
u'\U0001d7fd': '$\\mathtt{7}$',
u'\U0001d7fe': '$\\mathtt{8}$',
u'\U0001d7ff': '$\\mathtt{9}$'}
| Python |
# $Id: __init__.py 5738 2008-11-30 08:59:04Z grubert $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
LaTeX2e document tree Writer.
"""
# Thanks to Engelbert Gruber and various contributors for the original
# LaTeX writer, some code and many ideas of which have been used for
# this writer.
__docformat__ = 'reStructuredText'
import re
import os.path
import docutils
from docutils import nodes, writers, utils
from docutils.writers.newlatex2e import unicode_map
from docutils.transforms import writer_aux
class Writer(writers.Writer):
    """Experimental LaTeX2e writer ("newlatex2e") for Docutils."""

    supported = ('newlatex', 'newlatex2e')
    """Formats this writer supports."""

    default_stylesheet = 'base.tex'

    # Path of the default stylesheet relative to the current working
    # directory; the 'dummy' component only serves as a base file name
    # for utils.relative_path.
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))

    # Command-line / config options specific to this writer.
    settings_spec = (
        'LaTeX-Specific Options',
        'Note that this LaTeX writer is still EXPERIMENTAL and not '
        'feature-complete. ',
        (('Specify a stylesheet file. The path is used verbatim to include '
          'the file. Overrides --stylesheet-path.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory. Overrides --stylesheet. Default: "%s"'
          % default_stylesheet_path,
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet',
           'default': default_stylesheet_path}),
         ('Specify a user stylesheet file. See --stylesheet.',
          ['--user-stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'user_stylesheet_path'}),
         ('Specify a user stylesheet file. See --stylesheet-path.',
          ['--user-stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'user_stylesheet'})
         ),)

    settings_defaults = {
        # Many Unicode characters are provided by unicode_map.py, so
        # we can default to latin-1.
        'output_encoding': 'latin-1',
        'output_encoding_error_handler': 'strict',
        # Since we are using superscript footnotes, it is necessary to
        # trim whitespace in front of footnote references.
        'trim_footnote_reference_space': 1,
        # Currently unsupported:
        'docinfo_xform': 0,
        # During development:
        'traceback': 1}

    relative_path_settings = ('stylesheet_path', 'user_stylesheet_path')

    config_section = 'newlatex2e writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def get_transforms(self):
        # Resolve compound elements and admonitions before translation.
        return writers.Writer.get_transforms(self) + [
            writer_aux.Compound, writer_aux.Admonitions]

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    def translate(self):
        """Walk the document tree and collect header, body, and output."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # The translator's context stack must be balanced when done.
        assert not visitor.context, 'context not empty: %s' % visitor.context
        self.output = visitor.astext()
        self.head = visitor.header
        self.body = visitor.body
class LaTeXException(Exception):
    """
    Exception base class for exceptions which influence the
    automatic generation of LaTeX code.
    """
class SkipAttrParentLaTeX(LaTeXException):
    r"""
    Do not generate ``\DECattr`` and ``\renewcommand{\DEVparent}{...}`` for this
    node.

    To be raised from ``before_...`` methods.
    """
class SkipParentLaTeX(LaTeXException):
    r"""
    Do not generate ``\renewcommand{\DEVparent}{...}`` for this node.

    To be raised from ``before_...`` methods.
    """
class LaTeXTranslator(nodes.SparseNodeVisitor):

    # Country code by a.schlock.
    # Partly manually converted from iso and babel stuff.
    # Maps ISO 639 language codes to babel language names; consumed by
    # write_header() for the \DEVlanguagebabel definition.
    iso639_to_babel = {
        'no': 'norsk', # added by hand
        'gd': 'scottish', # added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
        }

    # Start with left double quote.  Toggled by encode() every time a
    # double quote is emitted, alternating left/right quote macros.
    left_quote = 1
def __init__(self, document):
    """Initialize translator state for `document` and emit the header."""
    nodes.NodeVisitor.__init__(self, document)
    self.settings = document.settings
    self.header = []   # preamble lines, joined with newlines in astext()
    self.body = []     # body fragments, joined without separator in astext()
    self.context = []  # stack of closing strings (used by table handling)
    self.stylesheet_path = utils.get_stylesheet_reference(
        self.settings, os.path.join(os.getcwd(), 'dummy'))
    if self.stylesheet_path:
        self.settings.record_dependencies.add(self.stylesheet_path)
    # This ugly hack will be cleaned up when refactoring the
    # stylesheet mess.
    self.settings.stylesheet = self.settings.user_stylesheet
    self.settings.stylesheet_path = self.settings.user_stylesheet_path
    self.user_stylesheet_path = utils.get_stylesheet_reference(
        self.settings, os.path.join(os.getcwd(), 'dummy'))
    if self.user_stylesheet_path:
        self.settings.record_dependencies.add(self.user_stylesheet_path)
    # Language-specific replacement for literal double quotes in
    # literal text (see encode()); None means leave '"' untouched.
    # NOTE(review): the attribute name is misspelled ("replacment") but
    # is used consistently by encode(), so it is kept unchanged here.
    lang = self.settings.language_code or ''
    if lang.startswith('de'):
        self.double_quote_replacment = "{\\dq}"
    elif lang.startswith('it'):
        self.double_quote_replacment = r'{\char`\"}'
    else:
        self.double_quote_replacment = None
    self.write_header()
def write_header(self):
    """
    Fill ``self.header`` with the LaTeX preamble: generator banner,
    language settings, stylesheet inputs, and default definitions
    for Docutils nodes and auxiliary variables.
    """
    a = self.header.append
    a('%% Generated by Docutils %s <http://docutils.sourceforge.net>.'
      % docutils.__version__)
    a('')
    a('% Docutils settings:')
    lang = self.settings.language_code or ''
    a(r'\providecommand{\DEVlanguageiso}{%s}' % lang)
    # Fall back from e.g. 'de_AT' to 'de' when looking up the babel name;
    # empty string if the language is unknown.
    a(r'\providecommand{\DEVlanguagebabel}{%s}' % self.iso639_to_babel.get(
        lang, self.iso639_to_babel.get(lang.split('_')[0], '')))
    a('')
    if self.user_stylesheet_path:
        a('% User stylesheet:')
        a(r'\input{%s}' % self.user_stylesheet_path)
    a('% Docutils stylesheet:')
    a(r'\input{%s}' % self.stylesheet_path)
    a('')
    a('% Default definitions for Docutils nodes:')
    # One \DN<nodename> macro per node class; defaults to identity.
    for node_name in nodes.node_class_names:
        a(r'\providecommand{\DN%s}[1]{#1}' % node_name.replace('_', ''))
    a('')
    a('% Auxiliary definitions:')
    for attr in (r'\DEVparent \DEVattrlen \DEVtitleastext '
                 r'\DEVsinglebackref \DEVmultiplebackrefs'
                 ).split():
        # Later set using \renewcommand.
        a(r'\providecommand{%s}{DOCUTILSUNINITIALIZEDVARIABLE}' % attr)
    for attr in (r'\DEVparagraphindented \DEVhassubtitle').split():
        # Initialize as boolean variables.
        a(r'\providecommand{%s}{false}' % attr)
    a('\n\n')
# NOTE: this rebinds the imported module name `unicode_map` to the
# dict it contains.
unicode_map = unicode_map.unicode_map # comprehensive Unicode map
# Fix problems with unimap.py.
unicode_map.update({
    # We have AE or T1 encoding, so "``" etc. work. The macros
    # from unimap.py may *not* work.
    u'\u201C': '{``}',
    u'\u201D': "{''}",
    u'\u201E': '{,,}',
    })

# Escape table for body text (see encode()); braces around each
# replacement keep the macros self-delimiting.
character_map = {
    '\\': r'{\textbackslash}',
    '{': r'{\{}',
    '}': r'{\}}',
    '$': r'{\$}',
    '&': r'{\&}',
    '%': r'{\%}',
    '#': r'{\#}',
    '[': r'{[}',
    ']': r'{]}',
    '-': r'{-}',
    '`': r'{`}',
    "'": r"{'}",
    ',': r'{,}',
    '"': r'{"}',
    '|': r'{\textbar}',
    '<': r'{\textless}',
    '>': r'{\textgreater}',
    '^': r'{\textasciicircum}',
    '~': r'{\textasciitilde}',
    '_': r'{\DECtextunderscore}',
    }
character_map.update(unicode_map)
#character_map.update(special_map)

# `att_map` is for encoding attributes. According to
# <http://www-h.eng.cam.ac.uk/help/tpl/textprocessing/teTeX/latex/latex2e-html/ltx-164.html>,
# the following characters are special: # $ % & ~ _ ^ \ { }
# These work without special treatment in macro parameters:
# $, &, ~, _, ^
att_map = {'#': '\\#',
           '%': '\\%',
           # We cannot do anything about backslashes.
           '\\': '',
           '{': '\\{',
           '}': '\\}',
           # The quotation mark may be redefined by babel.
           '"': '"{}',
           }
att_map.update(unicode_map)
def encode(self, text, attval=None):
    """
    Encode special characters in `text` and return it.

    If attval is true, preserve as much as possible verbatim (used
    in attribute value encoding). If attval is 'width' or
    'height', `text` is interpreted as a length value.
    """
    if attval in ('width', 'height'):
        # Length value: split into numeric part and unit.
        match = re.match(r'([0-9.]+)(\S*)$', text)
        assert match, '%s="%s" must be a length' % (attval, text)
        value, unit = match.groups()
        if unit == '%':
            # Percentages become fractions of \DECrelativeunit.
            value = str(float(value) / 100)
            unit = r'\DECrelativeunit'
        elif unit in ('', 'px'):
            # If \DECpixelunit is "pt", this gives the same notion
            # of pixels as graphicx. This is a bit of a hack.
            value = str(float(value) * 0.75)
            # Raw string: the original non-raw literal relied on the
            # invalid escape sequence '\D' (deprecated in modern Python).
            unit = r'\DECpixelunit'
        return '%s%s' % (value, unit)
    # Choose the escape table: att_map for attribute values,
    # character_map for ordinary body text.
    if attval:
        get = self.att_map.get
    else:
        get = self.character_map.get
    text = ''.join([get(c, c) for c in text])
    if (self.literal_block or self.inline_literal) and not attval:
        # NB: We can have inline literals within literal blocks.
        # Shrink '\r\n'.
        text = text.replace('\r\n', '\n')
        # Convert space. If "{ }~~~~~" is wrapped (at the
        # brace-enclosed space "{ }"), the following non-breaking
        # spaces ("~~~~") do *not* wind up at the beginning of the
        # next line. Also note that no hyphenation is done if the
        # breaking space ("{ }") comes *after* the non-breaking
        # spaces.
        if self.literal_block:
            # Replace newlines with real newlines.
            # (Raw string: the original non-raw literal contained the
            # invalid escape sequence '\m'.)
            text = text.replace('\n', r'\mbox{}\\{}')
            replace_fn = self.encode_replace_for_literal_block_spaces
        else:
            replace_fn = self.encode_replace_for_inline_literal_spaces
        text = re.sub(r'\s+', replace_fn, text)
        # Protect hyphens; if we don't, line breaks will be
        # possible at the hyphens and even the \textnhtt macro
        # from the hyphenat package won't change that.
        text = text.replace('-', r'\mbox{-}')
        text = text.replace("'", r'{\DECtextliteralsinglequote}')
        if self.double_quote_replacment is not None:
            text = text.replace('"', self.double_quote_replacment)
        return text
    else:
        if not attval:
            # Replace space with single protected space.
            text = re.sub(r'\s+', '{ }', text)
            # Replace double quotes with macro calls, alternating
            # between left and right quotes (self.left_quote).
            L = []
            for part in text.split(self.character_map['"']):
                if L:
                    # Insert quote.
                    L.append(self.left_quote and r'{\DECtextleftdblquote}'
                             or r'{\DECtextrightdblquote}')
                    self.left_quote = not self.left_quote
                L.append(part)
            return ''.join(L)
        else:
            return text
def encode_replace_for_literal_block_spaces(self, match):
    """Turn a whitespace run into the same number of no-break spaces."""
    return len(match.group()) * '~'

def encode_replace_for_inline_literal_spaces(self, match):
    """Turn a whitespace run into one breakable space plus no-break spaces."""
    run_length = len(match.group())
    return '{ }' + (run_length - 1) * '~'
def astext(self):
    """Return the fully translated document as one string."""
    preamble = '\n'.join(self.header)
    document_body = ''.join(self.body)
    return preamble + document_body

def append(self, text, newline='%\n'):
    """
    Append text, stripping newlines, producing nice LaTeX code.
    """
    indent = ' ' * self.indentation_level
    rendered = ''.join(indent + line + newline
                       for line in text.splitlines(0))
    self.body.append(rendered)
def visit_Text(self, node):
    # Emit the encoded text content of the node.
    self.append(self.encode(node.astext()))

def depart_Text(self, node):
    pass

def is_indented(self, paragraph):
    """Return true if `paragraph` should be first-line-indented."""
    assert isinstance(paragraph, nodes.paragraph)
    # Position among visible, non-title siblings determines indentation.
    siblings = [n for n in paragraph.parent if
                self.is_visible(n) and not isinstance(n, nodes.Titular)]
    index = siblings.index(paragraph)
    # No indent for "continued" paragraphs or right after a transition.
    if ('continued' in paragraph['classes'] or
        index > 0 and isinstance(siblings[index-1], nodes.transition)):
        return 0
    # Indent all but the first paragraphs.
    return index > 0
def before_paragraph(self, node):
    # Tell the stylesheet whether this paragraph is first-line-indented.
    self.append(r'\renewcommand{\DEVparagraphindented}{%s}'
                % (self.is_indented(node) and 'true' or 'false'))

def before_title(self, node):
    # Expose the title as plain encoded text, and flag whether a
    # subtitle immediately follows it in the parent node.
    self.append(r'\renewcommand{\DEVtitleastext}{%s}'
                % self.encode(node.astext()))
    self.append(r'\renewcommand{\DEVhassubtitle}{%s}'
                % ((len(node.parent) > 2 and
                    isinstance(node.parent[1], nodes.subtitle))
                   and 'true' or 'false'))

def before_generated(self, node):
    # Strip surrounding whitespace from generated section numbers.
    if 'sectnum' in node['classes']:
        node[0] = node[0].strip()
# Flag: currently inside a literal block (changes encode() behavior).
literal_block = 0

def visit_literal_block(self, node):
    self.literal_block = 1

def depart_literal_block(self, node):
    self.literal_block = 0

# Doctest blocks are treated exactly like literal blocks.
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block

# Nesting depth of inline literals; a counter rather than a flag because
# inline literals can occur within literal blocks (see encode()).
inline_literal = 0

def visit_literal(self, node):
    self.inline_literal += 1

def depart_literal(self, node):
    self.inline_literal -= 1
def _make_encodable(self, text):
    """
    Return text (a unicode object) with all unencodable characters
    replaced with '?'.

    Thus, the returned unicode string is guaranteed to be encodable.
    """
    target_encoding = self.settings.output_encoding
    encoded_bytes = text.encode(target_encoding, 'replace')
    return encoded_bytes.decode(target_encoding)
def visit_comment(self, node):
    """
    Insert the comment unchanged into the document, replacing
    unencodable characters with '?'.

    (This is done in order not to fail if comments contain unencodable
    characters, because our default encoding is not UTF-8.)
    """
    # Each line becomes a LaTeX comment line ("% ...").
    self.append('\n'.join(['% ' + self._make_encodable(line) for line
                           in node.astext().splitlines(0)]), newline='\n')
    raise nodes.SkipChildren
def before_topic(self, node):
    # For a table of contents, flatten the nested bullet-list structure
    # and hoist each entry's reference attributes onto the paragraph and
    # its parent; flag the topic via its 'contents' attribute.
    if 'contents' in node['classes']:
        for bullet_list in list(node.traverse(nodes.bullet_list)):
            p = bullet_list.parent
            if isinstance(p, nodes.list_item):
                # Move a nested list up, next to its enclosing list item.
                p.parent.insert(p.parent.index(p) + 1, bullet_list)
                del p[1]
        for paragraph in node.traverse(nodes.paragraph):
            paragraph.attributes.update(paragraph[0].attributes)
            paragraph[:] = paragraph[0]
            paragraph.parent['tocrefid'] = paragraph['refid']
        node['contents'] = 1
    else:
        node['contents'] = 0
# Current bullet-list nesting depth.
bullet_list_level = 0

def visit_bullet_list(self, node):
    # Select the bullet symbol by nesting level, capped at \labelitemiv.
    self.append(r'\DECsetbullet{\labelitem%s}' %
                ['i', 'ii', 'iii', 'iv'][min(self.bullet_list_level, 3)])
    self.bullet_list_level += 1

def depart_bullet_list(self, node):
    self.bullet_list_level -= 1
# Docutils enumeration types mapped to LaTeX counter-formatting commands.
enum_styles = {'arabic': 'arabic', 'loweralpha': 'alph', 'upperalpha':
               'Alph', 'lowerroman': 'roman', 'upperroman': 'Roman'}

# Running counter used to give each list a unique LaTeX counter name.
enum_counter = 0

def visit_enumerated_list(self, node):
    # We create our own enumeration list environment. This allows
    # to set the style and starting value and unlimited nesting.
    # Maybe the actual creation (\DEC) can be moved to the
    # stylesheet?
    self.enum_counter += 1
    enum_prefix = self.encode(node['prefix'])
    enum_suffix = self.encode(node['suffix'])
    enum_type = '\\' + self.enum_styles.get(node['enumtype'], r'arabic')
    # LaTeX counters are zero-based relative to the first \item.
    start = node.get('start', 1) - 1
    counter = 'Denumcounter%d' % self.enum_counter
    self.append(r'\DECmakeenumeratedlist{%s}{%s}{%s}{%s}{%s}{'
                % (enum_prefix, enum_type, enum_suffix, counter, start))
    # for Emacs: }

def depart_enumerated_list(self, node):
    self.append('}') # for Emacs: {
def before_list_item(self, node):
    # XXX needs cleanup.
    # Mark the final text-bearing item of its parent list with a
    # 'lastitem' attribute; presumably consumed by the stylesheet --
    # confirm against base.tex.
    if (len(node) and (isinstance(node[-1], nodes.TextElement) or
                       isinstance(node[-1], nodes.Text)) and
        node.parent.index(node) == len(node.parent) - 1):
        node['lastitem'] = 'true'

# Line blocks reuse the same last-item marking.
before_line = before_list_item
def before_raw(self, node):
    # Pass through raw nodes only when their format list includes "latex".
    if 'latex' in node.get('format', '').split():
        # We're inserting the text in before_raw and thus outside
        # of \DN... and \DECattr in order to make grouping with
        # curly brackets work.
        self.append(node.astext())
    raise nodes.SkipChildren
def process_backlinks(self, node, type):
    """
    Add LaTeX handling code for backlinks of footnote or citation
    node `node`. `type` is either 'footnote' or 'citation'.
    """
    # Reset both backlink variables, then set whichever one applies.
    self.append(r'\renewcommand{\DEVsinglebackref}{}')
    self.append(r'\renewcommand{\DEVmultiplebackrefs}{}')
    backrefs = node['backrefs']
    if len(backrefs) == 1:
        self.append(r'\renewcommand{\DEVsinglebackref}{%s}'
                    % backrefs[0])
    elif len(backrefs) > 1:
        # \DECmulticitationbacklink or \DECmultifootnotebacklink.
        refs = []
        for position, backref in enumerate(backrefs):
            refs.append(r'\DECmulti%sbacklink{%s}{%s}'
                        % (type, backref, position + 1))
        self.append(r'\renewcommand{\DEVmultiplebackrefs}{(%s){ }}'
                    % ', '.join(refs))
def visit_footnote(self, node):
    # Footnotes only need backlink bookkeeping at this point.
    self.process_backlinks(node, 'footnote')

def visit_citation(self, node):
    # Citations are handled exactly like footnotes.
    self.process_backlinks(node, 'citation')
def before_table(self, node):
    # A table contains exactly one tgroup. See before_tgroup.
    pass

def before_tgroup(self, node):
    # Compute relative column widths from the leading colspec children,
    # then open the table with the resulting LaTeX column specification.
    widths = []
    total_width = 0
    for i in range(int(node['cols'])):
        assert isinstance(node[i], nodes.colspec)
        # +1 gives very narrow columns a little extra room.
        widths.append(int(node[i]['colwidth']) + 1)
        total_width += widths[-1]
    # The colspec children have been consumed; drop them from the tree.
    del node[:len(widths)]
    tablespec = '|'
    for w in widths:
        # 0.93 is probably wrong in many cases. XXX Find a
        # solution which works *always*.
        tablespec += r'p{%s\textwidth}|' % (0.93 * w /
                                            max(total_width, 60))
    self.append(r'\DECmaketable{%s}{' % tablespec)
    self.context.append('}')
    raise SkipAttrParentLaTeX
def depart_tgroup(self, node):
    # Close the \DECmaketable group opened in before_tgroup.
    self.append(self.context.pop())

# Raise SkipAttrParentLaTeX so that no \DECattr / \DEVparent code is
# generated for these structural table nodes.
def before_row(self, node):
    raise SkipAttrParentLaTeX

def before_thead(self, node):
    raise SkipAttrParentLaTeX

def before_tbody(self, node):
    raise SkipAttrParentLaTeX
def is_simply_entry(self, node):
    """Return true if `node` is empty or holds a single paragraph."""
    if len(node) == 0:
        return True
    return len(node) == 1 and isinstance(node[0], nodes.paragraph)
def before_entry(self, node):
    """
    Open the LaTeX macro call for one table cell and push the
    matching closing text on ``self.context``.  Rowspans are
    rejected; colspans are supported only for single-paragraph
    cells.
    """
    is_leftmost = 0
    if node.hasattr('morerows'):
        self.document.reporter.severe('Rowspans are not supported.')
        # Todo: Add empty cells below rowspanning cell and issue
        # warning instead of severe.
    if node.hasattr('morecols'):
        # The author got a headache trying to implement
        # multicolumn support.
        if not self.is_simply_entry(node):
            self.document.reporter.severe(
                'Colspanning table cells may only contain one paragraph.')
            # Todo: Same as above.
        # The number of columns this entry spans.
        colspan = int(node['morecols']) + 1
        del node['morecols']
    else:
        colspan = 1
    # Macro to call -- \DECcolspan or \DECcolspanleft.
    macro_name = r'\DECcolspan'
    if node.parent.index(node) == 0:
        # Leftmost column.
        macro_name += 'left'
        is_leftmost = 1
    if colspan > 1:
        self.append('%s{%s}{' % (macro_name, colspan))
        self.context.append('}')
    else:
        # Do not add a multicolumn with colspan 1 because we need
        # at least one non-multicolumn cell per column to get the
        # desired column widths, and we can only do colspans with
        # cells consisting of only one paragraph.
        if not is_leftmost:
            self.append(r'\DECsubsequententry{')
            self.context.append('}')
        else:
            self.context.append('')
    if isinstance(node.parent.parent, nodes.thead):
        node['tableheaderentry'] = 'true'
    # Don't add \renewcommand{\DEVparent}{...} because there must
    # not be any non-expandable commands in front of \multicolumn.
    raise SkipParentLaTeX
def depart_entry(self, node):
    """Emit the closing text pushed for this cell by before_entry."""
    self.append(self.context.pop())
def before_substitution_definition(self, node):
    """Substitution definitions produce no LaTeX output at all."""
    raise nodes.SkipNode
# Nesting depth of currently open node handler calls; incremented in
# dispatch_visit and decremented in dispatch_departure.
indentation_level = 0
def node_name(self, node):
    """Return the node's class name with all underscores removed."""
    class_name = node.__class__.__name__
    return class_name.replace('_', '')
# Attribute propagation order: names listed here are emitted first,
# in this order; all other attribute names follow alphabetically
# (see attribute_cmp).
attribute_order = ['align', 'classes', 'ids']
def attribute_cmp(self, a1, a2):
    """
    Compare attribute names `a1` and `a2`; used in
    propagate_attributes to determine propagation order.
    Return value as for the built-in `cmp`: names listed in
    `self.attribute_order` sort first, in list order; all other
    names come after them, sorted alphabetically.
    """
    known1 = a1 in self.attribute_order
    known2 = a2 in self.attribute_order
    if known1 and known2:
        return cmp(self.attribute_order.index(a1),
                   self.attribute_order.index(a2))
    if known1 != known2:
        # Attributes not in self.attribute_order come last.
        if known1:
            return -1
        return 1
    return cmp(a1, a2)
def propagate_attributes(self, node):
    """
    Emit one \\DECattr call per attribute (per item, for list
    attributes) of `node`, sorted by `attribute_cmp`, and push the
    matching closing braces (if any) on ``self.context``.
    """
    # Propagate attributes using \DECattr macros.
    node_name = self.node_name(node)
    attlist = []
    if isinstance(node, nodes.Element):
        attlist = node.attlist()
    attlist.sort(lambda pair1, pair2: self.attribute_cmp(pair1[0],
                                                         pair2[0]))
    # `numatts` may be greater than len(attlist) due to list
    # attributes.
    numatts = 0
    pass_contents = self.pass_contents(node)
    for key, value in attlist:
        if isinstance(value, list):
            # One \DECattr call per list item, numbered from 1;
            # \DEVattrlen tells the handler how many items there are.
            self.append(r'\renewcommand{\DEVattrlen}{%s}' % len(value))
            for i in range(len(value)):
                self.append(r'\DECattr{%s}{%s}{%s}{%s}{' %
                            (i+1, key, self.encode(value[i], attval=key),
                             node_name))
                if not pass_contents:
                    # Contents are not passed as an argument, so the
                    # call can be closed immediately.
                    self.append('}')
            numatts += len(value)
        else:
            self.append(r'\DECattr{}{%s}{%s}{%s}{' %
                        (key, self.encode(unicode(value), attval=key),
                         node_name))
            if not pass_contents:
                self.append('}')
            numatts += 1
    if pass_contents:
        # Closing braces are emitted in dispatch_departure.
        self.context.append('}' * numatts) # for Emacs: {
    else:
        self.context.append('')
def visit_docinfo(self, node):
    """Docinfo fields are not handled by this writer yet."""
    raise NotImplementedError('Docinfo not yet implemented.')
def visit_document(self, node):
    """
    Move the `ids` of non-text elements onto the next text element.
    This won't work for images.  Need to review this.
    """
    for element in node.traverse(nodes.Element):
        if (isinstance(element, nodes.TextElement)
            or 'ids' not in element):
            continue
        target = element.next_node(nodes.TextElement)
        if target:
            target['ids'].extend(element['ids'])
        element['ids'] = []
def pass_contents(self, node):
    r"""
    Return True if the contents of `node` should be passed as a
    macro argument, i.e. in \DN<nodename>{<contents>} and
    \DECattr{}{}{}{}{<contents>}.  Return False if they should be
    emitted as \DECvisit<nodename> <contents> \DECdepart<nodename>
    instead, with no attribute handler called.

    Passing the whole document or whole sections as parameters
    to \DN... or \DECattr causes LaTeX to run out of memory.
    """
    return not isinstance(node, (nodes.document, nodes.section))
def dispatch_visit(self, node):
    """
    Run the before_... handler (if any), emit the attribute and
    node-handler macro calls for `node`, then delegate to the
    visit_... method.  TreePruningExceptions raised by the
    before_... handler are re-raised after the setup is done.
    """
    skip_attr = skip_parent = 0
    # TreePruningException to be propagated.
    tree_pruning_exception = None
    if hasattr(self, 'before_' + node.__class__.__name__):
        try:
            getattr(self, 'before_' + node.__class__.__name__)(node)
        except SkipParentLaTeX:
            skip_parent = 1
        except SkipAttrParentLaTeX:
            skip_attr = 1
            skip_parent = 1
        except nodes.SkipNode:
            raise
        except (nodes.SkipChildren, nodes.SkipSiblings), instance:
            tree_pruning_exception = instance
        except nodes.SkipDeparture:
            raise NotImplementedError(
                'SkipDeparture not usable in LaTeX writer')
    if not isinstance(node, nodes.Text):
        node_name = self.node_name(node)
        # attribute_deleters will be appended to self.context.
        attribute_deleters = []
        if not skip_parent and not isinstance(node, nodes.document):
            self.append(r'\renewcommand{\DEVparent}{%s}'
                        % self.node_name(node.parent))
            for name, value in node.attlist():
                if not isinstance(value, list) and not ':' in name:
                    # For non-list and non-special (like
                    # 'xml:preserve') attributes, set
                    # \DEVcurrentN<nodename>A<attribute> to the
                    # attribute value, so that the value of the
                    # attribute is available in the node handler
                    # and all children.
                    macro = r'\DEVcurrentN%sA%s' % (node_name, name)
                    self.append(r'\def%s{%s}' % (
                        macro, self.encode(unicode(value), attval=name)))
                    # Make the attribute undefined afterwards.
                    attribute_deleters.append(r'\let%s=\relax' % macro)
        self.context.append('\n'.join(attribute_deleters))
        if self.pass_contents(node):
            # Call \DN<nodename>{<contents>}.
            self.append(r'\DN%s{' % node_name)
            self.context.append('}')
        else:
            # Call \DECvisit<nodename> <contents>
            # \DECdepart<nodename>.  (Maybe we should use LaTeX
            # environments for this?)
            self.append(r'\DECvisit%s' % node_name)
            self.context.append(r'\DECdepart%s' % node_name)
        self.indentation_level += 1
        if not skip_attr:
            self.propagate_attributes(node)
        else:
            # Keep the context stack balanced for dispatch_departure.
            self.context.append('')
    if (isinstance(node, nodes.TextElement) and
        not isinstance(node.parent, nodes.TextElement)):
        # Reset current quote to left.
        self.left_quote = 1
    # Call visit_... method.
    try:
        nodes.SparseNodeVisitor.dispatch_visit(self, node)
    except LaTeXException:
        raise NotImplementedError(
            'visit_... methods must not raise LaTeXExceptions')
    if tree_pruning_exception:
        # Propagate TreePruningException raised in before_... method.
        raise tree_pruning_exception
def is_invisible(self, node):
    """
    Return true if `node` is invisible or moved away in the LaTeX
    rendering.
    """
    if isinstance(node, nodes.Text):
        return False
    # Raw nodes are assumed to be invisible; left/right-aligned
    # images and figures float away from their position.
    return (isinstance(node, (nodes.Invisible, nodes.footnote,
                              nodes.citation, nodes.raw)) or
            node.get('align') in ('left', 'right'))
def is_visible(self, node):
    """Negation of `is_invisible`."""
    if self.is_invisible(node):
        return False
    return True
def needs_space(self, node):
    """Two nodes for which `needs_space` is true need auxiliary space."""
    # Only visible block-level elements outside of text elements
    # qualify.
    if not (isinstance(node, nodes.Body) or
            isinstance(node, nodes.topic)):
        return False
    return not (self.is_invisible(node) or
                isinstance(node.parent, nodes.TextElement))
def always_needs_space(self, node):
    """
    Always add space around nodes for which this returns true, no
    matter whether the neighbouring node needs space as well.
    (E.g. transition next to section.)
    """
    return isinstance(node, nodes.transition)
def dispatch_departure(self, node):
    """
    Delegate to the depart_... method, close the macro calls opened
    in `dispatch_visit` (context pop order matters), and insert
    vertical space before the next visible sibling where needed.
    """
    # Call departure method.
    nodes.SparseNodeVisitor.dispatch_departure(self, node)
    if not isinstance(node, nodes.Text):
        # Close attribute and node handler call (\DN...{...}).
        self.indentation_level -= 1
        self.append(self.context.pop() + self.context.pop())
        # Delete \DECcurrentN... attribute macros.
        self.append(self.context.pop())
        # Get next sibling.
        next_node = node.next_node(
            ascend=0, siblings=1, descend=0,
            condition=self.is_visible)
        # Insert space if necessary.  (`next_node` may be None; the
        # isinstance-based predicates handle that.)
        if (self.needs_space(node) and self.needs_space(next_node) or
            self.always_needs_space(node) or
            self.always_needs_space(next_node)):
            if isinstance(node, nodes.paragraph) and isinstance(next_node, nodes.paragraph):
                # Space between paragraphs.
                self.append(r'\DECparagraphspace')
            else:
                # One of the elements is not a paragraph.
                self.append(r'\DECauxiliaryspace')
| Python |
# $Id: __init__.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
PEP HTML Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import codecs
import docutils
from docutils import frontend, nodes, utils, writers
from docutils.writers import html4css1
class Writer(html4css1.Writer):

    """
    HTML writer for PEPs: html4css1 output plus PEP-specific
    stylesheet/template defaults and template interpolation values.
    """

    default_stylesheet = 'pep.css'
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))

    default_template = 'template.txt'
    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))

    settings_spec = html4css1.Writer.settings_spec + (
        'PEP/HTML-Specific Options',
        'For the PEP/HTML writer, the default value for the --stylesheet-path '
        'option is "%s", and the default value for --template is "%s". '
        'See HTML-Specific Options above.'
        % (default_stylesheet_path, default_template_path),
        (('Python\'s home URL. Default is "http://www.python.org".',
          ['--python-home'],
          {'default': 'http://www.python.org', 'metavar': '<URL>'}),
         ('Home URL prefix for PEPs. Default is "." (current directory).',
          ['--pep-home'],
          {'default': '.', 'metavar': '<URL>'}),
         # For testing.
         (frontend.SUPPRESS_HELP,
          ['--no-random'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),))

    settings_default_overrides = {'stylesheet_path': default_stylesheet_path,
                                  'template': default_template_path,}

    relative_path_settings = (html4css1.Writer.relative_path_settings
                              + ('template',))

    config_section = 'pep_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = HTMLTranslator

    def interpolation_dict(self):
        """
        Return the template substitution mapping, extended with the
        PEP-specific values pyhome, pephome, pepindex, pep, banner,
        pepnum, title, and body.  Also sets ``self.pepnum`` and
        ``self.title`` from the document's header field list.
        """
        subs = html4css1.Writer.interpolation_dict(self)
        settings = self.document.settings
        pyhome = settings.python_home
        subs['pyhome'] = pyhome
        subs['pephome'] = settings.pep_home
        if pyhome == '..':
            subs['pepindex'] = '.'
        else:
            subs['pepindex'] = pyhome + '/dev/peps'
        index = self.document.first_child_matching_class(nodes.field_list)
        header = self.document[index]
        # First header field is the PEP number, second is the title.
        self.pepnum = header[0][1].astext()
        subs['pep'] = self.pepnum
        if settings.no_random:
            # Deterministic banner for testing.
            subs['banner'] = 0
        else:
            import random
            subs['banner'] = random.randrange(64)
        try:
            subs['pepnum'] = '%04i' % int(self.pepnum)
        except ValueError:
            # Bug fix: this previously read the bare name `pepnum`,
            # which is undefined here, so non-numeric PEP numbers
            # raised NameError instead of falling back to the raw
            # header text.
            subs['pepnum'] = self.pepnum
        self.title = header[1][1].astext()
        subs['title'] = self.title
        subs['body'] = ''.join(
            self.body_pre_docinfo + self.docinfo + self.body)
        return subs

    def assemble_parts(self):
        html4css1.Writer.assemble_parts(self)
        self.parts['title'] = [self.title]
        self.parts['pepnum'] = self.pepnum
class HTMLTranslator(html4css1.HTMLTranslator):

    """HTML translator that closes RFC 2822 header field lists with a
    horizontal rule."""

    def depart_field_list(self, node):
        html4css1.HTMLTranslator.depart_field_list(self, node)
        is_rfc2822 = 'rfc2822' in node['classes']
        if is_rfc2822:
            self.body.append('<hr />\n')
| Python |
# $Id: pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes indented pseudo-XML.
"""
__docformat__ = 'reStructuredText'
from docutils import writers
class Writer(writers.Writer):

    """Writer that dumps the internal document tree as indented
    pseudo-XML."""

    # Formats this writer supports.
    supported = ('pprint', 'pformat', 'pseudoxml')

    config_section = 'pseudoxml writer'
    config_section_dependencies = ('writers',)

    # Final translated form of `document`.
    output = None

    def translate(self):
        """Render the document tree via its own pformat() method."""
        self.output = self.document.pformat()

    def supports(self, format):
        """This writer supports all format-specific elements."""
        return 1
| Python |
# $Id: __init__.py 6111 2009-09-02 21:36:05Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Writer modules.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import languages, Component
from docutils.transforms import universal
class Writer(Component):

    """
    Abstract base class for docutils Writers.

    Each writer module or package must export a subclass also called 'Writer'.
    Each writer must support all standard node types listed in
    `docutils.nodes.node_class_names`.

    The `write()` method is the main entry point.
    """

    component_type = 'writer'
    config_section = 'writers'

    def get_transforms(self):
        # Every writer filters/strips system messages and elements in
        # addition to the transforms inherited from Component.
        return Component.get_transforms(self) + [
            universal.Messages,
            universal.FilterMessages,
            universal.StripClassesAndElements,]

    document = None
    """The document to write (Docutils doctree); set by `write`."""

    output = None
    """Final translated form of `document` (Unicode string for text, binary
    string for other forms); set by `translate`."""

    language = None
    """Language module for the document; set by `write`."""

    destination = None
    """`docutils.io` Output object; where to write the document.
    Set by `write`."""

    def __init__(self):

        # Used by HTML and LaTex writer for output fragments:
        self.parts = {}
        """Mapping of document part names to fragments of `self.output`.
        Values are Unicode strings; encoding is up to the client. The 'whole'
        key should contain the entire document output.
        """

    def write(self, document, destination):
        """
        Process a document into its final form.

        Translate `document` (a Docutils document tree) into the Writer's
        native format, and write it out to its `destination` (a
        `docutils.io.Output` subclass object).

        Normally not overridden or extended in subclasses.
        """
        self.document = document
        self.language = languages.get_language(
            document.settings.language_code)
        self.destination = destination
        self.translate()
        # Hand the translated text to the destination and return its
        # result.
        output = self.destination.write(self.output)
        return output

    def translate(self):
        """
        Do final translation of `self.document` into `self.output`. Called
        from `write`. Override in subclasses.

        Usually done with a `docutils.nodes.NodeVisitor` subclass, in
        combination with a call to `docutils.nodes.Node.walk()` or
        `docutils.nodes.Node.walkabout()`. The ``NodeVisitor`` subclass must
        support all standard elements (listed in
        `docutils.nodes.node_class_names`) and possibly non-standard elements
        used by the current Reader as well.
        """
        raise NotImplementedError('subclass must override this method')

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary. Extend in subclasses."""
        self.parts['whole'] = self.output
        self.parts['encoding'] = self.document.settings.output_encoding
        self.parts['version'] = docutils.__version__
class UnfilteredWriter(Writer):

    """
    A writer that passes the document tree on unchanged (e.g. a
    serializer.)

    Documents written by UnfilteredWriters are typically reused at a
    later date using a subclass of `readers.ReReader`.
    """

    def get_transforms(self):
        # Deliberately add no transforms: when the document is reused
        # later, the writer used at that point will add the
        # appropriate ones.
        return Component.get_transforms(self)
# Map user-facing writer names to the implementing module names;
# consulted by `get_writer_class`.
_writer_aliases = {
      'html': 'html4css1',
      'latex': 'latex2e',
      'pprint': 'pseudoxml',
      'pformat': 'pseudoxml',
      'pdf': 'rlpdf',
      'xml': 'docutils_xml',
      's5': 's5_html'}
def get_writer_class(writer_name):
    """Return the Writer class from the `writer_name` module."""
    name = writer_name.lower()
    # Resolve aliases such as 'html' -> 'html4css1'.
    name = _writer_aliases.get(name, name)
    module = __import__(name, globals(), locals())
    return module.Writer
| Python |
# $Id: docutils_xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import frontend, writers
class Writer(writers.Writer):

    """Writer for the native Docutils XML document representation."""

    # Formats this writer supports.
    supported = ('xml',)

    settings_spec = (
        '"Docutils XML" Writer Options',
        'Warning: the --newlines and --indents options may adversely affect '
        'whitespace; use them only for reading convenience.',
        (('Generate XML with newlines before and after tags.',
          ['--newlines'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Generate XML with indents and newlines.',
          ['--indents'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Omit the XML declaration. Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'],
          {'dest': 'doctype_declaration', 'default': 1,
           'action': 'store_false', 'validator': frontend.validate_boolean}),))

    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}

    config_section = 'docutils_xml writer'
    config_section_dependencies = ('writers',)

    # Final translated form of `document`.
    output = None

    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    #xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'

    def translate(self):
        """Serialize the document tree as (optionally pretty-printed)
        Docutils XML."""
        settings = self.document.settings
        # --indents implies newlines; --newlines alone adds no indent.
        if settings.indents:
            newline = '\n'
            indent = '    '
        elif settings.newlines:
            newline = '\n'
            indent = ''
        else:
            newline = ''
            indent = ''
        header = []
        if settings.xml_declaration:
            header.append(self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            header.append(self.doctype)
        header.append(self.generator % docutils.__version__)
        docnode = self.document.asdom().childNodes[0]
        self.output = ''.join(header) + docnode.toprettyxml(indent, newline)
| Python |
# $Id: urischemes.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
http://www.iana.org/assignments/uri-schemes (revised 2005-11-28)
and an older list at http://www.w3.org/Addressing/schemes.html.
"""
# Many values are blank and should be filled in with useful descriptions.
# Typo fix: the 'sips' description read "intitiaion" instead of
# "initiation".  Many values are blank and should be filled in with
# useful descriptions.
schemes = {
      'about': 'provides information on Navigator',
      'acap': 'Application Configuration Access Protocol; RFC 2244',
      'addbook': "To add vCard entries to Communicator's Address Book",
      'afp': 'Apple Filing Protocol',
      'afs': 'Andrew File System global file names',
      'aim': 'AOL Instant Messenger',
      'callto': 'for NetMeeting links',
      'castanet': 'Castanet Tuner URLs for Netcaster',
      'chttp': 'cached HTTP supported by RealPlayer',
      'cid': 'content identifier; RFC 2392',
      'crid': 'TV-Anytime Content Reference Identifier; RFC 4078',
      'data': ('allows inclusion of small data items as "immediate" data; '
               'RFC 2397'),
      'dav': 'Distributed Authoring and Versioning Protocol; RFC 2518',
      'dict': 'dictionary service protocol; RFC 2229',
      'dns': 'Domain Name System resources',
      'eid': ('External ID; non-URL data; general escape mechanism to allow '
              'access to information for applications that are too '
              'specialized to justify their own schemes'),
      'fax': ('a connection to a terminal that can handle telefaxes '
              '(facsimiles); RFC 2806'),
      'feed': 'NetNewsWire feed',
      'file': 'Host-specific file names; RFC 1738',
      'finger': '',
      'freenet': '',
      'ftp': 'File Transfer Protocol; RFC 1738',
      'go': 'go; RFC 3368',
      'gopher': 'The Gopher Protocol',
      'gsm-sms': ('Global System for Mobile Communications Short Message '
                  'Service'),
      'h323': ('video (audiovisual) communication on local area networks; '
               'RFC 3508'),
      'h324': ('video and audio communications over low bitrate connections '
               'such as POTS modem connections'),
      'hdl': 'CNRI handle system',
      'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
      'http': 'Hypertext Transfer Protocol; RFC 2616',
      'https': 'HTTP over SSL; RFC 2818',
      'hydra': 'SubEthaEdit URI. See http://www.codingmonkeys.de/subethaedit.',
      'iioploc': 'Internet Inter-ORB Protocol Location?',
      'ilu': 'Inter-Language Unification',
      'im': 'Instant Messaging; RFC 3860',
      'imap': 'Internet Message Access Protocol; RFC 2192',
      'info': 'Information Assets with Identifiers in Public Namespaces',
      'ior': 'CORBA interoperable object reference',
      'ipp': 'Internet Printing Protocol; RFC 3510',
      'irc': 'Internet Relay Chat',
      'iris.beep': 'iris.beep; RFC 3983',
      'iseek': 'See www.ambrosiasw.com; a little util for OS X.',
      'jar': 'Java archive',
      'javascript': ('JavaScript code; evaluates the expression after the '
                     'colon'),
      'jdbc': 'JDBC connection URI.',
      'ldap': 'Lightweight Directory Access Protocol',
      'lifn': '',
      'livescript': '',
      'lrq': '',
      'mailbox': 'Mail folder access',
      'mailserver': 'Access to data available from mail servers',
      'mailto': 'Electronic mail address; RFC 2368',
      'md5': '',
      'mid': 'message identifier; RFC 2392',
      'mocha': '',
      'modem': ('a connection to a terminal that can handle incoming data '
                'calls; RFC 2806'),
      'mtqp': 'Message Tracking Query Protocol; RFC 3887',
      'mupdate': 'Mailbox Update (MUPDATE) Protocol; RFC 3656',
      'news': 'USENET news; RFC 1738',
      'nfs': 'Network File System protocol; RFC 2224',
      'nntp': 'USENET news using NNTP access; RFC 1738',
      'opaquelocktoken': 'RFC 2518',
      'phone': '',
      'pop': 'Post Office Protocol; RFC 2384',
      'pop3': 'Post Office Protocol v3',
      'pres': 'Presence; RFC 3859',
      'printer': '',
      'prospero': 'Prospero Directory Service; RFC 4157',
      'rdar': ('URLs found in Darwin source '
               '(http://www.opensource.apple.com/darwinsource/).'),
      'res': '',
      'rtsp': 'real time streaming protocol; RFC 2326',
      'rvp': '',
      'rwhois': '',
      'rx': 'Remote Execution',
      'sdp': '',
      'service': 'service location; RFC 2609',
      'shttp': 'secure hypertext transfer protocol',
      'sip': 'Session Initiation Protocol; RFC 3261',
      'sips': 'secure session initiation protocol; RFC 3261',
      'smb': 'SAMBA filesystems.',
      'snews': 'For NNTP postings via SSL',
      'snmp': 'Simple Network Management Protocol; RFC 4088',
      'soap.beep': 'RFC 3288',
      'soap.beeps': 'RFC 3288',
      'ssh': 'Reference to interactive sessions via ssh.',
      't120': 'real time data conferencing (audiographics)',
      'tag': 'RFC 4151',
      'tcp': '',
      'tel': ('a connection to a terminal that handles normal voice '
              'telephone calls, a voice mailbox or another voice messaging '
              'system or a service that can be operated using DTMF tones; '
              'RFC 2806.'),
      'telephone': 'telephone',
      'telnet': 'Reference to interactive sessions; RFC 4248',
      'tftp': 'Trivial File Transfer Protocol; RFC 3617',
      'tip': 'Transaction Internet Protocol; RFC 2371',
      'tn3270': 'Interactive 3270 emulation sessions',
      'tv': '',
      'urn': 'Uniform Resource Name; RFC 2141',
      'uuid': '',
      'vemmi': 'versatile multimedia interface; RFC 2122',
      'videotex': '',
      'view-source': 'displays HTML code that was generated with JavaScript',
      'wais': 'Wide Area Information Servers; RFC 4156',
      'whodp': '',
      'whois++': 'Distributed directory service.',
      'x-man-page': ('Opens man page in Terminal.app on OS X '
                     '(see macosxhints.com)'),
      'xmlrpc.beep': 'RFC 3529',
      'xmlrpc.beeps': 'RFC 3529',
      'z39.50r': 'Z39.50 Retrieval; RFC 2056',
      'z39.50s': 'Z39.50 Session; RFC 2056',}
| Python |
# $Id: examples.py 4800 2006-11-12 18:02:01Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
               input_encoding='unicode', doctitle=1, initial_header_level=1):
    """
    Given an input string, returns a dictionary of HTML document parts.

    Dictionary keys are the names of parts, and values are Unicode strings;
    encoding is up to the client.

    Parameters:

    - `input_string`: A multi-line text string; required.
    - `source_path`: Path to the source file or object.  Optional, but
      useful for diagnostic output (system messages).
    - `destination_path`: Path to the file or object which will receive
      the output; optional.  Used for determining relative paths
      (stylesheets, source links, etc.).
    - `input_encoding`: The encoding of `input_string`.  If it is an
      encoded 8-bit string, provide the correct encoding.  If it is a
      Unicode string, use "unicode", the default.
    - `doctitle`: Disable the promotion of a lone top-level section title
      to document title (and subsequent section title to document
      subtitle promotion); enabled by default.
    - `initial_header_level`: The initial level for header elements
      (e.g. 1 for "<h1>").
    """
    settings = {'input_encoding': input_encoding,
                'doctitle_xform': doctitle,
                'initial_header_level': initial_header_level}
    return core.publish_parts(
        source=input_string, source_path=source_path,
        destination_path=destination_path,
        writer_name='html', settings_overrides=settings)
def html_body(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', output_encoding='unicode',
              doctitle=1, initial_header_level=1):
    """
    Given an input string, returns an HTML fragment as a string: the
    contents of the <body> element.

    Parameters (see `html_parts()` for the remainder):

    - `output_encoding`: The desired encoding of the output.  If a
      Unicode string is desired, use the default value of "unicode".
    """
    parts = html_parts(
        input_string=input_string, source_path=source_path,
        destination_path=destination_path,
        input_encoding=input_encoding, doctitle=doctitle,
        initial_header_level=initial_header_level)
    fragment = parts['html_body']
    if output_encoding == 'unicode':
        return fragment
    return fragment.encode(output_encoding)
def internals(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', settings_overrides=None):
    """
    Return the document tree and publisher, for exploring Docutils
    internals.

    Parameters: see `html_parts()`.
    """
    # Work on a copy so the caller's mapping is not mutated.
    if settings_overrides:
        overrides = settings_overrides.copy()
    else:
        overrides = {}
    overrides['input_encoding'] = input_encoding
    output, publisher = core.publish_programmatically(
        source_class=io.StringInput, source=input_string,
        source_path=source_path,
        destination_class=io.NullOutput, destination=None,
        destination_path=destination_path,
        reader=None, reader_name='standalone',
        parser=None, parser_name='restructuredtext',
        writer=None, writer_name='null',
        settings=None, settings_spec=None, settings_overrides=overrides,
        config_section=None, enable_exit_status=None)
    return publisher.writer.document, publisher
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# string_template_compat.py: string.Template for Python <= 2.4
# =====================================================
# This is just an excerpt of the standard string module to provide backwards
# compatibility.
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    # Python 2 metaclass hook: _TemplateMetaclass compiles `pattern`
    # from `delimiter` and `idpattern` at class-creation time.
    __metaclass__ = _TemplateMetaclass

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        # Raw template text; expanded by substitute()/safe_substitute().
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report the line/column of an ill-formed placeholder (the
        # empty 'invalid' group match) via ValueError.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        # Expand all placeholders; raises KeyError for missing names
        # and ValueError for ill-formed placeholders.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take precedence over the mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        # Like substitute(), but leaves missing or ill-formed
        # placeholders unchanged instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# string_template_compat.py: string.Template for Python <= 2.4
# =====================================================
# This is just an excerpt of the standard string module to provide backwards
# compatibility.
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    # Python 2 metaclass hook: _TemplateMetaclass compiles `pattern`
    # from `delimiter` and `idpattern` at class-creation time.
    __metaclass__ = _TemplateMetaclass

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        # Raw template text; expanded by substitute()/safe_substitute().
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report the line/column of an ill-formed placeholder (the
        # empty 'invalid' group match) via ValueError.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        # Expand all placeholders; raises KeyError for missing names
        # and ValueError for ill-formed placeholders.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take precedence over the mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        # Like substitute(), but leaves missing or ill-formed
        # placeholders unchanged instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
| Python |
# $Id: parts.py 6073 2009-08-06 12:21:10Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer; Dmitry Jemerov
# Copyright: This module has been placed in the public domain.
"""
Transforms related to document parts.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class SectNum(Transform):

    """
    Automatically assigns numbers to the titles of document sections.

    Numbering can be capped at a maximum section level.  Every title
    that receives a number also gets its "auto" attribute set, telling
    the contents-table generator to use a different form of the TOC.
    """

    default_priority = 710
    """Should be applied before `Contents`."""

    def apply(self):
        """Read directive options, then number sections or defer to writer."""
        details = self.startnode.details
        self.maxdepth = details.get('depth', None)
        self.startvalue = details.get('start', 1)
        self.prefix = details.get('prefix', '')
        self.suffix = details.get('suffix', '')
        self.startnode.parent.remove(self.startnode)
        if not self.document.settings.sectnum_xform:
            # Numbering is deferred: record the options in the settings
            # for the writer to perform the eventual section numbering.
            self.document.settings.sectnum_depth = self.maxdepth
            self.document.settings.sectnum_start = self.startvalue
            self.document.settings.sectnum_prefix = self.prefix
            self.document.settings.sectnum_suffix = self.suffix
            return
        if self.maxdepth is None:
            self.maxdepth = sys.maxint  # effectively unlimited depth
        self.update_section_numbers(self.document)

    def update_section_numbers(self, node, prefix=(), depth=0):
        """Prepend a generated number to each section title under `node`,
        recursing while `depth` stays below `self.maxdepth`.
        """
        depth += 1
        if prefix:
            number = 1
        else:
            # Top-level sections honour the configured start value.
            number = self.startvalue
        for child in node:
            if not isinstance(child, nodes.section):
                continue
            numbers = prefix + (str(number),)
            title = child[0]
            # Three no-break spaces separate the number from the title.
            generated = nodes.generated(
                '', (self.prefix + '.'.join(numbers) + self.suffix
                     + u'\u00a0' * 3),
                classes=['sectnum'])
            title.insert(0, generated)
            title['auto'] = 1
            if depth < self.maxdepth:
                self.update_section_numbers(child, numbers, depth)
            number += 1
class Contents(Transform):
    """
    This transform generates a table of contents from the entire document tree
    or from a single branch.  It locates "section" elements and builds them
    into a nested bullet list, which is placed within a "topic" created by the
    contents directive.  A title is either explicitly specified, taken from
    the appropriate language module, or omitted (local table of contents).
    The depth may be specified.  Two-way references between the table of
    contents and section titles are generated (requires Writer support).
    This transform requires a startnode, which contains generation
    options and provides the location for the generated table of contents (the
    startnode is replaced by the table of contents "topic").
    """
    default_priority = 720
    def apply(self):
        """Build the ToC in place, or delegate generation to the writer."""
        try: # let the writer (or output software) build the contents list?
            toc_by_writer = self.document.settings.use_latex_toc
        except AttributeError:
            toc_by_writer = False
        details = self.startnode.details
        if 'local' in details:
            # Local ToC: climb from the directive's topic to the nearest
            # enclosing section (or the document itself).
            startnode = self.startnode.parent.parent
            while not (isinstance(startnode, nodes.section)
                       or isinstance(startnode, nodes.document)):
                # find the ToC root: a direct ancestor of startnode
                startnode = startnode.parent
        else:
            startnode = self.document
        # First id of the enclosing topic; used for "top" backlinks.
        self.toc_id = self.startnode.parent['ids'][0]
        if 'backlinks' in details:
            self.backlinks = details['backlinks']
        else:
            self.backlinks = self.document.settings.toc_backlinks
        if toc_by_writer:
            # move customization settings to the parent node
            self.startnode.parent.attributes.update(details)
            self.startnode.parent.remove(self.startnode)
        else:
            contents = self.build_contents(startnode)
            if len(contents):
                self.startnode.replace_self(contents)
            else:
                # Empty ToC: remove the enclosing "topic" node as well.
                self.startnode.parent.parent.remove(self.startnode.parent)
    def build_contents(self, node, level=0):
        """Recursively build a bullet list of ToC entries for the sections
        directly inside `node`.

        Returns a `nodes.bullet_list`, or an empty list when `node`
        contains no sections.  Recursion stops at the 'depth' option.
        """
        level += 1
        sections = [sect for sect in node if isinstance(sect, nodes.section)]
        entries = []
        autonum = 0  # NOTE(review): appears unused in this method.
        depth = self.startnode.details.get('depth', sys.maxint)
        for section in sections:
            title = section[0]
            auto = title.get('auto')    # May be set by SectNum.
            entrytext = self.copy_and_filter(title)
            reference = nodes.reference('', '', refid=section['ids'][0],
                                        *entrytext)
            ref_id = self.document.set_id(reference)
            entry = nodes.paragraph('', '', reference)
            item = nodes.list_item('', entry)
            # Only add a backlink if the title has no reference of its own.
            if ( self.backlinks in ('entry', 'top')
                 and title.next_node(nodes.reference) is None):
                if self.backlinks == 'entry':
                    title['refid'] = ref_id
                elif self.backlinks == 'top':
                    title['refid'] = self.toc_id
            if level < depth:
                subsects = self.build_contents(section, level)
                item += subsects
            entries.append(item)
        if entries:
            contents = nodes.bullet_list('', *entries)
            if auto:
                # `auto` keeps the value from the last section processed.
                contents['classes'].append('auto-toc')
            return contents
        else:
            return []
    def copy_and_filter(self, node):
        """Return a copy of a title, with references, images, etc. removed."""
        visitor = ContentsFilter(self.document)
        node.walkabout(visitor)
        return visitor.get_entry_text()
class ContentsFilter(nodes.TreeCopyVisitor):
    """Visitor that copies a title's subtree for use as a ToC entry,
    dropping markup that does not belong in a table of contents.
    """
    def get_entry_text(self):
        """Return the children of the filtered copy of the visited title."""
        return self.get_tree_copy().children
    def visit_citation_reference(self, node):
        # Citation references are omitted entirely from ToC entries.
        raise nodes.SkipNode
    def visit_footnote_reference(self, node):
        # Footnote references are omitted entirely from ToC entries.
        raise nodes.SkipNode
    def visit_image(self, node):
        # An image is replaced by its alternate text, when present.
        if node.hasattr('alt'):
            self.parent.append(nodes.Text(node['alt']))
        raise nodes.SkipNode
    def ignore_node_but_process_children(self, node):
        # Drop the node itself but still copy its children.
        raise nodes.SkipDeparture
    # These node types contribute only their children to the entry text:
    visit_interpreted = ignore_node_but_process_children
    visit_problematic = ignore_node_but_process_children
    visit_reference = ignore_node_but_process_children
    visit_target = ignore_node_but_process_children
| Python |
# $Id: universal.py 6112 2009-09-03 07:27:59Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer
# Copyright: This module has been placed in the public domain.
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalReferences`: Resolve remaining references.
"""
__docformat__ = 'reStructuredText'
import re
import sys
import time
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class Decorations(Transform):

    """
    Populate a document's decoration element (header, footer).
    """

    default_priority = 820

    def apply(self):
        """Attach generated header/footer content to the decoration."""
        header_nodes = self.generate_header()
        if header_nodes:
            self.document.get_decoration().get_header().extend(header_nodes)
        footer_nodes = self.generate_footer()
        if footer_nodes:
            self.document.get_decoration().get_footer().extend(footer_nodes)

    def generate_header(self):
        """Hook for subclasses; no header is generated by default."""
        return None

    def generate_footer(self):
        """Return footer paragraph nodes, or None if nothing is enabled."""
        # @@@ Text is hard-coded for now.
        # Should be made dynamic (language-dependent).
        settings = self.document.settings
        wanted = (settings.generator or settings.datestamp
                  or settings.source_link or settings.source_url)
        if not wanted:
            return None
        text = []
        if (settings.source_link and settings._source) \
               or settings.source_url:
            # Explicit source URL wins over a path relative to the output.
            if settings.source_url:
                source = settings.source_url
            else:
                source = utils.relative_path(settings._destination,
                                             settings._source)
            text.append(nodes.reference('', 'View document source',
                                        refuri=source))
            text.append(nodes.Text('.\n'))
        if settings.datestamp:
            stamp = time.strftime(settings.datestamp, time.gmtime())
            text.append(nodes.Text('Generated on: ' + stamp + '.\n'))
        if settings.generator:
            text.append(nodes.Text('Generated by '))
            text.append(nodes.reference(
                '', 'Docutils', refuri='http://docutils.sourceforge.net/'))
            text.append(nodes.Text(' from '))
            text.append(nodes.reference(
                '', 'reStructuredText',
                refuri='http://docutils.sourceforge.net/rst.html'))
            text.append(nodes.Text(' source.\n'))
        return [nodes.paragraph('', '', *text)]
class ExposeInternals(Transform):

    """
    Expose internal attributes if ``expose_internals`` setting is set.
    """

    default_priority = 840

    def not_Text(self, node):
        """Traversal filter: accept every node except Text nodes."""
        return not isinstance(node, nodes.Text)

    def apply(self):
        attributes = self.document.settings.expose_internals
        if not attributes:
            return
        for node in self.document.traverse(self.not_Text):
            for name in attributes:
                # Copy the internal attribute into the node's element
                # attributes under an "internal:" prefix, if present.
                value = getattr(node, name, None)
                if value is not None:
                    node['internal:' + name] = value
class Messages(Transform):

    """
    Place any system messages generated after parsing into a dedicated
    section of the document.
    """

    default_priority = 860

    def apply(self):
        threshold = self.document.reporter.report_level
        # Loose messages: severe enough to report, not yet attached.
        loose = [msg for msg in self.document.transform_messages
                 if msg['level'] >= threshold and not msg.parent]
        if not loose:
            return
        section = nodes.section(classes=['system-messages'])
        # @@@ get this from the language module?
        section += nodes.title('', 'Docutils System Messages')
        section += loose
        self.document.transform_messages[:] = []
        self.document += section
class FilterMessages(Transform):

    """
    Remove system messages below verbosity threshold.
    """

    default_priority = 870

    def apply(self):
        threshold = self.document.reporter.report_level
        for msg in self.document.traverse(nodes.system_message):
            if msg['level'] < threshold:
                msg.parent.remove(msg)
class TestMessages(Transform):

    """
    Append all post-parse system messages to the end of the document.

    Used for testing purposes.
    """

    default_priority = 880

    def apply(self):
        # Only messages that are not already attached somewhere.
        unattached = [msg for msg in self.document.transform_messages
                      if not msg.parent]
        for msg in unattached:
            self.document += msg
class StripComments(Transform):

    """
    Remove comment elements from the document tree (only if the
    ``strip_comments`` setting is enabled).
    """

    default_priority = 740

    def apply(self):
        if not self.document.settings.strip_comments:
            return
        for comment in self.document.traverse(nodes.comment):
            comment.parent.remove(comment)
class StripClassesAndElements(Transform):

    """
    Remove from the document tree all elements with classes in
    `self.document.settings.strip_elements_with_classes` and all "classes"
    attribute values in `self.document.settings.strip_classes`.
    """

    default_priority = 420

    def apply(self):
        settings = self.document.settings
        if not (settings.strip_elements_with_classes
                or settings.strip_classes):
            return
        # Prepare dicts for lookup (not sets, for Python 2.2 compatibility).
        self.strip_elements = dict(
            [(key, None)
             for key in (settings.strip_elements_with_classes or [])])
        self.strip_classes = dict(
            [(key, None) for key in (settings.strip_classes or [])])
        doomed = self.document.traverse(self.check_classes)
        for node in doomed:
            node.parent.remove(node)

    def check_classes(self, node):
        """Prune stripped class values in place; return true when the
        whole element must be removed.
        """
        if not isinstance(node, nodes.Element):
            return None
        for class_value in node['classes'][:]:
            if class_value in self.strip_classes:
                node['classes'].remove(class_value)
            if class_value in self.strip_elements:
                return 1
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.