#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Utility functions for the File Manager Connector for Python
"""

import string, re
import os
import config as Config

# Generic manipulation functions

def removeExtension(fileName):
    index = fileName.rindex(".")
    newFileName = fileName[0:index]
    return newFileName

def getExtension(fileName):
    index = fileName.rindex(".") + 1
    fileExtension = fileName[index:]
    return fileExtension

def removeFromStart(string, char):
    return string.lstrip(char)

def removeFromEnd(string, char):
    return string.rstrip(char)

# Path functions

def combinePaths(basePath, folder):
    return removeFromEnd(basePath, '/') + '/' + removeFromStart(folder, '/')

def getFileName(filename):
    "Purpose: helper function to extract the file name from a full path"
    for splitChar in ["/", "\\"]:
        array = filename.split(splitChar)
        if len(array) > 1:
            filename = array[-1]
    return filename

def sanitizeFolderName(newFolderName):
    "Do a cleanup of the folder name to avoid possible problems"
    # Remove . \ / | : ? * " < > and control characters
    return re.sub('\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFolderName)

def sanitizeFileName(newFileName):
    "Do a cleanup of the file name to avoid possible problems"
    # Replace dots in the name with underscores (only one dot may remain: security issue).
    if Config.ForceSingleExtension:  # remove dots
        newFileName = re.sub('\\.(?![^.]*$)', '_', newFileName)
    newFileName = newFileName.replace('\\', '/')  # convert windows to unix path
    newFileName = os.path.basename(newFileName)   # strip directories
    # Remove \ / | : ? * " < > and control characters
    return re.sub('\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName)

def getCurrentFolder(currentFolder):
    if not currentFolder:
        currentFolder = '/'
    # Check the current folder syntax (must begin and end with a slash).
    if currentFolder[-1] <> "/":
        currentFolder += "/"
    if currentFolder[0] <> "/":
        currentFolder = "/" + currentFolder
    # Ensure the folder path has no double-slashes
    while '//' in currentFolder:
        currentFolder = currentFolder.replace('//', '/')
    # Check for invalid folder paths (..)
    if '..' in currentFolder or '\\' in currentFolder:
        return None
    # Check for other invalid characters in the folder path
    if re.search('(/\\.)|(//)|([\\\\:\\*\\?\\""\\<\\>\\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder):
        return None
    return currentFolder

def mapServerPath(environ, url):
    "Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to"
    # This isn't correct but for the moment there's no other solution
    # If this script is under a virtual directory or symlink it will detect the problem and stop
    return combinePaths(getRootPath(environ), url)

def mapServerFolder(resourceTypePath, folderPath):
    return combinePaths(resourceTypePath, folderPath)

def getRootPath(environ):
    "Purpose: returns the root path on the server"
    # WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
    # Use Config.UserFilesAbsolutePath instead
    if environ.has_key('DOCUMENT_ROOT'):
        return environ['DOCUMENT_ROOT']
    else:
        realPath = os.path.realpath('./')
        selfPath = environ['SCRIPT_FILENAME']
        selfPath = selfPath[:selfPath.rfind('/')]
        selfPath = selfPath.replace('/', os.path.sep)
        position = realPath.find(selfPath)
        # This can check only that this script isn't run from a virtual dir
        # But it avoids the problems that arise if it isn't checked
        #raise realPath  # debugging leftover disabled: an unconditional raise would make the check below unreachable
        if position < 0 or position <> len(realPath) - len(selfPath) or realPath[:position] == '':
            raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
        return realPath[:position]
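# Editor's note: a minimal usage sketch of the helpers above (an illustration, not
# part of the original connector). It assumes Python 2 and that this directory is
# importable, so that fckutil can load its config module; the sample values are
# hypothetical.
if __name__ == '__main__':
    import fckutil
    print fckutil.combinePaths('/var/www/userfiles/', '/image/')  # '/var/www/userfiles/image/'
    print fckutil.getCurrentFolder('images/2010')    # normalised to '/images/2010/'
    print fckutil.getCurrentFolder('../secret')      # None: rejected as invalid
    print fckutil.sanitizeFileName('my file?.php.txt')  # inner dots and '?' replaced by '_'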
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python (CGI and WSGI).

See config.py for configuration settings
"""

import os

from fckutil import *
from fckcommands import *  # default commands' implementation
from fckoutput import *    # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase  # import base connector
import config as Config

class FCKeditorConnector(FCKeditorConnectorBase,
                         GetFoldersCommandMixin,
                         GetFoldersAndFilesCommandMixin,
                         CreateFolderCommandMixin,
                         UploadFileCommandMixin,
                         BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin):
    "The standard connector class."

    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        s = ""
        # Check if this connector is disabled
        if not Config.Enabled:
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command", "Type", "CurrentFolder"):
            if not self.request.has_key(key):
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths
        if currentFolder is None:
            if command == "FileUpload":
                return self.sendUploadResults(errorNo=102, customMsg="")
            else:
                return self.sendError(102, "")

        # Check if it is an allowed command
        if not command in Config.ConfigAllowedCommands:
            return self.sendError(1, 'The %s command isn\'t allowed' % command)

        if not resourceType in Config.ConfigAllowedTypes:
            return self.sendError(1, 'Invalid type specified')

        # Setup paths
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]

        if not self.userFilesFolder:  # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ, self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                self.createServerFolder(self.userFilesFolder)
            except:
                return self.sendError(1, "This connector couldn't access the local user files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again.")

        # File upload doesn't have to return XML, so intercept it here
        if command == "FileUpload":
            return self.uploadFile(resourceType, currentFolder)

        # Create the url
        url = combinePaths(self.webUserFilesFolder, currentFolder)

        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s

# Running from the command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a connector instance
        conn = FCKeditorConnector()
        data = conn.doResponse()
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python and Zope.

This code was not tested at all.
It was just ported from the pre-2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous releases.
"""

from fckutil import *
from connector import *
import config as Config

class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector
    """
    # Allow access (Zope)
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """
        Constructor
        """
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance attributes
        self.context = context
        self.request = FCKeditorRequest(context)

    def getZopeRootContext(self):
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName <> "":
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        # Open the folders node
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        folders = self.getZopeFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        # Open the files node
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File", "Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)
            )
        # Close the files node
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        # Returns the context of the resource / folder
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if resourceType <> "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName <> "":
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:
            if count:
                count += 1
            else:
                count = 1
            return self.zopeFileUpload(resourceType, currentFolder, count)
        return self.sendUploadResults(0)

class FCKeditorRequest(object):
    "A wrapper around the Zope request object"

    def __init__(self, context=None):
        r = context.REQUEST
        self.request = r

    def has_key(self, key):
        return self.request.has_key(key)

    def get(self, key, default=None):
        return self.request.get(key, default)

"""
Running from Zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.

## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python (CGI and WSGI).
"""

import os

try:
    # Windows needs stdio set to binary mode for file upload to work.
    import msvcrt
    msvcrt.setmode(0, os.O_BINARY)  # stdin  = 0
    msvcrt.setmode(1, os.O_BINARY)  # stdout = 1
except ImportError:
    pass

from fckutil import *
from fckoutput import *
import config as Config

class GetFoldersCommandMixin(object):
    def getFolders(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders
        """
        # Map the virtual path to our local server
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        s = """<Folders>"""  # Open the folders node
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                s += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
        s += """</Folders>"""  # Close the folders node
        return s

class GetFoldersAndFilesCommandMixin(object):
    def getFoldersAndFiles(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders and files
        """
        # Map the virtual path to our local server
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        # Open the folders / files nodes
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
            elif os.path.isfile(someObjectPath):
                size = os.path.getsize(someObjectPath)
                if size > 0:
                    size = round(size / 1024)
                    if size < 1:
                        size = 1
                files += """<File name="%s" size="%d" />""" % (
                    convertToXmlAttribute(someObject),
                    size
                )
        # Close the folders / files nodes
        folders += """</Folders>"""
        files += """</Files>"""
        return folders + files

class CreateFolderCommandMixin(object):
    def createFolder(self, resourceType, currentFolder):
        """
        Purpose: command to create a new folder
        """
        errorNo = 0
        errorMsg = ''
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            newFolder = sanitizeFolderName(newFolder)
            try:
                newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
                self.createServerFolder(newFolderPath)
            except Exception, e:
                errorMsg = str(e).decode('iso-8859-1').encode('utf-8')  # beware of encodings!
                if hasattr(e, 'errno'):
                    if e.errno == 17:  # file already exists
                        errorNo = 0
                    elif e.errno == 13:  # permission denied
                        errorNo = 103
                    elif e.errno == 36 or e.errno == 2 or e.errno == 22:  # filename too long / no such file / invalid name
                        errorNo = 102
                    else:
                        errorNo = 110
        else:
            errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def createServerFolder(self, folderPath):
        "Purpose: physically creates a folder on the server"
        # No need to check if the parent exists, just create the whole hierarchy
        try:
            permissions = Config.ChmodOnFolderCreate
            if not permissions:
                os.makedirs(folderPath)
        except AttributeError:  # ChmodOnFolderCreate undefined
            permissions = 0755

        if permissions:
            oldumask = os.umask(0)
            os.makedirs(folderPath, mode=0755)
            os.umask(oldumask)

class UploadFileCommandMixin(object):
    def uploadFile(self, resourceType, currentFolder):
        """
        Purpose: command to upload files to the server (same as FileUpload)
        """
        errorNo = 0
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileName = sanitizeFileName(newFileName)
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]
            if allowedExtensions:
                # Check the allowed list
                isAllowed = False
                if newFileExtension in allowedExtensions:
                    isAllowed = True
            elif deniedExtensions:
                # Check the denied list
                isAllowed = True
                if newFileExtension in deniedExtensions:
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True

            if isAllowed:
                # Upload to the operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
                i = 0
                while True:
                    newFilePath = os.path.join(currentFolderPath, newFileName)
                    if os.path.exists(newFilePath):
                        i += 1
                        newFileName = "%s(%d).%s" % (
                            newFileNameOnly, i, newFileExtension
                        )
                        errorNo = 201  # file renamed
                    else:
                        # Read the file contents and write them to the desired path (similar to php's move_uploaded_file)
                        fout = file(newFilePath, 'wb')
                        while True:
                            chunk = newFile.file.read(100000)
                            if not chunk:
                                break
                            fout.write(chunk)
                        fout.close()

                        if os.path.exists(newFilePath):
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError:  # ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0755
                            if doChmod:
                                oldumask = os.umask(0)
                                os.chmod(newFilePath, permissions)
                                os.umask(oldumask)

                        newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName

                        return self.sendUploadResults(errorNo, newFileUrl, newFileName)
            else:
                return self.sendUploadResults(errorNo=202, customMsg="")
        else:
            return self.sendUploadResults(errorNo=202, customMsg="No File")
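# Editor's note: a small standalone sketch (not from the original source) of the
# allow/deny decision that UploadFileCommandMixin.uploadFile applies above. The
# function name is hypothetical; the logic mirrors the mixin: an allowed-list wins
# when present, otherwise the denied-list is consulted, otherwise anything goes.
def _isExtensionAllowed(extension, allowedExtensions, deniedExtensions):
    if allowedExtensions:
        return extension in allowedExtensions
    if deniedExtensions:
        return extension not in deniedExtensions
    return True

# For example: _isExtensionAllowed('jpg', ['jpg', 'png'], []) is True, while
# _isExtensionAllowed('php', ['jpg', 'png'], []) is False.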
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python (CGI and WSGI).
"""

from time import gmtime, strftime
import string

def escape(text, replace=string.replace):
    """Converts the special characters '<', '>', and '&'.

    RFC 1866 specifies that these characters be represented
    in HTML as &lt; &gt; and &amp; respectively.

    In Python 1.5 we use the new string.replace() function for speed.
    """
    text = replace(text, '&', '&amp;')  # must be done first
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text

def convertToXmlAttribute(value):
    if value is None:
        value = ""
    return escape(value)

class BaseHttpMixin(object):
    def setHttpHeaders(self, content_type='text/xml'):
        "Purpose: to prepare the headers for the xml to return"
        # Prevent the browser from caching the result.
        # Date in the past
        self.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')
        # Always modified
        self.setHeader('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
        # HTTP/1.1
        self.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate')
        self.setHeader('Cache-Control', 'post-check=0, pre-check=0')
        # HTTP/1.0
        self.setHeader('Pragma', 'no-cache')
        # Set the response format.
        self.setHeader('Content-Type', content_type + '; charset=utf-8')
        return

class BaseXmlMixin(object):
    def createXmlHeader(self, command, resourceType, currentFolder, url):
        "Purpose: returns the xml header"
        self.setHttpHeaders()
        # Create the XML document header
        s = """<?xml version="1.0" encoding="utf-8" ?>"""
        # Create the main connector node
        s += """<Connector command="%s" resourceType="%s">""" % (
            command,
            resourceType
        )
        # Add the current folder node
        s += """<CurrentFolder path="%s" url="%s" />""" % (
            convertToXmlAttribute(currentFolder),
            convertToXmlAttribute(url),
        )
        return s

    def createXmlFooter(self):
        "Purpose: returns the xml footer"
        return """</Connector>"""

    def sendError(self, number, text):
        "Purpose: in the event of an error, return an xml based error"
        self.setHttpHeaders()
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" +
                self.sendErrorNode(number, text) +
                """</Connector>""")

    def sendErrorNode(self, number, text):
        if number != 1:
            return """<Error number="%s" />""" % (number)
        else:
            return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))

class BaseHtmlMixin(object):
    def sendUploadResults(self, errorNo=0, fileUrl='', fileName='', customMsg=''):
        "This is the function that sends the results of the uploading process"
        # Minified version of the document.domain automatic fix script (#1919).
        # The original script can be found at _dev/domain_fix_template.js
        self.setHttpHeaders("text/html")
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();

window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
            'errorNumber': errorNo,
            'fileUrl': fileUrl.replace('"', '\\"'),
            'fileName': fileName.replace('"', '\\"'),
            'customMsg': customMsg.replace('"', '\\"'),
        }
#!/usr/bin/env python

"""
 * FCKeditor - The text editor for Internet - http://www.fckeditor.net
 * Copyright (C) 2003-2010 Frederico Caldeira Knabben
 *
 * == BEGIN LICENSE ==
 *
 * Licensed under the terms of any of the following licenses at your choice:
 *
 *  - GNU General Public License Version 2 or later (the "GPL")
 *    http://www.gnu.org/licenses/gpl.html
 *  - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
 *    http://www.gnu.org/licenses/lgpl.html
 *  - Mozilla Public License Version 1.1 or later (the "MPL")
 *    http://www.mozilla.org/MPL/MPL-1.1.html
 *
 * == END LICENSE ==
 *
 * Configuration file for the File Manager Connector for Python
"""

# INSTALLATION NOTE: You must set up your server environment to run python
# scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
#  * WSGI (recommended): you'll need apache + mod_python + modpython_gateway,
#    or any web server capable of the WSGI python standard.
#  * Plain old CGI: any server capable of running standard python scripts
#    (although mod_python is recommended for performance).
#    This was the operation mode of the previous connector version.
#
# If you're using the Apache web server, rename htaccess.txt to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it into this
# directory.

# SECURITY: You must explicitly enable this "connector" (set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True"; you must be sure that only
# authenticated users can access this file, or use some kind of session checking.
Enabled = False

# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'

# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''

# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True

# What the user can do with this connector.
ConfigAllowedCommands = ['QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder']

# Allowed Resource Types.
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']

# After a file is uploaded, it is sometimes required to change its permissions
# so that it remains accessible at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755

# See the comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755

# Do not touch these 3 lines; see "Configuration settings for each Resource Type" below.
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};

# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
#   If it is empty then any file type can be uploaded.
# - DeniedExtensions: the extensions that won't be allowed.
#   If it is empty then no restrictions are applied here.
#
#   For a file to be uploaded it has to fulfill both the AllowedExtensions
#   and DeniedExtensions (that is: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
#   these resources will be located.
#   Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
#   an absolute path.
#   If it's an empty string then it will be autocalculated.
#   Useful if you are using a virtual directory, symbolic link or alias.
#   Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
#   Attention: The above 'FileTypesPath' must point to the same directory.
#   Attention: It must end with a slash: '/'
#
# - QuickUploadPath: the virtual folder relative to the document root where
#   these resources will be uploaded using the Upload tab in the resources
#   dialogs.
#   Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
#   an absolute path.
#   If it's an empty string then it will be autocalculated.
#   Useful if you are using a virtual directory, symbolic link or alias.
#   Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
#   Attention: The above 'QuickUploadPath' must point to the same directory.
#   Attention: It must end with a slash: '/'

AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image'] = FileTypesAbsolutePath['Image']

AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash'] = FileTypesAbsolutePath['Flash']

AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla','flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media'] = FileTypesAbsolutePath['Media']
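# Editor's note: a hedged sketch of how an additional resource type could be declared,
# following the exact pattern of the entries above. The 'Docs' type name and its
# extension list are hypothetical, and a new type must also be added to
# ConfigAllowedTypes before the connector will accept it.
#
#   ConfigAllowedTypes.append('Docs')
#   AllowedExtensions['Docs'] = ['doc','odt','pdf','rtf','txt']
#   DeniedExtensions['Docs'] = []
#   FileTypesPath['Docs'] = UserFilesPath + 'docs/'
#   FileTypesAbsolutePath['Docs'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'docs/') or ''
#   QuickUploadPath['Docs'] = FileTypesPath['Docs']
#   QuickUploadAbsolutePath['Docs'] = FileTypesAbsolutePath['Docs']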
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Base Connector for Python (CGI and WSGI).

See config.py for configuration settings
"""

import cgi, os

from fckutil import *
from fckcommands import *  # default commands' implementation
from fckoutput import *    # base http, xml and html output mixins
import config as Config

class FCKeditorConnectorBase(object):
    "The base connector class. Subclass it to extend functionality (see the Zope example)"

    def __init__(self, environ=None):
        "Constructor: here you should parse request fields, initialize variables, etc."
        self.request = FCKeditorRequest(environ)  # Parse the request
        self.headers = []                         # Clean headers
        if environ:
            self.environ = environ
        else:
            self.environ = os.environ

    # Local functions

    def setHeader(self, key, value):
        self.headers.append((key, value))
        return

class FCKeditorRequest(object):
    "A wrapper around the request object"

    def __init__(self, environ):
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()

        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # We are in a POST, but a GET query string exists;
                # cgi parses the POST data by default, so parse the GET QUERY_STRING too.
                self.get_request = cgi.FieldStorage(fp=None,
                                                    environ={
                                                        'REQUEST_METHOD': 'GET',
                                                        'QUERY_STRING': self.environ['QUERY_STRING'],
                                                    },
                                                    )
            else:
                self.get_request = {}

    def has_key(self, key):
        return self.request.has_key(key) or self.get_request.has_key(key)

    def get(self, key, default=None):
        if key in self.request.keys():
            field = self.request[key]
        elif key in self.get_request.keys():
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:  # file upload, do not convert the return value
            return field
        else:
            return field.value
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector/QuickUpload for Python (WSGI wrapper).

See config.py for configuration settings
"""

from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload

import cgitb
from cStringIO import StringIO

# Running from a WSGI capable server (recommended)
def App(environ, start_response):
    "WSGI entry point. Run the connector"
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        start_response("200 Ok", [('Content-Type', 'text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # Run the connector
        data = conn.doResponse()
        # Start the WSGI response:
        start_response("200 Ok", conn.headers)
        # Send the response text
        yield data
    except:
        start_response("500 Internal Server Error", [("Content-type", "text/html")])
        file = StringIO()
        cgitb.Hook(file=file).handle()
        yield file.getvalue()
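# Editor's note: a minimal local-hosting sketch (not part of the original file) using
# the standard-library wsgiref server. The host, port and the _dev_app shim are
# assumptions for illustration: wsgiref leaves SCRIPT_NAME empty, while App() above
# dispatches on it, so the shim copies PATH_INFO into SCRIPT_NAME for development use.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server

    def _dev_app(environ, start_response):
        if not environ.get('SCRIPT_NAME'):
            environ['SCRIPT_NAME'] = environ.get('PATH_INFO', '/connector.py')
        return App(environ, start_response)

    httpd = make_server('localhost', 8000, _dev_app)
    print "Development server on http://localhost:8000/connector.py ..."
    httpd.serve_forever()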
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html
 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html
 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

This page lists the data posted by a form.
"""

import cgi
import os

# Tell the browser to render html
print "Content-Type: text/html"
print ""

try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    print e

# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
    <head>
        <title>FCKeditor - Samples - Posted Data</title>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
        <meta name="robots" content="noindex, nofollow">
        <link href="../sample.css" rel="stylesheet" type="text/css" />
    </head>
    <body>
"""

# This is the real work
print """
        <h1>FCKeditor - Samples - Posted Data</h1>
        This page lists all data posted by the form.
        <hr>
        <table border="1" cellspacing="0" id="outputSample">
            <colgroup><col width="80"><col></colgroup>
            <thead>
                <tr>
                    <th>Field Name</th>
                    <th>Value</th>
                </tr>
            </thead>
"""
for key in form.keys():
    try:
        value = form[key].value
        print """
            <tr>
                <th>%s</th>
                <td><pre>%s</pre></td>
            </tr>
        """ % (cgi.escape(key), cgi.escape(value))
    except Exception, e:
        print e
print "</table>"

# For testing your environment:
#print "<hr>"
#for key in os.environ.keys():
#    print "%s: %s<br>" % (key, os.environ.get(key, ""))
#print "<hr>"

# Document footer
print """
    </body>
</html>
"""
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Sample page. """ import cgi import os # Ensure that the fckeditor.py is included in your classpath import fckeditor # Tell the browser to render html print "Content-Type: text/html" print "" # Document header print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor - Sample</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="robots" content="noindex, nofollow"> <link href="../sample.css" rel="stylesheet" type="text/css" /> </head> <body> <h1>FCKeditor - Python - Sample 1</h1> This sample displays a normal HTML form with an FCKeditor with full features enabled. <hr> <form action="sampleposteddata.py" method="post" target="_blank"> """ # This is the real work try: sBasePath = os.environ.get("SCRIPT_NAME") sBasePath = sBasePath[0:sBasePath.find("_samples")] oFCKeditor = fckeditor.FCKeditor('FCKeditor1') oFCKeditor.BasePath = sBasePath oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>""" print oFCKeditor.Create() except Exception, e: print e print """ <br> <input type="submit" value="Submit"> </form> """ # For testing your environments #print "<hr>" #for key in os.environ.keys(): # print "%s: %s<br>" % (key, os.environ.get(key, "")) #print "<hr>" # Document footer print """ </body> </html> """
Python
""" FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This is the integration file for Python. """ import cgi import os import re import string def escape(text, replace=string.replace): """Converts the special characters '<', '>', and '&'. RFC 1866 specifies that these characters be represented in HTML as &lt; &gt; and &amp; respectively. In Python 1.5 we use the new string.replace() function for speed. """ text = replace(text, '&', '&amp;') # must be done 1st text = replace(text, '<', '&lt;') text = replace(text, '>', '&gt;') text = replace(text, '"', '&quot;') text = replace(text, "'", '&#39;') return text # The FCKeditor class class FCKeditor(object): def __init__(self, instanceName): self.InstanceName = instanceName self.BasePath = '/fckeditor/' self.Width = '100%' self.Height = '200' self.ToolbarSet = 'Default' self.Value = ''; self.Config = {} def Create(self): return self.CreateHtml() def CreateHtml(self): HtmlValue = escape(self.Value) Html = "" if (self.IsCompatible()): File = "fckeditor.html" Link = "%seditor/%s?InstanceName=%s" % ( self.BasePath, File, self.InstanceName ) if (self.ToolbarSet is not None): Link += "&amp;Toolbar=%s" % self.ToolbarSet # Render the linked hidden field Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % ( self.InstanceName, self.InstanceName, HtmlValue ) # Render the configurations hidden field Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % ( self.InstanceName, self.GetConfigFieldString() ) # Render the editor iframe Html += "<iframe id=\"%s\__Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % ( self.InstanceName, Link, self.Width, self.Height ) else: if (self.Width.find("%%") < 0): WidthCSS = "%spx" % self.Width else: WidthCSS = self.Width if (self.Height.find("%%") < 0): HeightCSS = "%spx" % self.Height else: HeightCSS = self.Height Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % ( self.InstanceName, WidthCSS, HeightCSS, HtmlValue ) return Html def IsCompatible(self): if (os.environ.has_key("HTTP_USER_AGENT")): sAgent = os.environ.get("HTTP_USER_AGENT", "") else: sAgent = "" if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0): i = sAgent.find("MSIE") iVersion = float(sAgent[i+5:i+5+3]) if (iVersion >= 5.5): return True return False elif (sAgent.find("Gecko/") >= 0): i = sAgent.find("Gecko/") iVersion = int(sAgent[i+6:i+6+8]) if (iVersion >= 20030210): return True return False elif (sAgent.find("Opera/") >= 0): i = sAgent.find("Opera/") iVersion = float(sAgent[i+6:i+6+4]) if (iVersion >= 9.5): return True return False elif (sAgent.find("AppleWebKit/") >= 0): p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE) m = p.search(sAgent) if (m.group(1) >= 522): return True return False else: return False def GetConfigFieldString(self): sParams = "" bFirst = True for sKey in self.Config.keys(): sValue = self.Config[sKey] if (not bFirst): 
sParams += "&amp;" else: bFirst = False if (sValue): k = escape(sKey) v = escape(sValue) if (sValue == "true"): sParams += "%s=true" % k elif (sValue == "false"): sParams += "%s=false" % k else: sParams += "%s=%s" % (k, v) return sParams
Python
#!/usr/bin/python # Copyright 2011 Google, Inc. All Rights Reserved. # simple script to walk source tree looking for third-party licenses # dumps resulting html page to stdout import os, re, mimetypes, sys # read source directories to scan from command line SOURCE = sys.argv[1:] # regex to find /* */ style comment blocks COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL) # regex used to detect if comment block is a license COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE) COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE) EXCLUDE_TYPES = [ "application/xml", "image/png", ] # list of known licenses; keys are derived by stripping all non-word characters and # forcing to lowercase to help combine multiple files that have the same license. KNOWN_LICENSES = {} class License: def __init__(self, license_text): self.license_text = license_text self.filenames = [] # add filename to the list of files that have the same license text def add_file(self, filename): if filename not in self.filenames: self.filenames.append(filename) LICENSE_KEY = re.compile(r"[^\w]") def find_license(license_text): # TODO(alice): a lot of these licenses are almost identical Apache licenses. # Most of them differ in origin/modifications. Consider combining similar # licenses. license_key = LICENSE_KEY.sub("", license_text).lower() if license_key not in KNOWN_LICENSES: KNOWN_LICENSES[license_key] = License(license_text) return KNOWN_LICENSES[license_key] def discover_license(exact_path, filename): # when filename ends with LICENSE, assume it applies to the file named by the prefix if filename.endswith("LICENSE"): with open(exact_path) as file: license_text = file.read() target_filename = filename[:-len("LICENSE")] if target_filename.endswith("."): target_filename = target_filename[:-1] find_license(license_text).add_file(target_filename) return None # try searching for license blocks in raw file; guess_type() returns a (type, encoding) tuple mimetype = mimetypes.guess_type(filename)[0] if mimetype in EXCLUDE_TYPES: return None with open(exact_path) as file: raw_file = file.read() # include comments that have both "license" and "copyright" in the text for comment in COMMENT_BLOCK.finditer(raw_file): comment = comment.group(1) if COMMENT_LICENSE.search(comment) is None: continue if COMMENT_COPYRIGHT.search(comment) is None: continue find_license(comment).add_file(filename) for source in SOURCE: for root, dirs, files in os.walk(source): for name in files: discover_license(os.path.join(root, name), name) print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>" for license in KNOWN_LICENSES.values(): print "<h3>Notices for files:</h3><ul>" filenames = license.filenames filenames.sort() for filename in filenames: print "<li>%s</li>" % (filename) print "</ul>" print "<pre>%s</pre>" % license.license_text print "</body></html>"
Python
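The scanner above deduplicates notices by stripping every non-word character from a comment block and lowercasing the result before using it as a dictionary key. A tiny standalone sketch of that normalization (the two sample strings are made up), showing why copies of the same notice with different wrapping collapse into a single License entry:

import re
LICENSE_KEY = re.compile(r"[^\w]")
a = "/* Copyright 2011 Example.\n * Licensed under the Foo License. */"
b = "/*Copyright 2011 Example. Licensed   under the Foo License.*/"
print LICENSE_KEY.sub("", a).lower() == LICENSE_KEY.sub("", b).lower()   # True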
# Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'includes': [ 'build_angle.gypi', ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
Python
# Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'angle_code': 1, }, 'target_defaults': { 'defines': [ 'ANGLE_DISABLE_TRACE', 'ANGLE_COMPILE_OPTIMIZATION_LEVEL=D3DCOMPILE_OPTIMIZATION_LEVEL1', 'ANGLE_PRELOADED_D3DCOMPILER_MODULE_NAMES={ TEXT("d3dcompiler_46.dll"), TEXT("d3dcompiler_43.dll") }', ], }, 'targets': [ { 'target_name': 'preprocessor', 'type': 'static_library', 'include_dirs': [ ], 'sources': [ 'compiler/preprocessor/DiagnosticsBase.cpp', 'compiler/preprocessor/DiagnosticsBase.h', 'compiler/preprocessor/DirectiveHandlerBase.cpp', 'compiler/preprocessor/DirectiveHandlerBase.h', 'compiler/preprocessor/DirectiveParser.cpp', 'compiler/preprocessor/DirectiveParser.h', 'compiler/preprocessor/ExpressionParser.cpp', 'compiler/preprocessor/ExpressionParser.h', 'compiler/preprocessor/Input.cpp', 'compiler/preprocessor/Input.h', 'compiler/preprocessor/length_limits.h', 'compiler/preprocessor/Lexer.cpp', 'compiler/preprocessor/Lexer.h', 'compiler/preprocessor/Macro.cpp', 'compiler/preprocessor/Macro.h', 'compiler/preprocessor/MacroExpander.cpp', 'compiler/preprocessor/MacroExpander.h', 'compiler/preprocessor/numeric_lex.h', 'compiler/preprocessor/pp_utils.h', 'compiler/preprocessor/Preprocessor.cpp', 'compiler/preprocessor/Preprocessor.h', 'compiler/preprocessor/SourceLocation.h', 'compiler/preprocessor/Token.cpp', 'compiler/preprocessor/Token.h', 'compiler/preprocessor/Tokenizer.cpp', 'compiler/preprocessor/Tokenizer.h', ], # TODO(jschuh): http://crbug.com/167187 'msvs_disabled_warnings': [ 4267, ], }, { 'target_name': 'translator_common', 'type': 'static_library', 'dependencies': ['preprocessor'], 'include_dirs': [ '.', '../include', ], 'defines': [ 'COMPILER_IMPLEMENTATION', ], 'sources': [ 'compiler/BaseTypes.h', 'compiler/BuiltInFunctionEmulator.cpp', 'compiler/BuiltInFunctionEmulator.h', 'compiler/Common.h', 'compiler/Compiler.cpp', 'compiler/ConstantUnion.h', 'compiler/debug.cpp', 'compiler/debug.h', 'compiler/DetectCallDepth.cpp', 'compiler/DetectCallDepth.h', 'compiler/Diagnostics.h', 'compiler/Diagnostics.cpp', 'compiler/DirectiveHandler.h', 'compiler/DirectiveHandler.cpp', 'compiler/ExtensionBehavior.h', 'compiler/ForLoopUnroll.cpp', 'compiler/ForLoopUnroll.h', 'compiler/glslang.h', 'compiler/glslang_lex.cpp', 'compiler/glslang_tab.cpp', 'compiler/glslang_tab.h', 'compiler/HashNames.h', 'compiler/InfoSink.cpp', 'compiler/InfoSink.h', 'compiler/Initialize.cpp', 'compiler/Initialize.h', 'compiler/InitializeDll.cpp', 'compiler/InitializeDll.h', 'compiler/InitializeGlobals.h', 'compiler/InitializeParseContext.cpp', 'compiler/InitializeParseContext.h', 'compiler/Intermediate.cpp', 'compiler/intermediate.h', 'compiler/intermOut.cpp', 'compiler/IntermTraverse.cpp', 'compiler/localintermediate.h', 'compiler/MapLongVariableNames.cpp', 'compiler/MapLongVariableNames.h', 'compiler/MMap.h', 'compiler/osinclude.h', 'compiler/parseConst.cpp', 'compiler/ParseHelper.cpp', 'compiler/ParseHelper.h', 'compiler/PoolAlloc.cpp', 'compiler/PoolAlloc.h', 'compiler/QualifierAlive.cpp', 'compiler/QualifierAlive.h', 'compiler/RemoveTree.cpp', 'compiler/RemoveTree.h', 'compiler/RenameFunction.h', 'compiler/ShHandle.h', 'compiler/SymbolTable.cpp', 'compiler/SymbolTable.h', 'compiler/Types.h', 'compiler/Uniform.cpp', 'compiler/Uniform.h', 'compiler/util.cpp', 'compiler/util.h', 'compiler/ValidateLimitations.cpp', 'compiler/ValidateLimitations.h', 
'compiler/VariableInfo.cpp', 'compiler/VariableInfo.h', 'compiler/VariablePacker.cpp', 'compiler/VariablePacker.h', # Dependency graph 'compiler/depgraph/DependencyGraph.cpp', 'compiler/depgraph/DependencyGraph.h', 'compiler/depgraph/DependencyGraphBuilder.cpp', 'compiler/depgraph/DependencyGraphBuilder.h', 'compiler/depgraph/DependencyGraphOutput.cpp', 'compiler/depgraph/DependencyGraphOutput.h', 'compiler/depgraph/DependencyGraphTraverse.cpp', # Timing restrictions 'compiler/timing/RestrictFragmentShaderTiming.cpp', 'compiler/timing/RestrictFragmentShaderTiming.h', 'compiler/timing/RestrictVertexShaderTiming.cpp', 'compiler/timing/RestrictVertexShaderTiming.h', 'third_party/compiler/ArrayBoundsClamper.cpp', 'third_party/compiler/ArrayBoundsClamper.h', ], 'conditions': [ ['OS=="win"', { # TODO(jschuh): http://crbug.com/167187 size_t -> int 'msvs_disabled_warnings': [ 4267 ], 'sources': ['compiler/ossource_win.cpp'], }, { # else: posix 'sources': ['compiler/ossource_posix.cpp'], }], ], }, { 'target_name': 'translator_glsl', 'type': '<(component)', 'dependencies': ['translator_common'], 'include_dirs': [ '.', '../include', ], 'defines': [ 'COMPILER_IMPLEMENTATION', ], 'sources': [ 'compiler/CodeGenGLSL.cpp', 'compiler/OutputESSL.cpp', 'compiler/OutputESSL.h', 'compiler/OutputGLSLBase.cpp', 'compiler/OutputGLSLBase.h', 'compiler/OutputGLSL.cpp', 'compiler/OutputGLSL.h', 'compiler/ShaderLang.cpp', 'compiler/TranslatorESSL.cpp', 'compiler/TranslatorESSL.h', 'compiler/TranslatorGLSL.cpp', 'compiler/TranslatorGLSL.h', 'compiler/VersionGLSL.cpp', 'compiler/VersionGLSL.h', ], # TODO(jschuh): http://crbug.com/167187 size_t -> int 'msvs_disabled_warnings': [ 4267 ], }, ], 'conditions': [ ['OS=="win"', { 'targets': [ { 'target_name': 'translator_hlsl', 'type': '<(component)', 'dependencies': ['translator_common'], 'include_dirs': [ '.', '../include', ], 'defines': [ 'COMPILER_IMPLEMENTATION', ], 'sources': [ 'compiler/ShaderLang.cpp', 'compiler/DetectDiscontinuity.cpp', 'compiler/DetectDiscontinuity.h', 'compiler/CodeGenHLSL.cpp', 'compiler/OutputHLSL.cpp', 'compiler/OutputHLSL.h', 'compiler/TranslatorHLSL.cpp', 'compiler/TranslatorHLSL.h', 'compiler/UnfoldShortCircuit.cpp', 'compiler/UnfoldShortCircuit.h', 'compiler/SearchSymbol.cpp', 'compiler/SearchSymbol.h', ], # TODO(jschuh): http://crbug.com/167187 size_t -> int 'msvs_disabled_warnings': [ 4267 ], }, { 'target_name': 'libGLESv2', 'type': 'shared_library', 'dependencies': ['translator_hlsl'], 'include_dirs': [ '.', '../include', 'libGLESv2', ], 'sources': [ 'third_party/murmurhash/MurmurHash3.h', 'third_party/murmurhash/MurmurHash3.cpp', 'common/angleutils.h', 'common/debug.cpp', 'common/debug.h', 'common/RefCountObject.cpp', 'common/RefCountObject.h', 'common/version.h', 'libGLESv2/precompiled.h', 'libGLESv2/precompiled.cpp', 'libGLESv2/BinaryStream.h', 'libGLESv2/Buffer.cpp', 'libGLESv2/Buffer.h', 'libGLESv2/constants.h', 'libGLESv2/Context.cpp', 'libGLESv2/Context.h', 'libGLESv2/angletypes.h', 'libGLESv2/Fence.cpp', 'libGLESv2/Fence.h', 'libGLESv2/Float16ToFloat32.cpp', 'libGLESv2/Framebuffer.cpp', 'libGLESv2/Framebuffer.h', 'libGLESv2/HandleAllocator.cpp', 'libGLESv2/HandleAllocator.h', 'libGLESv2/libGLESv2.cpp', 'libGLESv2/libGLESv2.def', 'libGLESv2/libGLESv2.rc', 'libGLESv2/main.cpp', 'libGLESv2/main.h', 'libGLESv2/mathutil.h', 'libGLESv2/Program.cpp', 'libGLESv2/Program.h', 'libGLESv2/ProgramBinary.cpp', 'libGLESv2/ProgramBinary.h', 'libGLESv2/Query.h', 'libGLESv2/Query.cpp', 'libGLESv2/Renderbuffer.cpp', 'libGLESv2/Renderbuffer.h', 
'libGLESv2/renderer/Blit.cpp', 'libGLESv2/renderer/Blit.h', 'libGLESv2/renderer/BufferStorage.h', 'libGLESv2/renderer/BufferStorage.cpp', 'libGLESv2/renderer/BufferStorage9.cpp', 'libGLESv2/renderer/BufferStorage9.h', 'libGLESv2/renderer/BufferStorage11.cpp', 'libGLESv2/renderer/BufferStorage11.h', 'libGLESv2/renderer/FenceImpl.h', 'libGLESv2/renderer/Fence9.cpp', 'libGLESv2/renderer/Fence9.h', 'libGLESv2/renderer/Fence11.cpp', 'libGLESv2/renderer/Fence11.h', 'libGLESv2/renderer/generatemip.h', 'libGLESv2/renderer/Image.cpp', 'libGLESv2/renderer/Image.h', 'libGLESv2/renderer/Image11.cpp', 'libGLESv2/renderer/Image11.h', 'libGLESv2/renderer/Image9.cpp', 'libGLESv2/renderer/Image9.h', 'libGLESv2/renderer/ImageSSE2.cpp', 'libGLESv2/renderer/IndexBuffer.cpp', 'libGLESv2/renderer/IndexBuffer.h', 'libGLESv2/renderer/IndexBuffer9.cpp', 'libGLESv2/renderer/IndexBuffer9.h', 'libGLESv2/renderer/IndexBuffer11.cpp', 'libGLESv2/renderer/IndexBuffer11.h', 'libGLESv2/renderer/IndexDataManager.cpp', 'libGLESv2/renderer/IndexDataManager.h', 'libGLESv2/renderer/InputLayoutCache.cpp', 'libGLESv2/renderer/InputLayoutCache.h', 'libGLESv2/renderer/QueryImpl.h', 'libGLESv2/renderer/Query9.cpp', 'libGLESv2/renderer/Query9.h', 'libGLESv2/renderer/Query11.cpp', 'libGLESv2/renderer/Query11.h', 'libGLESv2/renderer/Renderer.cpp', 'libGLESv2/renderer/Renderer.h', 'libGLESv2/renderer/Renderer11.cpp', 'libGLESv2/renderer/Renderer11.h', 'libGLESv2/renderer/renderer11_utils.cpp', 'libGLESv2/renderer/renderer11_utils.h', 'libGLESv2/renderer/Renderer9.cpp', 'libGLESv2/renderer/Renderer9.h', 'libGLESv2/renderer/renderer9_utils.cpp', 'libGLESv2/renderer/renderer9_utils.h', 'libGLESv2/renderer/RenderStateCache.cpp', 'libGLESv2/renderer/RenderStateCache.h', 'libGLESv2/renderer/RenderTarget.h', 'libGLESv2/renderer/RenderTarget11.h', 'libGLESv2/renderer/RenderTarget11.cpp', 'libGLESv2/renderer/RenderTarget9.h', 'libGLESv2/renderer/RenderTarget9.cpp', 'libGLESv2/renderer/ShaderCache.h', 'libGLESv2/renderer/ShaderExecutable.h', 'libGLESv2/renderer/ShaderExecutable9.cpp', 'libGLESv2/renderer/ShaderExecutable9.h', 'libGLESv2/renderer/ShaderExecutable11.cpp', 'libGLESv2/renderer/ShaderExecutable11.h', 'libGLESv2/renderer/SwapChain.h', 'libGLESv2/renderer/SwapChain9.cpp', 'libGLESv2/renderer/SwapChain9.h', 'libGLESv2/renderer/SwapChain11.cpp', 'libGLESv2/renderer/SwapChain11.h', 'libGLESv2/renderer/TextureStorage.cpp', 'libGLESv2/renderer/TextureStorage.h', 'libGLESv2/renderer/TextureStorage11.cpp', 'libGLESv2/renderer/TextureStorage11.h', 'libGLESv2/renderer/TextureStorage9.cpp', 'libGLESv2/renderer/TextureStorage9.h', 'libGLESv2/renderer/VertexBuffer.cpp', 'libGLESv2/renderer/VertexBuffer.h', 'libGLESv2/renderer/VertexBuffer9.cpp', 'libGLESv2/renderer/VertexBuffer9.h', 'libGLESv2/renderer/VertexBuffer11.cpp', 'libGLESv2/renderer/VertexBuffer11.h', 'libGLESv2/renderer/vertexconversion.h', 'libGLESv2/renderer/VertexDataManager.cpp', 'libGLESv2/renderer/VertexDataManager.h', 'libGLESv2/renderer/VertexDeclarationCache.cpp', 'libGLESv2/renderer/VertexDeclarationCache.h', 'libGLESv2/ResourceManager.cpp', 'libGLESv2/ResourceManager.h', 'libGLESv2/Shader.cpp', 'libGLESv2/Shader.h', 'libGLESv2/Texture.cpp', 'libGLESv2/Texture.h', 'libGLESv2/Uniform.cpp', 'libGLESv2/Uniform.h', 'libGLESv2/utilities.cpp', 'libGLESv2/utilities.h', ], # TODO(jschuh): http://crbug.com/167187 size_t -> int 'msvs_disabled_warnings': [ 4267 ], 'msvs_settings': { 'VCLinkerTool': { 'AdditionalDependencies': [ 'd3d9.lib', 'dxguid.lib', ], } }, }, { 'target_name': 
'libEGL', 'type': 'shared_library', 'dependencies': ['libGLESv2'], 'include_dirs': [ '.', '../include', 'libGLESv2', ], 'sources': [ 'common/angleutils.h', 'common/debug.cpp', 'common/debug.h', 'common/RefCountObject.cpp', 'common/RefCountObject.h', 'common/version.h', 'libEGL/Config.cpp', 'libEGL/Config.h', 'libEGL/Display.cpp', 'libEGL/Display.h', 'libEGL/libEGL.cpp', 'libEGL/libEGL.def', 'libEGL/libEGL.rc', 'libEGL/main.cpp', 'libEGL/main.h', 'libEGL/Surface.cpp', 'libEGL/Surface.h', ], # TODO(jschuh): http://crbug.com/167187 size_t -> int 'msvs_disabled_warnings': [ 4267 ], 'msvs_settings': { 'VCLinkerTool': { 'AdditionalDependencies': [ 'd3d9.lib', ], } }, }, ], }], ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2: # Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file.
Python
# Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # This script generates a function that converts 16-bit precision floating # point numbers to 32-bit. # It is based on ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf. def convertMantissa(i): if i == 0: return 0 elif i < 1024: m = i << 13 e = 0 while not (m & 0x00800000): e -= 0x00800000 m = m << 1 m &= ~0x00800000 e += 0x38800000 return m | e else: return 0x38000000 + ((i - 1024) << 13) def convertExponent(i): if i == 0: return 0 elif i in range(1, 31): return i << 23 elif i == 31: return 0x47800000 elif i == 32: return 0x80000000 elif i in range(33, 63): return 0x80000000 + ((i - 32) << 23) else: return 0xC7800000 def convertOffset(i): if i == 0 or i == 32: return 0 else: return 1024 print """// // Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // This file is automatically generated. namespace gl { """ print "const static unsigned g_mantissa[2048] = {" for i in range(0, 2048): print " %#010x," % convertMantissa(i) print "};\n" print "const static unsigned g_exponent[64] = {" for i in range(0, 64): print " %#010x," % convertExponent(i) print "};\n" print "const static unsigned g_offset[64] = {" for i in range(0, 64): print " %#010x," % convertOffset(i) print "};\n" print """float float16ToFloat32(unsigned short h) { unsigned i32 = g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10]; return *(float*) &i32; } } """
Python
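The generator above only prints C lookup tables, but the same tables can be exercised in Python to sanity-check the conversion. A minimal sketch, assuming it sits next to the convertMantissa/convertExponent/convertOffset helpers defined in that script (the two test values are ordinary IEEE half-precision encodings):

import struct

# Build the same three tables the script prints, using the helpers above.
g_mantissa = [convertMantissa(i) for i in range(2048)]
g_exponent = [convertExponent(i) for i in range(64)]
g_offset = [convertOffset(i) for i in range(64)]

def float16ToFloat32(h):
    i32 = (g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10]) & 0xFFFFFFFF
    return struct.unpack('<f', struct.pack('<I', i32))[0]

print float16ToFloat32(0x3C00)   # 1.0
print float16ToFloat32(0xC000)   # -2.0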
deps = { "trunk/third_party/gyp": "http://gyp.googlecode.com/svn/trunk@1564", "trunk/third_party/googletest": "http://googletest.googlecode.com/svn/trunk@573", #release 1.6.0 "trunk/third_party/googlemock": "http://googlemock.googlecode.com/svn/trunk@387", #release 1.6.0 } hooks = [ { # A change to a .gyp, .gypi, or to GYP itself should run the generator. "pattern": ".", "action": ["python", "trunk/build/gyp_angle"], }, ]
Python
# Copyright (c) 2010 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'targets': [ { 'target_name': 'essl_to_glsl', 'type': 'executable', 'dependencies': [ '../src/build_angle.gyp:translator_glsl', ], 'include_dirs': [ '../include', ], 'sources': [ 'translator/translator.cpp', ], }, ], 'conditions': [ ['OS=="win"', { 'targets': [ { 'target_name': 'essl_to_hlsl', 'type': 'executable', 'dependencies': [ '../src/build_angle.gyp:translator_hlsl', ], 'include_dirs': [ '../include', '../src', ], 'sources': [ 'translator/translator.cpp', '../src/common/debug.cpp', ], 'msvs_settings': { 'VCLinkerTool': { 'AdditionalDependencies': ['d3d9.lib'], } } }, { 'target_name': 'es_util', 'type': 'static_library', 'dependencies': [ '../src/build_angle.gyp:libEGL', '../src/build_angle.gyp:libGLESv2', ], 'include_dirs': [ 'gles2_book/Common', '../include', ], 'sources': [ 'gles2_book/Common/esShader.c', 'gles2_book/Common/esShapes.c', 'gles2_book/Common/esTransform.c', 'gles2_book/Common/esUtil.c', 'gles2_book/Common/esUtil.h', 'gles2_book/Common/esUtil_win.h', 'gles2_book/Common/Win32/esUtil_TGA.c', 'gles2_book/Common/Win32/esUtil_win32.c', ], 'direct_dependent_settings': { 'include_dirs': [ 'gles2_book/Common', '../include', ], }, }, { 'target_name': 'hello_triangle', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/Hello_Triangle/Hello_Triangle.c', ], }, { 'target_name': 'mip_map_2d', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/MipMap2D/MipMap2D.c', ], }, { 'target_name': 'multi_texture', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/MultiTexture/MultiTexture.c', ], 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ 'gles2_book/MultiTexture/basemap.tga', 'gles2_book/MultiTexture/lightmap.tga', ], }, ], }, { 'target_name': 'particle_system', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/ParticleSystem/ParticleSystem.c', ], 'copies': [ { 'destination': '<(PRODUCT_DIR)', 'files': [ 'gles2_book/ParticleSystem/smoke.tga', ], }, ], }, { 'target_name': 'simple_texture_2d', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/Simple_Texture2D/Simple_Texture2D.c', ], }, { 'target_name': 'simple_texture_cubemap', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/Simple_TextureCubemap/Simple_TextureCubemap.c', ], }, { 'target_name': 'simple_vertex_shader', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/Simple_VertexShader/Simple_VertexShader.c', ], }, { 'target_name': 'stencil_test', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/Stencil_Test/Stencil_Test.c', ], }, { 'target_name': 'texture_wrap', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/TextureWrap/TextureWrap.c', ], }, { 'target_name': 'post_sub_buffer', 'type': 'executable', 'dependencies': ['es_util'], 'sources': [ 'gles2_book/PostSubBuffer/PostSubBuffer.c', ], }, ], }], ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
Python
# Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'targets': [ { 'target_name': 'gtest', 'type': 'static_library', 'include_dirs': [ '../third_party/googletest', '../third_party/googletest/include', ], 'sources': [ '../third_party/googletest/src/gtest-all.cc', ], }, { 'target_name': 'gmock', 'type': 'static_library', 'include_dirs': [ '../third_party/googlemock', '../third_party/googlemock/include', '../third_party/googletest/include', ], 'sources': [ '../third_party/googlemock/src/gmock-all.cc', ], }, { 'target_name': 'preprocessor_tests', 'type': 'executable', 'dependencies': [ '../src/build_angle.gyp:preprocessor', 'gtest', 'gmock', ], 'include_dirs': [ '../src/compiler/preprocessor', '../third_party/googletest/include', '../third_party/googlemock/include', ], 'sources': [ '../third_party/googlemock/src/gmock_main.cc', 'preprocessor_tests/char_test.cpp', 'preprocessor_tests/comment_test.cpp', 'preprocessor_tests/define_test.cpp', 'preprocessor_tests/error_test.cpp', 'preprocessor_tests/extension_test.cpp', 'preprocessor_tests/identifier_test.cpp', 'preprocessor_tests/if_test.cpp', 'preprocessor_tests/input_test.cpp', 'preprocessor_tests/location_test.cpp', 'preprocessor_tests/MockDiagnostics.h', 'preprocessor_tests/MockDirectiveHandler.h', 'preprocessor_tests/number_test.cpp', 'preprocessor_tests/operator_test.cpp', 'preprocessor_tests/pragma_test.cpp', 'preprocessor_tests/PreprocessorTest.cpp', 'preprocessor_tests/PreprocessorTest.h', 'preprocessor_tests/space_test.cpp', 'preprocessor_tests/token_test.cpp', 'preprocessor_tests/version_test.cpp', ], }, { 'target_name': 'compiler_tests', 'type': 'executable', 'dependencies': [ '../src/build_angle.gyp:translator_glsl', 'gtest', 'gmock', ], 'include_dirs': [ '../include', '../src', '../third_party/googletest/include', '../third_party/googlemock/include', ], 'sources': [ '../third_party/googlemock/src/gmock_main.cc', 'compiler_tests/ExpressionLimit_test.cpp', 'compiler_tests/VariablePacker_test.cpp', ], }, ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
Python
#==================================================================== # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ==================================================================== # # This software consists of voluntary contributions made by many # individuals on behalf of the Apache Software Foundation. For more # information on the Apache Software Foundation, please see # <http://www.apache.org/>. # import os import re import tempfile import shutil ignore_pattern = re.compile('^(.svn|target|bin|classes)') java_pattern = re.compile('^.*\.java') annot_pattern = re.compile('import org\.apache\.http\.annotation\.') def process_dir(dir): files = os.listdir(dir) for file in files: f = os.path.join(dir, file) if os.path.isdir(f): if not ignore_pattern.match(file): process_dir(f) else: if java_pattern.match(file): process_source(f) def process_source(filename): tmp = tempfile.mkstemp() tmpfd = tmp[0] tmpfile = tmp[1] try: changed = False dst = os.fdopen(tmpfd, 'w') try: src = open(filename) try: for line in src: if annot_pattern.match(line): changed = True line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.') dst.write(line) finally: src.close() finally: dst.close(); if changed: shutil.move(tmpfile, filename) else: os.remove(tmpfile) except: os.remove(tmpfile) process_dir('.')
Python
#!/usr/bin/env python from os import mkdir, makedirs, environ, chdir, getcwd, system, listdir from os.path import join from shutil import copy, copytree, move, rmtree # Usage def Usage(): return 'Usage: createversion.py <version>' # Run Xcode def RunXcode(project, target): system('/usr/bin/xcodebuild -project "%s" -target "%s" -configuration Release clean build' % (project, target)) # Get version from args import sys if len(sys.argv) <= 1: print Usage() exit(1) version = sys.argv[1] # Change to root dir of Core Plot chdir('..') projectRoot = getcwd() # Remove old docset files frameworkDir = join(projectRoot, 'framework') rmtree(join(frameworkDir, 'CorePlotDocs.docset'), True) rmtree(join(frameworkDir, 'CorePlotTouchDocs.docset'), True) # Remove old build directories rmtree(join(frameworkDir, 'build'), True) examples = listdir('examples') for ex in examples: exampleDir = join('examples', ex) rmtree(join(exampleDir, 'build'), True) # Make directory bundle desktopDir = join(environ['HOME'], 'Desktop') releaseRootDir = join(desktopDir, 'CorePlot_' + version) mkdir(releaseRootDir) # Copy license and READMEs copy('License.txt', releaseRootDir) copytree('READMEs', join(releaseRootDir, 'READMEs')) # Add source code sourceDir = join(releaseRootDir, 'Source') copytree('framework', join(sourceDir, 'framework')) copytree('examples', join(sourceDir, 'examples')) copy('License.txt', sourceDir) # Binaries binariesDir = join(releaseRootDir, 'Binaries') macosDir = join(binariesDir, 'MacOS') iosDir = join(binariesDir, 'iOS') makedirs(macosDir) mkdir(iosDir) # Build Mac Framework chdir('framework') RunXcode('CorePlot.xcodeproj', 'CorePlot') macProductsDir = join(projectRoot, 'build/Release') macFramework = join(macProductsDir, 'CorePlot.framework') copytree(macFramework, join(macosDir, 'CorePlot.framework')) # Build iOS SDK RunXcode('CorePlot-CocoaTouch.xcodeproj', 'Build SDK') sdkZipFile = join(desktopDir, 'CorePlot.zip') move(sdkZipFile, iosDir) # Build Docs RunXcode('CorePlot.xcodeproj', 'Documentation') RunXcode('CorePlot-CocoaTouch.xcodeproj', 'Documentation') # Copy Docs docDir = join(releaseRootDir, 'Documentation') copytree(join(projectRoot, 'documentation'), docDir) homeDir = environ['HOME'] docsetsDir = join(homeDir, 'Library/Developer/Shared/Documentation/DocSets') copytree(join(docsetsDir, 'com.CorePlot.Framework.docset'), join(docDir, 'com.CorePlot.Framework.docset')) copytree(join(docsetsDir, 'com.CorePlotTouch.Framework.docset'), join(docDir, 'com.CorePlotTouch.Framework.docset'))
Python
dataTypes = ["CPUndefinedDataType", "CPIntegerDataType", "CPUnsignedIntegerDataType", "CPFloatingPointDataType", "CPComplexFloatingPointDataType", "CPDecimalDataType"] types = { "CPUndefinedDataType" : [], "CPIntegerDataType" : ["int8_t", "int16_t", "int32_t", "int64_t"], "CPUnsignedIntegerDataType" : ["uint8_t", "uint16_t", "uint32_t", "uint64_t"], "CPFloatingPointDataType" : ["float", "double"], "CPComplexFloatingPointDataType" : ["float complex", "double complex"], "CPDecimalDataType" : ["NSDecimal"] } nsnumber_factory = { "int8_t" : "Char", "int16_t" : "Short", "int32_t" : "Long", "int64_t" : "LongLong", "uint8_t" : "UnsignedChar", "uint16_t" : "UnsignedShort", "uint32_t" : "UnsignedLong", "uint64_t" : "UnsignedLongLong", "float" : "Float", "double" : "Double", "float complex" : "Float", "double complex" : "Double", "NSDecimal" : "Decimal" } nsnumber_methods = { "int8_t" : "char", "int16_t" : "short", "int32_t" : "long", "int64_t" : "longLong", "uint8_t" : "unsignedChar", "uint16_t" : "unsignedShort", "uint32_t" : "unsignedLong", "uint64_t" : "unsignedLongLong", "float" : "float", "double" : "double", "float complex" : "float", "double complex" : "double", "NSDecimal" : "decimal" } null_values = { "int8_t" : "0", "int16_t" : "0", "int32_t" : "0", "int64_t" : "0", "uint8_t" : "0", "uint16_t" : "0", "uint32_t" : "0", "uint64_t" : "0", "float" : "NAN", "double" : "NAN", "float complex" : "NAN", "double complex" : "NAN", "NSDecimal" : "CPDecimalNaN()" } print "[CPNumericData sampleValue:]" print "" print "switch ( self.dataTypeFormat ) {" for dt in dataTypes: print "\tcase %s:" % dt if ( len(types[dt]) == 0 ): print '\t\t[NSException raise:NSInvalidArgumentException format:@"Unsupported data type (%s)"];' % (dt) else: print "\t\tswitch ( self.sampleBytes ) {" for t in types[dt]: print "\t\t\tcase sizeof(%s):" % t if ( t == "NSDecimal" ): number_class = "NSDecimalNumber" number_method = "decimalNumber" else: number_class = "NSNumber" number_method = "number" print "\t\t\t\tresult = [%s %sWith%s:*(%s *)[self samplePointer:sample]];" % (number_class, number_method, nsnumber_factory[t], t) print "\t\t\t\tbreak;" print "\t\t}" print "\t\tbreak;" print "}" print "\n\n" print "---------------" print "\n\n" print "[CPNumericData dataFromArray:dataType:]" print "" print "switch ( newDataType.dataTypeFormat ) {" for dt in dataTypes: print "\tcase %s:" % dt if ( len(types[dt]) == 0 ): print "\t\t// Unsupported" else: print "\t\tswitch ( newDataType.sampleBytes ) {" for t in types[dt]: print "\t\t\tcase sizeof(%s): {" % t print "\t\t\t\t%s *toBytes = (%s *)sampleData.mutableBytes;" % (t, t) print "\t\t\t\tfor ( id sample in newData ) {" print "\t\t\t\t\tif ( [sample respondsToSelector:@selector(%sValue)] ) {" % nsnumber_methods[t] print "\t\t\t\t\t\t*toBytes++ = (%s)[(NSNumber *)sample %sValue];" % (t, nsnumber_methods[t]) print "\t\t\t\t\t}" print "\t\t\t\t\telse {" print "\t\t\t\t\t\t*toBytes++ = %s;" % null_values[t] print "\t\t\t\t\t}" print "\t\t\t\t}" print "\t\t\t}" print "\t\t\t\tbreak;" print "\t\t}" print "\t\tbreak;" print "}" print "\n\n" print "---------------" print "\n\n" print "[CPNumericData convertData:dataType:toData:dataType:]" print "" print "switch ( sourceDataType->dataTypeFormat ) {" for dt in dataTypes: print "\tcase %s:" % dt if ( len(types[dt]) > 0 ): print "\t\tswitch ( sourceDataType->sampleBytes ) {" for t in types[dt]: print "\t\t\tcase sizeof(%s):" % t print "\t\t\t\tswitch ( destDataType->dataTypeFormat ) {" for ndt in dataTypes: print "\t\t\t\t\tcase %s:" % ndt if ( 
len(types[ndt]) > 0 ): print "\t\t\t\t\t\tswitch ( destDataType->sampleBytes ) {" for nt in types[ndt]: print "\t\t\t\t\t\t\tcase sizeof(%s): { // %s -> %s" % (nt, t, nt) if ( t == nt ): print "\t\t\t\t\t\t\t\t\tmemcpy(destData.mutableBytes, sourceData.bytes, sampleCount * sizeof(%s));" % t else: print "\t\t\t\t\t\t\t\t\tconst %s *fromBytes = (%s *)sourceData.bytes;" % (t, t) print "\t\t\t\t\t\t\t\t\tconst %s *lastSample = fromBytes + sampleCount;" % t print "\t\t\t\t\t\t\t\t\t%s *toBytes = (%s *)destData.mutableBytes;" % (nt, nt) if ( t == "NSDecimal" ): print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = CPDecimal%sValue(*fromBytes++);" % nsnumber_factory[nt] elif ( nt == "NSDecimal" ): print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = CPDecimalFrom%s(*fromBytes++);" % nsnumber_factory[t] else: print "\t\t\t\t\t\t\t\t\twhile ( fromBytes < lastSample ) *toBytes++ = (%s)*fromBytes++;" % nt print "\t\t\t\t\t\t\t\t}" print "\t\t\t\t\t\t\t\tbreak;" print "\t\t\t\t\t\t}" print "\t\t\t\t\t\tbreak;" print "\t\t\t\t}" print "\t\t\t\tbreak;" print "\t\t}" print "\t\tbreak;" print "}"
Python
#!/usr/bin/env python "create rootfs" import sys import os import getopt support_fs_tbl = ["yaffs", "cramfs", "ramfs"] #line swith char linesep = os.linesep #option table #if option has param,must follow char':' or '=' when long opt opt_short_tbl = 'hf:v' opt_long_tbl = ["help", "fstype="] #usage string for tips usage_str = '[options] -f fsname' + linesep +\ '\t-f, --fstype=name\tfilesystem types name' + linesep +\ '\t support list:' + str(support_fs_tbl) +linesep +\ '\t-v\t\t\tverbose mode' + linesep +\ '\t-h, --help\t\tprint this message' #is verbose mode debug = False #parse type fstype = "unsupport" #my debug fucntion def mydebug(*arglist, **argdict): global debug if not debug: return 0 for i in arglist: print i, print for i in argdict: print i, argdict[i], def yaffs_fs_create(): mydebug('create yaffs') def ramfs_fs_create(): mydebug('create ramfs') def cramfs_fs_create(): mydebug('create cramfs') def usage(): global usage_str print 'usage:%s %s' % (sys.argv[0], usage_str) def main(): "main function for rootfs create dispatch" #print sys.argv #get argv count if len(sys.argv) < 2: print 'no options input.' usage() return 2 try: #parse command line options opts, args = getopt.getopt(sys.argv[1:], opt_short_tbl, opt_long_tbl) except getopt.GetoptError: print 'get options error.' usage() return 2 else: global fstype, debug for o, a in opts: if o == "-v": debug = True if o in ("-h", "--help"): usage() sys.exit() if o in ("-f", "--fstype"): fstype = a mydebug('input fstype=', a) break if fstype == support_fs_tbl[0]: yaffs_fs_create() elif fstype == support_fs_tbl[1]: cramfs_fs_create() elif fstype == support_fs_tbl[2]: ramfs_fs_create() else: print 'unsupport fs type:%s.' % (fstype) usage() return 0 if __name__ == '__main__': main()
Python
#!/usr/bin/env python "create rootfs" import sys import os import getopt import time #line swith char linesep = os.linesep #rootfs class, base is object class CRootFs(object): """ rootfs base class """ def __init__(self, name, fstype): global linesep #time stamp self.stamp = time.strftime("%Y%m%d%H%M%S") self.name = fstype self.path = name + self.stamp + '.' + self.name mydebug('Init rootfs') def info(self): print 'path is: %s%s' % (self.path, linesep) #yaffs class class CYaffsFs(CRootFs): """ yaffs """ def __init__(self, name): super(CYaffsFs, self).__init__(name, 'yaffs') mydebug('Init yaffs') #ramfs class class CRamFs(CRootFs): """ ramfs """ def __init__(self, name): super(CRamFs, self).__init__(name, 'ramfs') mydebug('Init ramfs') #cramfs class class CCramFs(CRootFs): """ cramfs """ def __init__(self, name): super(CCramFs, self).__init__(name, 'cramfs') mydebug('Init cramfs') #global variables define support_fs_tbl = { "yaffs":CYaffsFs, "ramfs":CRamFs, "cramfs":CCramFs, } #option table #if option has param,must follow char':' or '=' when long opt opt_short_tbl = 'hf:v' opt_long_tbl = ["help", "fstype="] #usage string for tips usage_str = '[options] -f fsname' + linesep +\ '\t-f, --fstype=name\tfilesystem types name' + linesep +\ '\t support list:' + str(support_fs_tbl.keys()) +linesep +\ '\t-v\t\t\tverbose mode' + linesep +\ '\t-h, --help\t\tprint this message' #is verbose mode debug = False #my debug fucntion def mydebug(*arglist, **argdict): global debug if not debug: return 0 for i in arglist: print i, print for i in argdict: print i, argdict[i], #virtual rootfs class class RootFs(object): """ rootfs """ def __init__(self, key, name): global support_fs_tbl self.key = key self.cls_tab = support_fs_tbl self.cls_name = self.cls_tab[key]; self.instance = self.cls_name(name) def dump(self, dump_name): print dump_name super(self.cls_name, self.instance).info() def usage(): global usage_str print 'usage:%s %s' % (sys.argv[0], usage_str) def main(): "main function for rootfs create dispatch" #print sys.argv #get argv count if len(sys.argv) < 2: print 'no options input.' usage() return 2 try: #parse command line options opts, args = getopt.getopt(sys.argv[1:], opt_short_tbl, opt_long_tbl) except getopt.GetoptError: print 'get options error.' usage() return 2 else: global debug fstype = "unsupport" for o, a in opts: if o == "-v": debug = True elif o in ("-h", "--help"): usage() sys.exit() elif o in ("-f", "--fstype"): fstype = a mydebug('input fstype=', a) else: pass if fstype not in support_fs_tbl.keys(): print 'unsupport fs type:%s.' % (fstype) usage() return 0 else: myrootfs = RootFs(fstype, "img") myrootfs.dump("elvon dump:") if __name__ == '__main__': main()
Python
#!/usr/bin/env python i = 1 + 2 * 4 print i, print 'test raw input' i = raw_input('pls input:\n') print int(i) print 'test while' count = 0 while count <= 10: print count count += 1 print 'test for' for i in range(11): print i print 'test if elif else' i = int(raw_input('input num\n')) if i > 0: print 'sign is +' elif i < 0: print 'sign is -' else: print 'num is 0' print 'test list OR array' numset = [1,2,3,4,5] total = 0 for i in range(5): total += numset[i] print 'total is', total for i in range(5): numset[i] = int(raw_input('pls input ' + str(i+1) + ' number\n')) print 'sum is', sum(numset) print 'average is', float(sum(numset))/5 print 'test x < y < x' while 1: if 1 <= int(raw_input('input a num between 1 - 100\n')) <= 100: break else: print 'error' print 'test sort' i = int(raw_input('input num 1\n')) j = int(raw_input('input num 2\n')) k = int(raw_input('input num 3\n')) count = 0 for count in range(2): if i > j: tmp = i i = j j = tmp if j > k: tmp = j j = k k = tmp print i, j, k print 'test Tkinter' import Tkinter top = Tkinter.Tk() hello = Tkinter.Label(top, text='hello world') hello.pack() quit = Tkinter.Button(top, text='Quit', command=top.quit, bg='red', fg='white') quit.pack(fill=Tkinter.X, expand=1) Tkinter.mainloop()
Python
#!/usr/bin/env python """ Network Monitoring System Server Mock """ import json, socket hash_number = 1 def register(params): answer = { 'command':'register' } global hash_number services='' client_hash = params['hash'] client_port = params['port'] client_service_list = params['service_list'] if client_hash == '': answer['status'] = 1 # 1 - means ok answer['hash'] = hash_number hash_number += 1 elif client_hash == -1: answer['status'] = 2 # 2 - force new hash answer['hash'] = '' else: answer['status'] = 0 # 0 - means hash reuse answer['hash'] = client_hash for i in client_service_list: services = services + i print (str(client_hash) + ';' + str(client_port) + ';' + services) return answer # End of function def unsupported(): return { 'command' : 'unknown', 'status' : -1 } # End of function options = {} options['host'] = '' options['port'] = 5000 server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind((options['host'],options['port'])) server.listen(5) print 'Network Monitoring Simple Server started' while 1: client, address = server.accept() clientData = client.recv(1024) question = json.loads(clientData) if question['command'] == 'register': response = register(question['params']) else: print "Unsupported command" response = unsupported() response = json.dumps(response) client.send(response) exit(0)
Python
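The mock above expects one JSON object per connection with a 'command' of 'register' and a 'params' member carrying 'hash', 'port' and 'service_list'; anything else gets the unsupported() reply. A minimal hand-rolled client sketch for poking it locally (the port number and service names are placeholders). Note that the fuller NetSpy client later in this dump nests its data under 'payload' rather than 'params', so the two sides would need to agree on that key before they interoperate:

import json, socket

request = {'command': 'register', 'params': {'hash': '', 'port': 6000, 'service_list': ['http', 'ssh']}}
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 5000))            # the mock listens on port 5000
sock.send(json.dumps(request))
print json.loads(sock.recv(1024))            # e.g. {'command': 'register', 'status': 1, 'hash': 1}
sock.close()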
#!/usr/bin/env python """ NetSpy Client by Marcin Ciechowicz for ZPR v0.01 """ import socket import json from subprocess import call import logging import argparse import os.path appName = 'NetSpyClient' scriptDir = 'scripts' logger = logging.getLogger(appName) def initLogger(): formatter = logging.Formatter('%(asctime)s %(levelname)s::%(message)s') hdlrFile = logging.FileHandler(appName + '.log') hdlrFile.setFormatter(formatter) hdlrStd = logging.StreamHandler() hdlrStd.setFormatter(formatter) logger.addHandler(hdlrFile) logger.addHandler(hdlrStd) logger.setLevel(logging.DEBUG) class Service: name = '' testerPath = '' def __init__(self, name, testerPath): """Creates service""" self.name = name self.testerPath = testerPath def getName(self): return self.name def getCheckerPath(self): return self.testerPath def executeCheck(self): return call([scriptDir + '/' + self.testerPath, '1']) class Config: options = {} def __init__(self, fileName = "netspy.conf"): """Initialize config from file """ self.options = {'port' : '', 'server_port' : '', 'server_ip': '', 'service_list': [] } self.fileName = fileName self.loadFromFile(self.fileName) def loadFromFile(self,fileName): try: logger.info("Reading config file: " + fileName) configFile = file(fileName,'r') line = configFile.readline() active_options = ('port','server_port','server_ip') while line != '' : tokens = line.strip().split(' ') if tokens[0] == 'service': if (tokens[1] == 'alive'): logger.warning("Service " + tokens[1] + " is already defined, please use a different name") elif (os.path.isfile(scriptDir+'/'+tokens[2])): self.options['service_list'].append(Service(tokens[1],tokens[2])) logger.debug("New service added: " + tokens[1]) else: logger.warning("Service " + tokens[1] +" error: Can't find script : " + scriptDir + '/' + tokens[2]) logger.warning("Service " + tokens[1] +" creation failed") elif tokens[0] in active_options: self.options[tokens[0]]=tokens[1] logger.debug(tokens[0] + " set to " + tokens[1]) else: logger.warning("Unknown option " + tokens[0]) line = configFile.readline() configFile.close() except IOError: logger.error( "Can't read " + fileName) exit() def getPort(self): return self.options['port'] def getServerPort(self): return self.options['server_port'] def getServerIp(self): return self.options['server_ip'] def getServiceList(self): return self.options['service_list'] class ClientApp: config = '' def getHash(self): hashValue='' try: hashFile=file(".netspy.hash","r") hashValue = hashFile.readline() except IOError: logger.warning( "No hash found") finally: return hashValue def setHash(self,hashValue): """ Function doc """ if (hashValue!=''): try: hashFile=file(".netspy.hash","w") hashFile.write(hashValue) except IOError: logger.error( "Can't store hash value") exit(0) def setConfig(self, config): self.config = config def __init__(self,config=None): if config!=None: self.setConfig(config) def hashCheck(self,hashValue): return True def init(self): logger.info('Client - running ') def registerAtServer(self): try: server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.connect((self.config.getServerIp(), int(self.config.getServerPort()) )) except IOError: logger.error("Can't register at monitoring server - connection problem") return False service_list = [] for i in self.config.getServiceList(): service_list.append(i.getName()) service_list.append('alive') message = { 'command' : 'register', 'payload': {'hash' : self.getHash(), 'port' : self.config.getPort(), 'service_list' : service_list }} messageToSend = json.dumps(message)
server.send(messageToSend) data = server.recv(1024) server.close() answer = json.loads(data) if (answer['command'] != 'register'): logger.error("Bad command type - expected 'register'") return False if (answer['payload']['status'] == 0): logger.info("Reusing old hash") return True elif (answer['payload']['status'] == 1): logger.info("Saving new hash: " + str(answer['payload']['hash'])) hashValue = answer['payload']['hash'] self.setHash(str(hashValue)) return True elif (answer['payload']['status'] == 2): clear = file('.netspy.hash','w') clear.write('') return False else: return False def performServiceCheck(self,message): try: question = json.loads(message) if question['command'] != 'check_status': logger.error("Unknown command '" + question['command'] + "' received from server") logger.error("No check performed") return else: logger.info("Performing check") resultList = [] alive = { 'alive' : 0 } resultList.append(alive) for service in self.config.getServiceList(): tmp = service.executeCheck() result = {service.getName() : tmp } resultList.append(result) answer = {'command' : 'check_status', 'payload' : {'check_result' : resultList }} return json.dumps(answer) except ValueError: logger.error("Unsupported command format") logger.error("No check performed") def run(self): logger.info("Client - registering at monitoring server") i=0 while (i<3 and not self.registerAtServer()): i=i+1 if (i==3): logger.error("Connect to monitoring server failed - can't connect to specified host") logger.error("Please check your config file") return logger.info("Client - register successful") client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.bind(('',int(self.config.getPort()))) client.listen(5) logger.info('Client - waiting for commands from server') while 1: request, address = client.accept() message = request.recv(1024) answer = self.performServiceCheck(message) request.send(answer) request.close() def parseArgs(): parser = argparse.ArgumentParser(prog=appName, usage='%(prog)s [options]') parser.add_argument('--verbose','-v', action='store_false', help='verbose mode') parser.add_argument('--config','-c', action='store', help='config filename') args = parser.parse_args() if args.verbose == True: logger.setLevel(logging.ERROR) if args.config != None: return Config(args.config) else: return Config() #---------- main --------------------------# initLogger() client = ClientApp(parseArgs()) client.init() client.run() #-----------------------------------------#
Python
#!/usr/bin/env python """ NetSpy Client by Marcin Ciechowicz for ZPR v0.01 """ import socket import json from subprocess import call import logging import argparse import os.path appName = 'NetSpyClient' scriptDir = 'scripts' logger = logging.getLogger(appName) def initLogger(): formatter = logging.Formatter('%(asctime)s %(levelname)s::%(message)s') hdlrFile = logging.FileHandler(appName + '.log') hdlrFile.setFormatter(formatter) hdlrStd = logging.StreamHandler() hdlrStd.setFormatter(formatter) logger.addHandler(hdlrFile) logger.addHandler(hdlrStd) logger.setLevel(logging.DEBUG) class Service: name = '' testerPath = '' def __init__(self, name, testerPath): """Creates service""" self.name = name self.testerPath = testerPath def getName(self): return self.name def getCheckerPath(self): return self.checkerPath def executeCheck(self): return call(testerPath,'1') class Config: options = {} def __init__(self, fileName = "netspy.conf"): """Initialize config from file """ self.options = {'port' : '', 'server_port' : '', 'server_ip': '', 'service_list': [] } self.fileName = fileName self.loadFromFile(self.fileName) def loadFromFile(self,fileName): try: logger.info("Reading config file: " + fileName) configFile = file(fileName,'r') line = configFile.readline() active_options = ('port','server_port','server_ip') while line != '' : tokens = line.strip().split(' ') if tokens[0] == 'service': if (tokens[1] == 'alive'): logger.warning("Service " + tokens[1] + " is already definied, please use different name") elif (os.path.isfile(scriptDir+'/'+tokens[2])): self.options['service_list'].append(Service(tokens[1],tokens[2])) logger.debug("New service added: " + tokens[1]) else: logger.warning("Service " + tokens[1] +" error: Can't find script : " + scriptDir + '/' + tokens[2]) logger.warning("Service " + tokens[1] +" creation failed") elif tokens[0] in active_options: self.options[tokens[0]]=tokens[1] logger.debug(tokens[0] + " set to " + tokens[1]) else: logger.warning("Unkown option " + tokens[0]) line = configFile.readline() configFile.close() except IOError: logger.error( "Can't read " + fileName) exit() def getPort(self): return self.options['port'] def getServerPort(self): return self.options['server_port'] def getServerIp(self): return self.options['server_ip'] def getServiceList(self): return self.options['service_list'] class ClientApp: config = '' def getHash(self): hashValue='' try: hashFile=file(".netspy.hash","r") hashValue = hashFile.readline() except IOError: logger.warining( "No hash found") finally: return hashValue def setHash(self,hashValue): """ Function doc """ if (hashValue!=''): try: hashFile=file(".netspy.hash","w") hashFile.write(hashValue) except IOError: logger.error( "Can't store hash value") exit(0) def setConfig(self, config): self.config = config def __init__(self,config=None): if config!=None: self.setConfig(config) def hashCheck(self,hashValue): return True def init(self): logger.info('Client - running ') def registerAtServer(self): try: server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.connect((self.config.getServerIp(), int(self.config.getServerPort()) )) except IOError: logger.error("Can't register at monitoring server - connection problem") return False service_list = []; for i in self.config.getServiceList(): service_list.append(i.getName()) service_list.append('alive') message = { 'command' : 'register', 'payload': {'hash' : self.getHash(), 'port' : self.config.getPort(), 'service_list' : service_list }} messageToSend = json.dumps(message) 
        server.send(messageToSend)
        data = server.recv(1024)
        server.close()
        answer = json.loads(data)
        if answer['command'] != 'register':
            logger.error("Bad command type - expected 'register'")
            return False
        if answer['payload']['status'] == 0:
            logger.info("Reusing old hash")
            return True
        elif answer['payload']['status'] == 1:
            logger.info("Saving new hash: " + str(answer['payload']['hash']))
            hashValue = answer['payload']['hash']
            self.setHash(str(hashValue))
            return True
        elif answer['payload']['status'] == 2:
            clear = open('.netspy.hash', 'w')
            clear.write('')
            return False
        else:
            return False

    def performServiceCheck(self, message):
        try:
            question = json.loads(message)
            if question['command'] != 'check_status':
                logger.error("Unknown command '" + question['command'] + "' received from server")
                logger.error("No check performed")
                return
            else:
                logger.info("Performing check")
                resultList = []
                alive = {'alive': 0}
                resultList.append(alive)
                for service in self.config.getServiceList():
                    tmp = service.executeCheck()
                    result = {service.getName(): tmp}
                    resultList.append(result)
                answer = {'command': 'check_status', 'payload': {'check_result': resultList}}
                return json.dumps(answer)
        except ValueError:
            logger.error("Unsupported command format")
            logger.error("No check performed")

    def run(self):
        logger.info("Client - registering at monitoring server")
        i = 0
        while i < 3 and not self.registerAtServer():
            i = i + 1
        if i == 3:
            logger.error("Connect to monitoring server failed - can't connect to specified host")
            logger.error("Please check your config file")
            return
        logger.info("Client - register successful")
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.bind(('', int(self.config.getPort())))
        client.listen(5)
        logger.info('Client - waiting for commands from server')
        while 1:
            request, address = client.accept()
            message = request.recv(1024)
            answer = self.performServiceCheck(message)
            # only reply when a check was actually performed
            if answer is not None:
                request.send(answer)
            request.close()


def parseArgs():
    parser = argparse.ArgumentParser(prog=appName, usage='%(prog)s [options]')
    parser.add_argument('--verbose', '-v', action='store_true', help='verbose mode')
    parser.add_argument('--config', '-c', action='store', help='config filename')
    args = parser.parse_args()
    if not args.verbose:
        logger.setLevel(logging.ERROR)
    if args.config is not None:
        return Config(args.config)
    else:
        return Config()

#---------- main --------------------------#
initLogger()
client = ClientApp(parseArgs())
client.init()
client.run()
#-----------------------------------------#
Python
#-------- DEFAULT --------------------------- flags = ['-O2','-Wall','-pedantic'] lib_boost = ['boost_system', 'boost_thread','boost_random','boost_program_options'] lib_cppcms = ['cppcms','cppdb'] libs = lib_boost + lib_cppcms env = Environment(CPPFLAGS=flags, LIBS=libs) netspy = 'netspy-server' dbtest = 'dbtest' sources = Split(""" main.cpp Message.cpp JSONParser.cpp ClientRegistrar.cpp RequestManager.cpp Config.cpp ClientChecker.cpp ClientManager.cpp DataProxy.cpp """) dbtestSources = Split(""" DataProxy.cpp Config.cpp DataTest.cpp """) targets = { netspy : sources, dbtest : dbtestSources } default = env.Program( target=netspy, source = env.Object(targets[netspy]) ) Default(default) env.Program(target=dbtest, source = env.Object(targets[dbtest]))
Python
#!/usr/bin/env python import gluon from gluon.fileutils import untar import os import sys def main(): path = gluon.__path__ out_path = os.getcwd() try: if sys.argv[1] and os.path.exists(sys.argv[1]):# To untar the web2py env to the selected path out_path = sys.argv[1] else: os.mkdir(sys.argv[1]) out_path = sys.argv[1] except: pass try: print "Creating a web2py env in: " + out_path untar(os.path.join(path[0],'env.tar'),out_path) except: print "Failed to create the web2py env" print "Please reinstall web2py from pip" if __name__ == '__main__': main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This is a WSGI handler for Apache Requires apache+mod_wsgi. In httpd.conf put something like: LoadModule wsgi_module modules/mod_wsgi.so WSGIScriptAlias / /path/to/wsgihandler.py """ # change these parameters as required LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path] + [p for p in sys.path if not p == path] sys.stdout = sys.stderr import gluon.main if LOGGING: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=None) else: application = gluon.main.wsgibase if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft'
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This is a handler for lighttpd+fastcgi This file has to be in the PYTHONPATH Put something like this in the lighttpd.conf file: server.port = 8000 server.bind = '127.0.0.1' server.event-handler = 'freebsd-kqueue' server.modules = ('mod_rewrite', 'mod_fastcgi') server.error-handler-404 = '/test.fcgi' server.document-root = '/somewhere/web2py' server.errorlog = '/tmp/error.log' fastcgi.server = ('.fcgi' => ('localhost' => ('min-procs' => 1, 'socket' => '/tmp/fcgi.sock' ) ) ) """ LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path] + [p for p in sys.path if not p == path] import gluon.main import gluon.contrib.gateways.fcgi as fcgi if LOGGING: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=None) else: application = gluon.main.wsgibase if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft' fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run()
Python
def webapp_add_wsgi_middleware(app): from google.appengine.ext.appstats import recording app = recording.appstats_wsgi_middleware(app) return app
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ scgihandler.py - handler for SCGI protocol Modified by Michele Comitini <michele.comitini@glisco.it> from fcgihandler.py to support SCGI fcgihandler has the following copyright: " This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) " This is a handler for lighttpd+scgi This file has to be in the PYTHONPATH Put something like this in the lighttpd.conf file: server.document-root="/var/www/web2py/" # for >= linux-2.6 server.event-handler = "linux-sysepoll" url.rewrite-once = ( "^(/.+?/static/.+)$" => "/applications$1", "(^|/.*)$" => "/handler_web2py.scgi$1", ) scgi.server = ( "/handler_web2py.scgi" => ("handler_web2py" => ( "host" => "127.0.0.1", "port" => "4000", "check-local" => "disable", # don't forget to set "disable"! ) ) ) """ LOGGING = False SOFTCRON = False import sys import os path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path] + [p for p in sys.path if not p == path] import gluon.main # uncomment one of the two imports below depending on the SCGIWSGI server installed #import paste.util.scgiserver as scgi from wsgitools.scgi.forkpool import SCGIServer from wsgitools.filters import WSGIFilterMiddleware, GzipWSGIFilter wsgiapp = WSGIFilterMiddleware(gluon.main.wsgibase, GzipWSGIFilter) if LOGGING: application = gluon.main.appfactory(wsgiapp=wsgiapp, logfilename='httpserver.log', profilerfilename=None) else: application = wsgiapp if SOFTCRON: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft' # uncomment one of the two rows below depending on the SCGIWSGI server installed #scgi.serve_application(application, '', 4000).run() SCGIServer(application, port=4000).enable_sighandler().run()
Python
# -*- coding: utf-8 -*- # when web2py is run as a windows service (web2py.py -W) # it does not load the command line options but it # expects to find configuration settings in a file called # # web2py/options.py # # this file is an example for options.py import socket import os ip = '0.0.0.0' port = 80 interfaces = [('0.0.0.0', 80)] #,('0.0.0.0',443,'ssl_private_key.pem','ssl_certificate.pem')] password = '<recycle>' # ## <recycle> means use the previous password pid_filename = 'httpserver.pid' log_filename = 'httpserver.log' profiler_filename = None ssl_certificate = '' # 'ssl_certificate.pem' # ## path to certificate file ssl_private_key = '' # 'ssl_private_key.pem' # ## path to private key file #numthreads = 50 # ## deprecated; remove minthreads = None maxthreads = None server_name = socket.gethostname() request_queue_size = 5 timeout = 30 shutdown_timeout = 5 folder = os.getcwd() extcron = None nocron = None
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework. """ import os import sys import optparse import urllib path = os.path.dirname(os.path.abspath(__file__)) os.chdir(path) sys.path = [path] + [p for p in sys.path if not p == path] class Servers: @staticmethod def cgi(app, address=None, **options): from wsgiref.handlers import CGIHandler CGIHandler().run(app) # Just ignore host and port here @staticmethod def flup(app, address, **options): import flup.server.fcgi flup.server.fcgi.WSGIServer(app, bindAddress=address).run() @staticmethod def wsgiref(app, address, **options): # pragma: no cover from wsgiref.simple_server import make_server, WSGIRequestHandler class QuietHandler(WSGIRequestHandler): def log_request(*args, **kw): pass options['handler_class'] = QuietHandler srv = make_server(address[0], address[1], app, **options) srv.serve_forever() @staticmethod def cherrypy(app, address, **options): from cherrypy import wsgiserver server = wsgiserver.CherryPyWSGIServer(address, app) server.start() @staticmethod def rocket(app, address, **options): from gluon.rocket import CherryPyWSGIServer server = CherryPyWSGIServer(address, app) server.start() @staticmethod def rocket_with_repoze_profiler(app, address, **options): from gluon.rocket import CherryPyWSGIServer from repoze.profile.profiler import AccumulatingProfileMiddleware from gluon.settings import global_settings global_settings.web2py_crontype = 'none' wrapped = AccumulatingProfileMiddleware( app, log_filename='wsgi.prof', discard_first_request=True, flush_at_shutdown=True, path='/__profile__' ) server = CherryPyWSGIServer(address, wrapped) server.start() @staticmethod def paste(app, address, **options): from paste import httpserver from paste.translogger import TransLogger httpserver.serve(app, host=address[0], port=address[1], **options) @staticmethod def fapws(app, address, **options): import fapws._evwsgi as evwsgi from fapws import base evwsgi.start(address[0], str(address[1])) evwsgi.set_base_module(base) def app(environ, start_response): environ['wsgi.multiprocess'] = False return app(environ, start_response) evwsgi.wsgi_cb(('', app)) evwsgi.run() @staticmethod def gevent(app, address, **options): from gevent import pywsgi from gevent.pool import Pool pywsgi.WSGIServer(address, app, spawn='workers' in options and Pool( int(options.workers)) or 'default').serve_forever() @staticmethod def bjoern(app, address, **options): import bjoern bjoern.run(app, *address) @staticmethod def tornado(app, address, **options): import tornado.wsgi import tornado.httpserver import tornado.ioloop container = tornado.wsgi.WSGIContainer(app) server = tornado.httpserver.HTTPServer(container) server.listen(address=address[0], port=address[1]) tornado.ioloop.IOLoop.instance().start() @staticmethod def twisted(app, address, **options): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app)) reactor.listenTCP(address[1], factory, interface=address[0]) reactor.run() @staticmethod def diesel(app, address, **options): from diesel.protocols.wsgi import WSGIApplication 
app = WSGIApplication(app, port=address[1]) app.run() @staticmethod def gunicorn(app, address, **options): from gunicorn.app.base import Application config = {'bind': "%s:%d" % address} config.update(options) sys.argv = ['anyserver.py'] class GunicornApplication(Application): def init(self, parser, opts, args): return config def load(self): return app g = GunicornApplication() g.run() @staticmethod def eventlet(app, address, **options): from eventlet import wsgi, listen wsgi.server(listen(address), app) @staticmethod def mongrel2(app, address, **options): import uuid sys.path.append(os.path.abspath(os.path.dirname(__file__))) from mongrel2 import handler conn = handler.Connection(str(uuid.uuid4()), "tcp://127.0.0.1:9997", "tcp://127.0.0.1:9996") mongrel2_handler(app, conn, debug=False) @staticmethod def motor(app, address, **options): #https://github.com/rpedroso/motor import motor app = motor.WSGIContainer(app) http_server = motor.HTTPServer(app) http_server.listen(address=address[0], port=address[1]) #http_server.start(2) motor.IOLoop.instance().start() @staticmethod def pulsar(app, address, **options): from pulsar.apps import wsgi sys.argv = ['anyserver.py'] s = wsgi.WSGIServer(callable=app, bind="%s:%d" % address) s.start() def run(servername, ip, port, softcron=True, logging=False, profiler=None): if servername == 'gevent': from gevent import monkey monkey.patch_all() import gluon.main if logging: application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, logfilename='httpserver.log', profilerfilename=profiler) else: application = gluon.main.wsgibase if softcron: from gluon.settings import global_settings global_settings.web2py_crontype = 'soft' getattr(Servers, servername)(application, (ip, int(port))) def mongrel2_handler(application, conn, debug=False): """ Based on : https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py WSGI handler based on the Python wsgiref SimpleHandler. A WSGI application should return a iterable op StringTypes. Any encoding must be handled by the WSGI application itself. """ from wsgiref.handlers import SimpleHandler try: import cStringIO as StringIO except: import StringIO # TODO - this wsgi handler executes the application and renders a page # in memory completely before returning it as a response to the client. # Thus, it does not "stream" the result back to the client. It should be # possible though. The SimpleHandler accepts file-like stream objects. So, # it should be just a matter of connecting 0MQ requests/response streams to # the SimpleHandler requests and response streams. However, the Python API # for Mongrel2 doesn't seem to support file-like stream objects for requests # and responses. Unless I have missed something. while True: if debug: print "WAITING FOR REQUEST" # receive a request req = conn.recv() if debug: print "REQUEST BODY: %r\n" % req.body if req.is_disconnect(): if debug: print "DISCONNECT" continue # effectively ignore the disconnect from the client # Set a couple of environment attributes a.k.a. 
header attributes # that are a must according to PEP 333 environ = req.headers environ['SERVER_PROTOCOL'] = 'HTTP/1.1' # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1 environ['REQUEST_METHOD'] = environ['METHOD'] if ':' in environ['Host']: environ['SERVER_NAME'] = environ['Host'].split(':')[0] environ['SERVER_PORT'] = environ['Host'].split(':')[1] else: environ['SERVER_NAME'] = environ['Host'] environ['SERVER_PORT'] = '' environ['SCRIPT_NAME'] = '' # empty for now environ['PATH_INFO'] = urllib.unquote(environ['PATH']) if '?' in environ['URI']: environ['QUERY_STRING'] = environ['URI'].split('?')[1] else: environ['QUERY_STRING'] = '' if 'Content-Length' in environ: environ['CONTENT_LENGTH'] = environ[ 'Content-Length'] # necessary for POST to work with Django environ['wsgi.input'] = req.body if debug: print "ENVIRON: %r\n" % environ # SimpleHandler needs file-like stream objects for # requests, errors and responses reqIO = StringIO.StringIO(req.body) errIO = StringIO.StringIO() respIO = StringIO.StringIO() # execute the application handler = SimpleHandler(reqIO, respIO, errIO, environ, multithread=False, multiprocess=False) handler.run(application) # Get the response and filter out the response (=data) itself, # the response headers, # the response status code and the response status description response = respIO.getvalue() response = response.split("\r\n") data = response[-1] headers = dict([r.split(": ") for r in response[1:-2]]) code = response[0][9:12] status = response[0][13:] # strip BOM's from response data # Especially the WSGI handler from Django seems to generate them (2 actually, huh?) # a BOM isn't really necessary and cause HTML parsing errors in Chrome and Safari # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/ # Although I still find this a ugly hack, it does work. data = data.replace('\xef\xbb\xbf', '') # Get the generated errors errors = errIO.getvalue() # return the response if debug: print "RESPONSE: %r\n" % response if errors: if debug: print "ERRORS: %r" % errors data = "%s\r\n\r\n%s" % (data, errors) conn.reply_http( req, data, code=code, status=status, headers=headers) def main(): usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P" try: version = open('VERSION','r') except IOError: version = '' parser = optparse.OptionParser(usage, None, optparse.Option, version) parser.add_option('-l', '--logging', action='store_true', default=False, dest='logging', help='log into httpserver.log') parser.add_option('-P', '--profiler', default=False, dest='profiler', help='profiler filename') servers = ', '.join(x for x in dir(Servers) if not x[0] == '_') parser.add_option('-s', '--server', default='rocket', dest='server', help='server name (%s)' % servers) parser.add_option('-i', '--ip', default='127.0.0.1', dest='ip', help='ip address') parser.add_option('-p', '--port', default='8000', dest='port', help='port number') parser.add_option('-w', '--workers', default='', dest='workers', help='number of workers number') (options, args) = parser.parse_args() print 'starting %s on %s:%s...' % ( options.server, options.ip, options.port) run(options.server, options.ip, options.port, logging=options.logging, profiler=options.profiler) if __name__ == '__main__': main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Usage: Install py2exe: http://sourceforge.net/projects/py2exe/files/ Copy script to the web2py directory c:\bin\python26\python build_windows_exe.py py2exe Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py """ from distutils.core import setup import py2exe from gluon.import_all import base_modules, contributed_modules from gluon.fileutils import readlines_file from glob import glob import fnmatch import os import shutil import sys import re import zipfile #read web2py version from VERSION file web2py_version_line = readlines_file('VERSION')[0] #use regular expression to get just the version number v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+') web2py_version = v_re.search(web2py_version_line).group(0) #pull in preferences from config file import ConfigParser Config = ConfigParser.ConfigParser() Config.read('setup_exe.conf') remove_msft_dlls = Config.getboolean("Setup", "remove_microsoft_dlls") copy_apps = Config.getboolean("Setup", "copy_apps") copy_site_packages = Config.getboolean("Setup", "copy_site_packages") copy_scripts = Config.getboolean("Setup", "copy_scripts") make_zip = Config.getboolean("Setup", "make_zip") zip_filename = Config.get("Setup", "zip_filename") remove_build_files = Config.getboolean("Setup", "remove_build_files") # Python base version python_version = sys.version[:3] # List of modules deprecated in python2.6 that are in the above set py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter'] if python_version == '2.6': base_modules += ['json', 'multiprocessing'] base_modules = list(set(base_modules).difference(set(py26_deprecated))) #I don't know if this is even necessary if python_version == '2.6': # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52 try: shutil.copytree('C:\Bin\Microsoft.VC90.CRT', 'dist/') except: print "You MUST copy Microsoft.VC90.CRT folder into the dist directory" setup( console=['web2py.py'], windows=[{'script':'web2py.py', 'dest_base':'web2py_no_console' # MUST NOT be just 'web2py' otherwise it overrides the standard web2py.exe }], name="web2py", version=web2py_version, description="web2py web framework", author="Massimo DiPierro", license="LGPL v3", data_files=[ 'ABOUT', 'LICENSE', 'VERSION', 'splashlogo.gif', 'logging.example.conf', 'options_std.py', 'app.example.yaml', 'queue.example.yaml' ], options={'py2exe': { 'packages': contributed_modules, 'includes': base_modules, }}, ) print "web2py binary successfully built" def copy_folders(source, destination): """Copy files & folders from source to destination (within dist/)""" if os.path.exists(os.path.join('dist', destination)): shutil.rmtree(os.path.join('dist', destination)) shutil.copytree(os.path.join(source), os.path.join('dist', destination)) #should we remove Windows OS dlls user is unlikely to be able to distribute if remove_msft_dlls: print "Deleted Microsoft files not licensed for open source distribution" print "You are still responsible for making sure you have the rights to distribute any other included files!" #delete the API-MS-Win-Core DLLs for f in glob('dist/API-MS-Win-*.dll'): os.unlink(f) #then delete some other files belonging to Microsoft other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll'] for f in other_ms_files: try: os.unlink(os.path.join('dist', f)) except: print "unable to delete dist/" + f #sys.exit(1) #Should we include applications? 
if copy_apps: copy_folders('applications', 'applications') print "Your application(s) have been added" else: #only copy web2py's default applications copy_folders('applications/admin', 'applications/admin') copy_folders('applications/welcome', 'applications/welcome') copy_folders('applications/examples', 'applications/examples') print "Only web2py's admin, examples & welcome applications have been added" #should we copy project's site-packages into dist/site-packages if copy_site_packages: #copy site-packages copy_folders('site-packages', 'site-packages') else: #no worries, web2py will create the (empty) folder first run print "Skipping site-packages" pass #should we copy project's scripts into dist/scripts if copy_scripts: #copy scripts copy_folders('scripts', 'scripts') else: #no worries, web2py will create the (empty) folder first run print "Skipping scripts" pass #borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile def recursive_zip(zipf, directory, folder=""): for item in os.listdir(directory): if os.path.isfile(os.path.join(directory, item)): zipf.write(os.path.join(directory, item), folder + os.sep + item) elif os.path.isdir(os.path.join(directory, item)): recursive_zip( zipf, os.path.join(directory, item), folder + os.sep + item) #should we create a zip file of the build? if make_zip: #to keep consistent with how official web2py windows zip file is setup, #create a web2py folder & copy dist's files into it shutil.copytree('dist', 'zip_temp/web2py') #create zip file #use filename specified via command line zipf = zipfile.ZipFile( zip_filename + ".zip", "w", compression=zipfile.ZIP_DEFLATED) path = 'zip_temp' # just temp so the web2py directory is included in our zip file recursive_zip( zipf, path) # leave the first folder as None, as path is root. zipf.close() shutil.rmtree('zip_temp') print "Your Windows binary version of web2py can be found in " + \ zip_filename + ".zip" print "You may extract the archive anywhere and then run web2py/web2py.exe" #should py2exe build files be removed? if remove_build_files: shutil.rmtree('build') shutil.rmtree('deposit') shutil.rmtree('dist') print "py2exe build files removed" #final info if not make_zip and not remove_build_files: print "Your Windows binary & associated files can also be found in /dist" print "Finished!" print "Enjoy web2py " + web2py_version_line
Python
#!/usr/bin/env python import os import sys """ Author: Christopher Steel on behalf of Voice of Access Copyright: Copyrighted (c) by Massimo Di Pierro (2007-2013) web2py_clone becomes part of the web2py distribution available on Pypi via 'pip install web2py' web2py_clone is one of multiple commands that become available after running 'pip install web2py' in a virtual environment. It requires mercurial to be installed in the virtual environment. web2py_clone creates a local clone from the Web2py google code project in the directory "./web2py," a directory called web2py one directory up from the location of this script. ./bin/web2py_clone ./web2py """ def main(): iwd = cwd = os.getcwd() # set initial and current working directories script_filename = os.path.realpath(__file__) script_dirname = os.path.dirname(script_filename) try: print ("cwd now: %s" % cwd) except: print ("command failed %s" % cwd) try: os.chdir(script_dirname) cwd = os.getcwd() print ("cwd now: %s" % cwd) source = "https://code.google.com/p/web2py/" target = os.path.join('..','web2py') print ("attempting to clone %s" % source) print ("to %s" % target) if os.path.isdir(target): print ("found directory called web2py at %s" % target) print ("is web2py already installed?") print ("aborting clone attempt") else: os.system("hg clone %s %s" % (source,target)) os.chdir(iwd) # return to our initial working directory cwd = iwd # set current working directory except: print ("web2py-clone failed in second try statement %s" % cwd) if __name__ == '__main__': main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys path = os.getcwd() try: if sys.argv[1] and os.path.exists(sys.argv[1]): path = sys.argv[1] except: pass os.chdir(path) sys.path = [path]+[p for p in sys.path if not p==path] # import gluon.import_all ##### This should be uncommented for py2exe.py import gluon.widget def main(): # Start Web2py and Web2py cron service! gluon.widget.start(cron=True) if __name__ == '__main__': main()
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The widget is called from web2py. """ import datetime import sys import cStringIO import time import thread import threading import os import socket import signal import math import logging import newcron import getpass import main from fileutils import read_file, write_file, create_welcome_w2p from settings import global_settings from shell import run, test from utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo try: import Tkinter import tkMessageBox import contrib.taskbar_widget from winservice import register_service_handler, Web2pyService have_winservice = True except: have_winservice = False try: BaseException except NameError: BaseException = Exception ProgramName = 'web2py Web Framework' ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str( datetime.datetime.now().year) ProgramVersion = read_file('VERSION').strip() ProgramInfo = '''%s %s %s''' % (ProgramName, ProgramAuthor, ProgramVersion) if not sys.version[:3] in ['2.4', '2.5', '2.6', '2.7']: msg = 'Warning: web2py requires Python 2.4, 2.5 (recommended), 2.6 or 2.7 but you are running:\n%s' msg = msg % sys.version sys.stderr.write(msg) logger = logging.getLogger("web2py") def run_system_tests(options): """ Runs unittests for gluon.tests """ import subprocess major_version = sys.version_info[0] minor_version = sys.version_info[1] if major_version == 2: if minor_version in (5, 6): sys.stderr.write("Python 2.5 or 2.6\n") ret = subprocess.call(['unit2', '-v', 'gluon.tests']) elif minor_version in (7,): call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests'] if options.with_coverage: try: import coverage coverage_config = os.environ.get("COVERAGE_PROCESS_START", os.path.join('gluon', 'tests', 'coverage.ini') ) call_args = ['coverage', 'run', '--rcfile=%s' % coverage_config, '-m', 'unittest', '-v', 'gluon.tests'] except: sys.stderr.write('Coverage was not installed, skipping\n') sys.stderr.write("Python 2.7\n") ret = subprocess.call(call_args) else: sys.stderr.write("unknown python 2.x version\n") ret = 256 else: sys.stderr.write("Only Python 2.x supported.\n") ret = 256 sys.exit(ret and 1) class IO(object): """ """ def __init__(self): """ """ self.buffer = cStringIO.StringIO() def write(self, data): """ """ sys.__stdout__.write(data) if hasattr(self, 'callback'): self.callback(data) else: self.buffer.write(data) def get_url(host, path='/', proto='http', port=80): if ':' in host: host = '[%s]' % host else: host = host.replace('0.0.0.0', '127.0.0.1') if path.startswith('/'): path = path[1:] if proto.endswith(':'): proto = proto[:-1] if not port or port == 80: port = '' else: port = ':%s' % port return '%s://%s%s/%s' % (proto, host, port, path) def start_browser(url, startup=False): if startup: print 'please visit:' print '\t', url print 'starting browser...' 
try: import webbrowser webbrowser.open(url) except: print 'warning: unable to detect your browser' def presentation(root): """ Draw the splash screen """ root.withdraw() dx = root.winfo_screenwidth() dy = root.winfo_screenheight() dialog = Tkinter.Toplevel(root, bg='white') dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150)) dialog.overrideredirect(1) dialog.focus_force() canvas = Tkinter.Canvas(dialog, background='white', width=500, height=300) canvas.pack() root.update() logo = 'splashlogo.gif' if os.path.exists(logo): img = Tkinter.PhotoImage(file=logo) pnl = Tkinter.Label(canvas, image=img, background='white', bd=0) pnl.pack(side='top', fill='both', expand='yes') # Prevent garbage collection of img pnl.image = img def add_label(text='Change Me', font_size=12, foreground='#195866', height=1): return Tkinter.Label( master=canvas, width=250, height=height, text=text, font=('Helvetica', font_size), anchor=Tkinter.CENTER, foreground=foreground, background='white' ) add_label('Welcome to...').pack(side='top') add_label(ProgramName, 18, '#FF5C1F', 2).pack() add_label(ProgramAuthor).pack() add_label(ProgramVersion).pack() root.update() time.sleep(5) dialog.destroy() return class web2pyDialog(object): """ Main window dialog """ def __init__(self, root, options): """ web2pyDialog constructor """ root.title('web2py server') self.root = Tkinter.Toplevel(root) self.options = options self.scheduler_processes = {} self.menu = Tkinter.Menu(self.root) servermenu = Tkinter.Menu(self.menu, tearoff=0) httplog = os.path.join(self.options.folder, 'httpserver.log') iconphoto = 'web2py.gif' if os.path.exists(iconphoto): img = Tkinter.PhotoImage(file=iconphoto) self.root.tk.call('wm', 'iconphoto', self.root._w, img) # Building the Menu item = lambda: start_browser(httplog) servermenu.add_command(label='View httpserver.log', command=item) servermenu.add_command(label='Quit (pid:%i)' % os.getpid(), command=self.quit) self.menu.add_cascade(label='Server', menu=servermenu) self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0) self.menu.add_cascade(label='Pages', menu=self.pagesmenu) #scheduler menu self.schedmenu = Tkinter.Menu(self.menu, tearoff=0) self.menu.add_cascade(label='Scheduler', menu=self.schedmenu) #start and register schedulers from options self.update_schedulers(start=True) helpmenu = Tkinter.Menu(self.menu, tearoff=0) # Home Page item = lambda: start_browser('http://www.web2py.com/') helpmenu.add_command(label='Home Page', command=item) # About item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo) helpmenu.add_command(label='About', command=item) self.menu.add_cascade(label='Info', menu=helpmenu) self.root.config(menu=self.menu) if options.taskbar: self.root.protocol('WM_DELETE_WINDOW', lambda: self.quit(True)) else: self.root.protocol('WM_DELETE_WINDOW', self.quit) sticky = Tkinter.NW # IP Tkinter.Label(self.root, text='Server IP:', justify=Tkinter.LEFT).grid(row=0, column=0, sticky=sticky) self.ips = {} self.selected_ip = Tkinter.StringVar() row = 0 ips = [('127.0.0.1', 'Local (IPv4)')] + \ ([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \ [(ip, 'Public') for ip in options.ips] + \ [('0.0.0.0', 'Public')] for ip, legend in ips: self.ips[ip] = Tkinter.Radiobutton( self.root, text='%s (%s)' % (legend, ip), variable=self.selected_ip, value=ip) self.ips[ip].grid(row=row, column=1, sticky=sticky) if row == 0: self.ips[ip].select() row += 1 shift = row # Port Tkinter.Label(self.root, text='Server Port:', justify=Tkinter.LEFT).grid(row=shift, column=0, 
sticky=sticky) self.port_number = Tkinter.Entry(self.root) self.port_number.insert(Tkinter.END, self.options.port) self.port_number.grid(row=shift, column=1, sticky=sticky) # Password Tkinter.Label(self.root, text='Choose Password:', justify=Tkinter.LEFT).grid(row=shift + 1, column=0, sticky=sticky) self.password = Tkinter.Entry(self.root, show='*') self.password.bind('<Return>', lambda e: self.start()) self.password.focus_force() self.password.grid(row=shift + 1, column=1, sticky=sticky) # Prepare the canvas self.canvas = Tkinter.Canvas(self.root, width=300, height=100, bg='black') self.canvas.grid(row=shift + 2, column=0, columnspan=2) self.canvas.after(1000, self.update_canvas) # Prepare the frame frame = Tkinter.Frame(self.root) frame.grid(row=shift + 3, column=0, columnspan=2) # Start button self.button_start = Tkinter.Button(frame, text='start server', command=self.start) self.button_start.grid(row=0, column=0) # Stop button self.button_stop = Tkinter.Button(frame, text='stop server', command=self.stop) self.button_stop.grid(row=0, column=1) self.button_stop.configure(state='disabled') if options.taskbar: self.tb = contrib.taskbar_widget.TaskBarIcon() self.checkTaskBar() if options.password != '<ask>': self.password.insert(0, options.password) self.start() self.root.withdraw() else: self.tb = None def update_schedulers(self, start=False): apps = [] available_apps = [arq for arq in os.listdir('applications/')] available_apps = [arq for arq in available_apps if os.path.exists('applications/%s/models/scheduler.py' % arq)] if start: #the widget takes care of starting the scheduler if self.options.scheduler and self.options.with_scheduler: apps = [app.strip() for app in self.options.scheduler.split(',') if app in available_apps] for app in apps: self.try_start_scheduler(app) #reset the menu self.schedmenu.delete(0, len(available_apps)) for arq in available_apps: if arq not in self.scheduler_processes: item = lambda u = arq: self.try_start_scheduler(u) self.schedmenu.add_command(label="start %s" % arq, command=item) if arq in self.scheduler_processes: item = lambda u = arq: self.try_stop_scheduler(u) self.schedmenu.add_command(label="stop %s" % arq, command=item) def start_schedulers(self, app): try: from multiprocessing import Process except: sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n') return code = "from gluon import current;current._scheduler.loop()" print 'starting scheduler from widget for "%s"...' 
% app args = (app, True, True, None, False, code) logging.getLogger().setLevel(self.options.debuglevel) p = Process(target=run, args=args) self.scheduler_processes[app] = p self.update_schedulers() print "Currently running %s scheduler processes" % ( len(self.scheduler_processes)) p.start() print "Processes started" def try_stop_scheduler(self, app): if app in self.scheduler_processes: p = self.scheduler_processes[app] del self.scheduler_processes[app] p.terminate() p.join() self.update_schedulers() def try_start_scheduler(self, app): if app not in self.scheduler_processes: t = threading.Thread(target=self.start_schedulers, args=(app,)) t.start() def checkTaskBar(self): """ Check taskbar status """ if self.tb.status: if self.tb.status[0] == self.tb.EnumStatus.QUIT: self.quit() elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE: if self.root.state() == 'withdrawn': self.root.deiconify() else: self.root.withdraw() elif self.tb.status[0] == self.tb.EnumStatus.STOP: self.stop() elif self.tb.status[0] == self.tb.EnumStatus.START: self.start() elif self.tb.status[0] == self.tb.EnumStatus.RESTART: self.stop() self.start() del self.tb.status[0] self.root.after(1000, self.checkTaskBar) def update(self, text): """ Update app text """ try: self.text.configure(state='normal') self.text.insert('end', text) self.text.configure(state='disabled') except: pass # ## this should only happen in case app is destroyed def connect_pages(self): """ Connect pages """ #reset the menu available_apps = [arq for arq in os.listdir('applications/') if os.path.exists('applications/%s/__init__.py' % arq)] self.pagesmenu.delete(0, len(available_apps)) for arq in available_apps: url = self.url + arq self.pagesmenu.add_command(label=url, command=lambda u=url: start_browser(u)) def quit(self, justHide=False): """ Finish the program execution """ if justHide: self.root.withdraw() else: try: scheds = self.scheduler_processes.keys() for t in scheds: self.try_stop_scheduler(t) except: pass try: newcron.stopcron() except: pass try: self.server.stop() except: pass try: self.tb.Destroy() except: pass self.root.destroy() sys.exit(0) def error(self, message): """ Show error message """ tkMessageBox.showerror('web2py start server', message) def start(self): """ Start web2py server """ password = self.password.get() if not password: self.error('no password, no web admin interface') ip = self.selected_ip.get() if not is_valid_ip_address(ip): return self.error('invalid host ip address') try: port = int(self.port_number.get()) except: return self.error('invalid port number') # Check for non default value for ssl inputs if (len(self.options.ssl_certificate) > 0) or (len(self.options.ssl_private_key) > 0): proto = 'https' else: proto = 'http' self.url = get_url(ip, proto=proto, port=port) self.connect_pages() self.button_start.configure(state='disabled') try: options = self.options req_queue_size = options.request_queue_size self.server = main.HttpServer( ip, port, password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_filename=options.profiler_filename, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=req_queue_size, timeout=options.timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder, interfaces=options.interfaces) thread.start_new_thread(self.server.start, ()) except Exception, e: self.button_start.configure(state='normal') return 
self.error(str(e)) if not self.server_ready(): self.button_start.configure(state='normal') return self.button_stop.configure(state='normal') if not options.taskbar: thread.start_new_thread(start_browser, (get_url(ip, proto=proto, port=port), True)) self.password.configure(state='readonly') [ip.configure(state='disabled') for ip in self.ips.values()] self.port_number.configure(state='readonly') if self.tb: self.tb.SetServerRunning() def server_ready(self): for listener in self.server.server.listeners: if listener.ready: return True return False def stop(self): """ Stop web2py server """ self.button_start.configure(state='normal') self.button_stop.configure(state='disabled') self.password.configure(state='normal') [ip.configure(state='normal') for ip in self.ips.values()] self.port_number.configure(state='normal') self.server.stop() if self.tb: self.tb.SetServerStopped() def update_canvas(self): """ Update canvas """ try: t1 = os.path.getsize('httpserver.log') except: self.canvas.after(1000, self.update_canvas) return try: fp = open('httpserver.log', 'r') fp.seek(self.t0) data = fp.read(t1 - self.t0) fp.close() value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))] self.p0 = value for i in xrange(len(self.p0) - 1): c = self.canvas.coords(self.q0[i]) self.canvas.coords(self.q0[i], (c[0], self.p0[i], c[2], self.p0[i + 1])) self.t0 = t1 except BaseException: self.t0 = time.time() self.t0 = t1 self.p0 = [100] * 300 self.q0 = [self.canvas.create_line(i, 100, i + 1, 100, fill='green') for i in xrange(len(self.p0) - 1)] self.canvas.after(1000, self.update_canvas) def console(): """ Defines the behavior of the console web2py execution """ import optparse import textwrap usage = "python web2py.py" description = """\ web2py Web Framework startup script. ATTENTION: unless a password is specified (-a 'passwd') web2py will attempt to run a GUI. 
In this case command line options are ignored.""" description = textwrap.dedent(description) parser = optparse.OptionParser( usage, None, optparse.Option, ProgramVersion) parser.description = description msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); ' 'Note: This value is ignored when using the \'interfaces\' option.') parser.add_option('-i', '--ip', default='127.0.0.1', dest='ip', help=msg) parser.add_option('-p', '--port', default='8000', dest='port', type='int', help='port of server (8000)') msg = ('password to be used for administration ' '(use -a "<recycle>" to reuse the last password))') parser.add_option('-a', '--password', default='<ask>', dest='password', help=msg) parser.add_option('-c', '--ssl_certificate', default='', dest='ssl_certificate', help='file that contains ssl certificate') parser.add_option('-k', '--ssl_private_key', default='', dest='ssl_private_key', help='file that contains ssl private key') msg = ('Use this file containing the CA certificate to validate X509 ' 'certificates from clients') parser.add_option('--ca-cert', action='store', dest='ssl_ca_certificate', default=None, help=msg) parser.add_option('-d', '--pid_filename', default='httpserver.pid', dest='pid_filename', help='file to store the pid of the server') parser.add_option('-l', '--log_filename', default='httpserver.log', dest='log_filename', help='file to log connections') parser.add_option('-n', '--numthreads', default=None, type='int', dest='numthreads', help='number of threads (deprecated)') parser.add_option('--minthreads', default=None, type='int', dest='minthreads', help='minimum number of server threads') parser.add_option('--maxthreads', default=None, type='int', dest='maxthreads', help='maximum number of server threads') parser.add_option('-s', '--server_name', default=socket.gethostname(), dest='server_name', help='server name for the web server') msg = 'max number of queued requests when server unavailable' parser.add_option('-q', '--request_queue_size', default='5', type='int', dest='request_queue_size', help=msg) parser.add_option('-o', '--timeout', default='10', type='int', dest='timeout', help='timeout for individual request (10 seconds)') parser.add_option('-z', '--shutdown_timeout', default='5', type='int', dest='shutdown_timeout', help='timeout on shutdown of server (5 seconds)') parser.add_option('--socket-timeout', default=5, type='int', dest='socket_timeout', help='timeout for socket (5 second)') parser.add_option('-f', '--folder', default=os.getcwd(), dest='folder', help='folder from which to run web2py') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='increase --test verbosity') parser.add_option('-Q', '--quiet', action='store_true', dest='quiet', default=False, help='disable all output') msg = ('set debug output level (0-100, 0 means all, 100 means none; ' 'default is 30)') parser.add_option('-D', '--debug', dest='debuglevel', default=30, type='int', help=msg) msg = ('run web2py in interactive shell or IPython (if installed) with ' 'specified appname (if app does not exist it will be created). 
' 'APPNAME like a/c/f (c,f optional)') parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME', help=msg) msg = ('run web2py in interactive shell or bpython (if installed) with ' 'specified appname (if app does not exist it will be created).\n' 'Use combined with --shell') parser.add_option('-B', '--bpython', action='store_true', default=False, dest='bpython', help=msg) msg = 'only use plain python shell; should be used with --shell option' parser.add_option('-P', '--plain', action='store_true', default=False, dest='plain', help=msg) msg = ('auto import model files; default is False; should be used ' 'with --shell option') parser.add_option('-M', '--import_models', action='store_true', default=False, dest='import_models', help=msg) msg = ('run PYTHON_FILE in web2py environment; ' 'should be used with --shell option') parser.add_option('-R', '--run', dest='run', metavar='PYTHON_FILE', default='', help=msg) msg = ('run scheduled tasks for the specified apps: expects a list of ' 'app names as -K app1,app2,app3 ' 'or a list of app:groups as -K app1:group1:group2,app2:group1 ' 'to override specific group_names. (only strings, no spaces ' 'allowed. Requires a scheduler defined in the models') parser.add_option('-K', '--scheduler', dest='scheduler', default=None, help=msg) msg = 'run schedulers alongside webserver, needs -K app1 and -a too' parser.add_option('-X', '--with-scheduler', action='store_true', default=False, dest='with_scheduler', help=msg) msg = ('run doctests in web2py environment; ' 'TEST_PATH like a/c/f (c,f optional)') parser.add_option('-T', '--test', dest='test', metavar='TEST_PATH', default=None, help=msg) parser.add_option('-W', '--winservice', dest='winservice', default='', help='-W install|start|stop as Windows service') msg = 'trigger a cron run manually; usually invoked from a system crontab' parser.add_option('-C', '--cron', action='store_true', dest='extcron', default=False, help=msg) msg = 'triggers the use of softcron' parser.add_option('--softcron', action='store_true', dest='softcron', default=False, help=msg) parser.add_option('-Y', '--run-cron', action='store_true', dest='runcron', default=False, help='start the background cron process') parser.add_option('-J', '--cronjob', action='store_true', dest='cronjob', default=False, help='identify cron-initiated command') parser.add_option('-L', '--config', dest='config', default='', help='config file') parser.add_option('-F', '--profiler', dest='profiler_filename', default=None, help='profiler filename') parser.add_option('-t', '--taskbar', action='store_true', dest='taskbar', default=False, help='use web2py gui and run in taskbar (system tray)') parser.add_option('', '--nogui', action='store_true', default=False, dest='nogui', help='text-only, no GUI') msg = ('should be followed by a list of arguments to be passed to script, ' 'to be used with -S, -A must be the last option') parser.add_option('-A', '--args', action='store', dest='args', default=None, help=msg) parser.add_option('--no-banner', action='store_true', default=False, dest='nobanner', help='Do not print header banner') msg = ('listen on multiple addresses: ' '"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." 
' '(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in ' 'square [] brackets)') parser.add_option('--interfaces', action='store', dest='interfaces', default=None, help=msg) msg = 'runs web2py tests' parser.add_option('--run_system_tests', action='store_true', dest='run_system_tests', default=False, help=msg) msg = ('adds coverage reporting (needs --run_system_tests), ' 'python 2.7 and the coverage module installed. ' 'You can alter the default path setting the environmental ' 'var "COVERAGE_PROCESS_START". ' 'By default it takes gluon/tests/coverage.ini') parser.add_option('--with_coverage', action='store_true', dest='with_coverage', default=False, help=msg) if '-A' in sys.argv: k = sys.argv.index('-A') elif '--args' in sys.argv: k = sys.argv.index('--args') else: k = len(sys.argv) sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:] (options, args) = parser.parse_args() options.args = [options.run] + other_args global_settings.cmd_options = options global_settings.cmd_args = args try: options.ips = list(set( # no duplicates [addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn()) if not is_loopback_ip_address(addrinfo=addrinfo)])) except socket.gaierror: options.ips = [] if options.run_system_tests: run_system_tests(options) if options.quiet: capture = cStringIO.StringIO() sys.stdout = capture logger.setLevel(logging.CRITICAL + 1) else: logger.setLevel(options.debuglevel) if options.config[-3:] == '.py': options.config = options.config[:-3] if options.cronjob: global_settings.cronjob = True # tell the world options.plain = True # cronjobs use a plain shell options.nobanner = True options.nogui = True options.folder = os.path.abspath(options.folder) # accept --interfaces in the form # "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3" # (no spaces; optional key:cert indicate SSL) if isinstance(options.interfaces, str): interfaces = options.interfaces.split(';') options.interfaces = [] for interface in interfaces: if interface.startswith('['): # IPv6 ip, if_remainder = interface.split(']', 1) ip = ip[1:] if_remainder = if_remainder[1:].split(':') if_remainder[0] = int(if_remainder[0]) # numeric port options.interfaces.append(tuple([ip] + if_remainder)) else: # IPv4 interface = interface.split(':') interface[1] = int(interface[1]) # numeric port options.interfaces.append(tuple(interface)) # accepts --scheduler in the form # "app:group1,group2,app2:group1" scheduler = [] options.scheduler_groups = None if isinstance(options.scheduler, str): if ':' in options.scheduler: for opt in options.scheduler.split(','): scheduler.append(opt.split(':')) options.scheduler = ','.join([app[0] for app in scheduler]) options.scheduler_groups = scheduler if options.numthreads is not None and options.minthreads is None: options.minthreads = options.numthreads # legacy create_welcome_w2p() if not options.cronjob: # If we have the applications package or if we should upgrade if not os.path.exists('applications/__init__.py'): write_file('applications/__init__.py', '') return (options, args) def check_existent_app(options, appname): if os.path.isdir(os.path.join(options.folder, 'applications', appname)): return True def get_code_for_scheduler(app, options): if len(app) == 1 or app[1] is None: code = "from gluon import current;current._scheduler.loop()" else: code = "from gluon import current;current._scheduler.group_names = ['%s'];" code += "current._scheduler.loop()" code = code % ("','".join(app[1:])) app_ = app[0] if not check_existent_app(options, app_): print "Application 
'%s' doesn't exist, skipping" % (app_) return None, None return app_, code def start_schedulers(options): try: from multiprocessing import Process except: sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n') return processes = [] apps = [(app.strip(), None) for app in options.scheduler.split(',')] if options.scheduler_groups: apps = options.scheduler_groups code = "from gluon import current;current._scheduler.loop()" logging.getLogger().setLevel(options.debuglevel) if len(apps) == 1 and not options.with_scheduler: app_, code = get_code_for_scheduler(apps[0], options) if not app_: return print 'starting single-scheduler for "%s"...' % app_ run(app_, True, True, None, False, code) return for app in apps: app_, code = get_code_for_scheduler(app, options) if not app_: continue print 'starting scheduler for "%s"...' % app_ args = (app_, True, True, None, False, code) p = Process(target=run, args=args) processes.append(p) print "Currently running %s scheduler processes" % (len(processes)) p.start() ##to avoid bashing the db at the same time time.sleep(0.7) print "Processes started" for p in processes: try: p.join() except (KeyboardInterrupt, SystemExit): print "Processes stopped" except: p.terminate() p.join() def start(cron=True): """ Start server """ # ## get command line arguments (options, args) = console() if not options.nobanner: print ProgramName print ProgramAuthor print ProgramVersion from dal import DRIVERS if not options.nobanner: print 'Database drivers available: %s' % ', '.join(DRIVERS) # ## if -L load options from options.config file if options.config: try: options2 = __import__(options.config, {}, {}, '') except Exception: try: # Jython doesn't like the extra stuff options2 = __import__(options.config) except Exception: print 'Cannot import config file [%s]' % options.config sys.exit(1) for key in dir(options2): if hasattr(options, key): setattr(options, key, getattr(options2, key)) if False and not os.path.exists('logging.conf') and \ os.path.exists('logging.example.conf'): import shutil sys.stdout.write("Copying logging.conf.example to logging.conf ... 
") shutil.copyfile('logging.example.conf', 'logging.conf') sys.stdout.write("OK\n") # ## if -T run doctests (no cron) if hasattr(options, 'test') and options.test: test(options.test, verbose=options.verbose) return # ## if -S start interactive shell (also no cron) if options.shell: if not options.args is None: sys.argv[:] = options.args run(options.shell, plain=options.plain, bpython=options.bpython, import_models=options.import_models, startfile=options.run) return # ## if -C start cron run (extcron) and exit # ## -K specifies optional apps list (overloading scheduler) if options.extcron: logger.debug('Starting extcron...') global_settings.web2py_crontype = 'external' if options.scheduler: # -K apps = [app.strip() for app in options.scheduler.split( ',') if check_existent_app(options, app.strip())] else: apps = None extcron = newcron.extcron(options.folder, apps=apps) extcron.start() extcron.join() return # ## if -K if options.scheduler and not options.with_scheduler: try: start_schedulers(options) except KeyboardInterrupt: pass return # ## if -W install/start/stop web2py as service if options.winservice: if os.name == 'nt': if have_winservice: register_service_handler( argv=['', options.winservice], opt_file=options.config, cls=Web2pyService) else: print 'Error: Missing python module winservice' sys.exit(1) else: print 'Error: Windows services not supported on this platform' sys.exit(1) return # ## if -H cron is enabled in this *process* # ## if --softcron use softcron # ## use hardcron in all other cases if cron and options.runcron and options.softcron: print 'Using softcron (but this is not very efficient)' global_settings.web2py_crontype = 'soft' elif cron and options.runcron: logger.debug('Starting hardcron...') global_settings.web2py_crontype = 'hard' newcron.hardcron(options.folder).start() # ## if no password provided and havetk start Tk interface # ## or start interface if we want to put in taskbar (system tray) try: options.taskbar except: options.taskbar = False if options.taskbar and os.name != 'nt': print 'Error: taskbar not supported on this platform' sys.exit(1) root = None if not options.nogui: try: import Tkinter havetk = True except ImportError: logger.warn( 'GUI not available because Tk library is not installed') havetk = False if options.password == '<ask>' and havetk or options.taskbar and havetk: try: root = Tkinter.Tk() except: pass if root: root.focus_force() # Mac OS X - make the GUI window rise to the top if os.path.exists("/usr/bin/osascript"): applescript = """ tell application "System Events" set proc to first process whose unix id is %d set frontmost of proc to true end tell """ % (os.getpid()) os.system("/usr/bin/osascript -e '%s'" % applescript) if not options.quiet: presentation(root) master = web2pyDialog(root, options) signal.signal(signal.SIGTERM, lambda a, b: master.quit()) try: root.mainloop() except: master.quit() sys.exit() # ## if no tk and no password, ask for a password if not root and options.password == '<ask>': options.password = getpass.getpass('choose a password:') if not options.password and not options.nobanner: print 'no password, no admin interface' # ##-X (if no tk, the widget takes care of it himself) if not root and options.scheduler and options.with_scheduler: t = threading.Thread(target=start_schedulers, args=(options,)) t.start() # ## start server # Use first interface IP and port if interfaces specified, since the # interfaces option overrides the IP (and related) options. 
if not options.interfaces: (ip, port) = (options.ip, int(options.port)) else: first_if = options.interfaces[0] (ip, port) = first_if[0], first_if[1] # Check for non default value for ssl inputs if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0): proto = 'https' else: proto = 'http' url = get_url(ip, proto=proto, port=port) if not options.nobanner: print 'please visit:' print '\t', url print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid() server = main.HttpServer(ip=ip, port=port, password=options.password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_filename=options.profiler_filename, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, ssl_ca_certificate=options.ssl_ca_certificate, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=options.request_queue_size, timeout=options.timeout, socket_timeout=options.socket_timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder, interfaces=options.interfaces) try: server.start() except KeyboardInterrupt: server.stop() try: t.join() except: pass logging.shutdown()
Python
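The startup code above ends by constructing gluon.main.HttpServer from the parsed options and calling start(). Below is a minimal, hedged sketch of that same call with hard-coded illustrative values; every value shown is an assumption, the remaining keyword arguments are assumed to keep their defaults, and web2py's gluon package must be importable.

# Sketch: start the web2py HTTP server directly, mirroring the HttpServer(...)
# call in the startup code above. All concrete values are illustrative.
from gluon import main

server = main.HttpServer(
    ip='127.0.0.1',
    port=8000,
    password='secret',               # admin password (options.password above)
    pid_filename='httpserver.pid',
    log_filename='httpserver.log',
    path='/path/to/web2py',          # applications folder (options.folder above)
)
try:
    server.start()                   # blocks until interrupted
except KeyboardInterrupt:
    server.stop()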
#!/usr/bin/env python # -*- coding: utf-8 -*- # portalocker.py # Cross-platform (posix/nt) API for flock-style file locking. # Requires python 1.5.2 or better. """ Cross-platform (posix/nt) API for flock-style file locking. Synopsis: import portalocker file = open(\"somefile\", \"r+\") portalocker.lock(file, portalocker.LOCK_EX) file.seek(12) file.write(\"foo\") file.close() If you know what you're doing, you may choose to portalocker.unlock(file) before closing the file, but why? Methods: lock( file, flags ) unlock( file ) Constants: LOCK_EX LOCK_SH LOCK_NB I learned the win32 technique for locking files from sample code provided by John Nielsen <nielsenjf@my-deja.com> in the documentation that accompanies the win32 modules. Author: Jonathan Feinberg <jdf@pobox.com> Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $ """ import logging import platform logger = logging.getLogger("web2py") os_locking = None try: import google.appengine os_locking = 'gae' except: try: import fcntl os_locking = 'posix' except: try: import win32con import win32file import pywintypes os_locking = 'windows' except: pass if os_locking == 'windows': LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK LOCK_SH = 0 # the default LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # is there any reason not to reuse the following structure? __overlapped = pywintypes.OVERLAPPED() def lock(file, flags): hfile = win32file._get_osfhandle(file.fileno()) win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped) def unlock(file): hfile = win32file._get_osfhandle(file.fileno()) win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped) elif os_locking == 'posix': LOCK_EX = fcntl.LOCK_EX LOCK_SH = fcntl.LOCK_SH LOCK_NB = fcntl.LOCK_NB def lock(file, flags): fcntl.flock(file.fileno(), flags) def unlock(file): fcntl.flock(file.fileno(), fcntl.LOCK_UN) else: if platform.system() == 'Windows': logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/') elif os_locking != 'gae': logger.debug('no file locking, this will cause problems') LOCK_EX = None LOCK_SH = None LOCK_NB = None def lock(file, flags): pass def unlock(file): pass class LockedFile(object): def __init__(self, filename, mode='rb'): self.filename = filename self.mode = mode self.file = None if 'r' in mode: self.file = open(filename, mode) lock(self.file, LOCK_SH) elif 'w' in mode or 'a' in mode: self.file = open(filename, mode.replace('w', 'a')) lock(self.file, LOCK_EX) if not 'a' in mode: self.file.seek(0) self.file.truncate() else: raise RuntimeError("invalid LockedFile(...,mode)") def read(self, size=None): return self.file.read() if size is None else self.file.read(size) def readline(self): return self.file.readline() def readlines(self): return self.file.readlines() def write(self, data): self.file.write(data) self.file.flush() def close(self): if not self.file is None: unlock(self.file) self.file.close() self.file = None def __del__(self): if not self.file is None: self.close() def read_locked(filename): fp = LockedFile(filename, 'r') data = fp.read() fp.close() return data def write_locked(filename, data): fp = LockedFile(filename, 'w') data = fp.write(data) fp.close() if __name__ == '__main__': f = LockedFile('test.txt', mode='wb') f.write('test ok') f.close() f = LockedFile('test.txt', mode='rb') sys.stdout.write(f.read()+'\n') f.close()
Python
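A short usage sketch for the locking helpers above (the file name is illustrative): LockedFile takes a shared lock for reads and an exclusive lock for writes, and read_locked/write_locked wrap the common one-shot cases.

import portalocker

# write under an exclusive lock
f = portalocker.LockedFile('counter.txt', 'wb')
f.write('1')
f.close()

# read under a shared lock (convenience wrapper defined in the module above)
print(portalocker.read_locked('counter.txt'))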
""" This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import os import sys import socket import platform from storage import Storage global_settings = Storage() settings = global_settings # legacy compatibility if not hasattr(os, 'mkdir'): global_settings.db_sessions = True if global_settings.db_sessions is not True: global_settings.db_sessions = set() global_settings.gluon_parent = \ os.environ.get('web2py_path', os.getcwd()) global_settings.applications_parent = global_settings.gluon_parent global_settings.app_folders = set() global_settings.debugging = False global_settings.is_pypy = \ hasattr(platform, 'python_implementation') and \ platform.python_implementation() == 'PyPy' global_settings.is_jython = \ 'java' in sys.platform.lower() or \ hasattr(sys, 'JYTHON_JAR') or \ str(sys.copyright).find('Jython') > 0
Python
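Other gluon modules read these flags straight off the Storage defined above. A minimal sketch follows; the gluon.settings import path is assumed from the standard web2py layout.

from gluon.settings import global_settings, settings

print(global_settings.gluon_parent)              # web2py root folder
print(global_settings.is_pypy, global_settings.is_jython)
assert settings is global_settings               # legacy alias defined above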
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import re __all__ = ['HTTP', 'redirect'] defined_status = { 200: 'OK', 201: 'CREATED', 202: 'ACCEPTED', 203: 'NON-AUTHORITATIVE INFORMATION', 204: 'NO CONTENT', 205: 'RESET CONTENT', 206: 'PARTIAL CONTENT', 301: 'MOVED PERMANENTLY', 302: 'FOUND', 303: 'SEE OTHER', 304: 'NOT MODIFIED', 305: 'USE PROXY', 307: 'TEMPORARY REDIRECT', 400: 'BAD REQUEST', 401: 'UNAUTHORIZED', 403: 'FORBIDDEN', 404: 'NOT FOUND', 405: 'METHOD NOT ALLOWED', 406: 'NOT ACCEPTABLE', 407: 'PROXY AUTHENTICATION REQUIRED', 408: 'REQUEST TIMEOUT', 409: 'CONFLICT', 410: 'GONE', 411: 'LENGTH REQUIRED', 412: 'PRECONDITION FAILED', 413: 'REQUEST ENTITY TOO LARGE', 414: 'REQUEST-URI TOO LONG', 415: 'UNSUPPORTED MEDIA TYPE', 416: 'REQUESTED RANGE NOT SATISFIABLE', 417: 'EXPECTATION FAILED', 422: 'UNPROCESSABLE ENTITY', 500: 'INTERNAL SERVER ERROR', 501: 'NOT IMPLEMENTED', 502: 'BAD GATEWAY', 503: 'SERVICE UNAVAILABLE', 504: 'GATEWAY TIMEOUT', 505: 'HTTP VERSION NOT SUPPORTED', } # If web2py is executed with python2.4 we need # to use Exception instead of BaseException try: BaseException except NameError: BaseException = Exception regex_status = re.compile('^\d{3} \w+$') class HTTP(BaseException): def __init__( self, status, body='', cookies=None, status_message='', **headers ): self.status = status self.body = body self.headers = headers self.cookies2headers(cookies) self.status_message = status_message def cookies2headers(self, cookies): if cookies and len(cookies) > 0: self.headers['Set-Cookie'] = [ str(cookie)[11:] for cookie in cookies.values()] def to(self, responder, env=None): env = env or {} status = self.status headers = self.headers status_message = status if status in defined_status: if status_message: status = str(status) + ' ' + str(status_message) else: status = '%d %s' % (status, defined_status[status]) else: status = str(status) + ' ' + status_message if not regex_status.match(status): status = '500 %s' % (defined_status[500]) headers.setdefault('Content-Type', 'text/html; charset=UTF-8') body = self.body if status[:1] == '4': if not body: body = status if isinstance(body, str): headers['Content-Length'] = len(body) rheaders = [] for k, v in headers.iteritems(): if isinstance(v, list): rheaders += [(k, str(item)) for item in v] elif not v is None: rheaders.append((k, str(v))) responder(status, rheaders) if env.get('request_method', '') == 'HEAD': return [''] elif isinstance(body, str): return [body] elif hasattr(body, '__iter__'): return body else: return [str(body)] @property def message(self): """ compose a message describing this exception "status defined_status [web2py_error]" message elements that are not defined are omitted """ msg = '%(status)d' status_message = '' if self.status_message: status_message = self.status_message elif self.status in defined_status: status_message = defined_status.get(self.status) if status_message: msg = '%(status)d %(defined_status)s' if 'web2py_error' in self.headers: msg += ' [%(web2py_error)s]' return msg % dict(status=self.status, defined_status=status_message, web2py_error=self.headers.get('web2py_error')) def __str__(self): "stringify me" return self.message def redirect(location='', how=303, client_side=False): if location: from gluon import current loc = location.replace('\r', '%0D').replace('\n', '%0A') if client_side and current.request.ajax: raise 
HTTP(200, **{'web2py-redirect-location': loc}) else: raise HTTP(how, 'You are being redirected <a href="%s">here</a>' % loc, Location=loc) else: from gluon import current if client_side and current.request.ajax: raise HTTP(200, **{'web2py-component-command': 'window.location.reload(true)'})
Python
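A hedged sketch of how HTTP and redirect above are typically used from inside a controller action; the action body, the flag argument and the URL are all hypothetical.

from gluon.http import HTTP, redirect

def download(user_is_authorized=False):
    # HTTP is an exception: raising it sends the status and body to the client
    if not user_is_authorized:
        raise HTTP(403, 'not allowed')
    # redirect() raises HTTP(303) carrying a Location header with this URL
    redirect('/myapp/default/index')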
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>, limodou <limodou@gmail.com> and srackham <srackham@gmail.com>. License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import logging import os import pdb import Queue import sys logger = logging.getLogger("web2py") class Pipe(Queue.Queue): def __init__(self, name, mode='r', *args, **kwargs): self.__name = name Queue.Queue.__init__(self, *args, **kwargs) def write(self, data): logger.debug("debug %s writting %s" % (self.__name, data)) self.put(data) def flush(self): # mark checkpoint (complete message) logger.debug("debug %s flushing..." % self.__name) self.put(None) # wait until it is processed self.join() logger.debug("debug %s flush done" % self.__name) def read(self, count=None, timeout=None): logger.debug("debug %s reading..." % (self.__name, )) data = self.get(block=True, timeout=timeout) # signal that we are ready self.task_done() logger.debug("debug %s read %s" % (self.__name, data)) return data def readline(self): logger.debug("debug %s readline..." % (self.__name, )) return self.read() pipe_in = Pipe('in') pipe_out = Pipe('out') debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,) def set_trace(): "breakpoint shortcut (like pdb)" logger.info("DEBUG: set_trace!") debugger.set_trace(sys._getframe().f_back) def stop_trace(): "stop waiting for the debugger (called atexit)" # this should prevent communicate is wait forever a command result # and the main thread has finished logger.info("DEBUG: stop_trace!") pipe_out.write("debug finished!") pipe_out.write(None) #pipe_out.flush() def communicate(command=None): "send command to debbuger, wait result" if command is not None: logger.info("DEBUG: sending command %s" % command) pipe_in.write(command) #pipe_in.flush() result = [] while True: data = pipe_out.read() if data is None: break result.append(data) logger.info("DEBUG: result %s" % repr(result)) return ''.join(result) # New debugger implementation using qdb and a web UI import gluon.contrib.qdb as qdb from threading import RLock interact_lock = RLock() run_lock = RLock() def check_interaction(fn): "Decorator to clean and prevent interaction when not available" def check_fn(self, *args, **kwargs): interact_lock.acquire() try: if self.filename: self.clear_interaction() return fn(self, *args, **kwargs) finally: interact_lock.release() return check_fn class WebDebugger(qdb.Frontend): "Qdb web2py interface" def __init__(self, pipe, completekey='tab', stdin=None, stdout=None): qdb.Frontend.__init__(self, pipe) self.clear_interaction() def clear_interaction(self): self.filename = None self.lineno = None self.exception_info = None self.context = None # redefine Frontend methods: def run(self): run_lock.acquire() try: while self.pipe.poll(): qdb.Frontend.run(self) finally: run_lock.release() def interaction(self, filename, lineno, line, **context): # store current status interact_lock.acquire() try: self.filename = filename self.lineno = lineno self.context = context finally: interact_lock.release() def exception(self, title, extype, exvalue, trace, request): self.exception_info = {'title': title, 'extype': extype, 'exvalue': exvalue, 'trace': trace, 'request': request} @check_interaction def do_continue(self): qdb.Frontend.do_continue(self) @check_interaction def do_step(self): qdb.Frontend.do_step(self) @check_interaction def do_return(self): qdb.Frontend.do_return(self) @check_interaction def do_next(self): 
qdb.Frontend.do_next(self) @check_interaction def do_quit(self): qdb.Frontend.do_quit(self) def do_exec(self, statement): interact_lock.acquire() try: # check to see if we're inside interaction if self.filename: # avoid spurious interaction notifications: self.set_burst(2) # execute the statement in the remote debugger: return qdb.Frontend.do_exec(self, statement) finally: interact_lock.release() # create the connection between threads: parent_queue, child_queue = Queue.Queue(), Queue.Queue() front_conn = qdb.QueuePipe("parent", parent_queue, child_queue) child_conn = qdb.QueuePipe("child", child_queue, parent_queue) web_debugger = WebDebugger(front_conn) # frontend qdb_debugger = qdb.Qdb( pipe=child_conn, redirect_stdio=False, skip=None) # backend dbg = qdb_debugger # enable getting context (stack, globals/locals) at interaction qdb_debugger.set_params(dict(call_stack=True, environment=True)) import gluon.main gluon.main.global_settings.debugging = True
Python
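A hedged sketch of the pdb-over-queues flow above: set_trace() suspends the calling thread at a breakpoint, while communicate() is intended to be called from a different thread (for example the admin UI) to push pdb commands through pipe_in and collect the output from pipe_out. The function name is hypothetical, and web2py's gluon package must be importable.

import gluon.debug as debug

def slow_action():
    x = 41
    debug.set_trace()       # breakpoint: blocks until the debugger thread sends a command
    return x + 1

# from a separate thread (left commented out so this sketch does not block):
# print(debug.communicate('l'))   # list source around the breakpoint
# print(debug.communicate('c'))   # continue execution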
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This file specifically includes utilities for security. """ import threading import struct import hashlib import hmac import uuid import random import time import os import re import sys import logging import socket import base64 import zlib python_version = sys.version_info[0] if python_version == 2: import cPickle as pickle else: import pickle try: from Crypto.Cipher import AES except ImportError: import contrib.aes as AES try: from contrib.pbkdf2 import pbkdf2_hex HAVE_PBKDF2 = True except ImportError: try: from .pbkdf2 import pbkdf2_hex HAVE_PBKDF2 = True except (ImportError, ValueError): HAVE_PBKDF2 = False logger = logging.getLogger("web2py") def AES_new(key, IV=None): """ Returns an AES cipher object and random IV if None specified """ if IV is None: IV = fast_urandom16() return AES.new(key, AES.MODE_CBC, IV), IV def compare(a, b): """ compares two strings and not vulnerable to timing attacks """ if len(a) != len(b): return False result = 0 for x, y in zip(a, b): result |= ord(x) ^ ord(y) return result == 0 def md5_hash(text): """ Generate a md5 hash with the given text """ return hashlib.md5(text).hexdigest() def simple_hash(text, key='', salt='', digest_alg='md5'): """ Generates hash with the given text using the specified digest hashing algorithm """ if not digest_alg: raise RuntimeError("simple_hash with digest_alg=None") elif not isinstance(digest_alg, str): # manual approach h = digest_alg(text + key + salt) elif digest_alg.startswith('pbkdf2'): # latest and coolest! iterations, keylen, alg = digest_alg[7:-1].split(',') return pbkdf2_hex(text, salt, int(iterations), int(keylen), get_digest(alg)) elif key: # use hmac digest_alg = get_digest(digest_alg) h = hmac.new(key + salt, text, digest_alg) else: # compatible with third party systems h = hashlib.new(digest_alg) h.update(text + salt) return h.hexdigest() def get_digest(value): """ Returns a hashlib digest algorithm from a string """ if not isinstance(value, str): return value value = value.lower() if value == "md5": return hashlib.md5 elif value == "sha1": return hashlib.sha1 elif value == "sha224": return hashlib.sha224 elif value == "sha256": return hashlib.sha256 elif value == "sha384": return hashlib.sha384 elif value == "sha512": return hashlib.sha512 else: raise ValueError("Invalid digest algorithm: %s" % value) DIGEST_ALG_BY_SIZE = { 128 / 4: 'md5', 160 / 4: 'sha1', 224 / 4: 'sha224', 256 / 4: 'sha256', 384 / 4: 'sha384', 512 / 4: 'sha512', } def pad(s, n=32, padchar=' '): return s + (32 - len(s) % 32) * padchar def secure_dumps(data, encryption_key, hash_key=None, compression_level=None): if not hash_key: hash_key = hashlib.sha1(encryption_key).hexdigest() dump = pickle.dumps(data) if compression_level: dump = zlib.compress(dump, compression_level) key = pad(encryption_key[:32]) cipher, IV = AES_new(key) encrypted_data = base64.urlsafe_b64encode(IV + cipher.encrypt(pad(dump))) signature = hmac.new(hash_key, encrypted_data).hexdigest() return signature + ':' + encrypted_data def secure_loads(data, encryption_key, hash_key=None, compression_level=None): if not ':' in data: return None if not hash_key: hash_key = hashlib.sha1(encryption_key).hexdigest() signature, encrypted_data = data.split(':', 1) actual_signature = hmac.new(hash_key, encrypted_data).hexdigest() if not compare(signature, actual_signature): 
return None key = pad(encryption_key[:32]) encrypted_data = base64.urlsafe_b64decode(encrypted_data) IV, encrypted_data = encrypted_data[:16], encrypted_data[16:] cipher, _ = AES_new(key, IV=IV) try: data = cipher.decrypt(encrypted_data) data = data.rstrip(' ') if compression_level: data = zlib.decompress(data) return pickle.loads(data) except (TypeError, pickle.UnpicklingError): return None ### compute constant CTOKENS def initialize_urandom(): """ This function and the web2py_uuid follow from the following discussion: http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09 At startup web2py compute a unique ID that identifies the machine by adding uuid.getnode() + int(time.time() * 1e3) This is a 48-bit number. It converts the number into 16 8-bit tokens. It uses this value to initialize the entropy source ('/dev/urandom') and to seed random. If os.random() is not supported, it falls back to using random and issues a warning. """ node_id = uuid.getnode() microseconds = int(time.time() * 1e6) ctokens = [((node_id + microseconds) >> ((i % 6) * 8)) % 256 for i in range(16)] random.seed(node_id + microseconds) try: os.urandom(1) have_urandom = True try: # try to add process-specific entropy frandom = open('/dev/urandom', 'wb') try: if python_version == 2: frandom.write(''.join(chr(t) for t in ctokens)) # python 2 else: frandom.write(bytes([]).join(bytes([t]) for t in ctokens)) # python 3 finally: frandom.close() except IOError: # works anyway pass except NotImplementedError: have_urandom = False logger.warning( """Cryptographically secure session management is not possible on your system because your system does not provide a cryptographically secure entropy source. This is not specific to web2py; consider deploying on a different operating system.""") if python_version == 2: packed = ''.join(chr(x) for x in ctokens) # python 2 else: packed = bytes([]).join(bytes([x]) for x in ctokens) # python 3 unpacked_ctokens = struct.unpack('=QQ', packed) return unpacked_ctokens, have_urandom UNPACKED_CTOKENS, HAVE_URANDOM = initialize_urandom() def fast_urandom16(urandom=[], locker=threading.RLock()): """ this is 4x faster than calling os.urandom(16) and prevents the "too many files open" issue with concurrent access to os.urandom() """ try: return urandom.pop() except IndexError: try: locker.acquire() ur = os.urandom(16 * 1024) urandom += [ur[i:i + 16] for i in xrange(16, 1024 * 16, 16)] return ur[0:16] finally: locker.release() def web2py_uuid(ctokens=UNPACKED_CTOKENS): """ This function follows from the following discussion: http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09 It works like uuid.uuid4 except that tries to use os.urandom() if possible and it XORs the output with the tokens uniquely associated with this machine. 
""" rand_longs = (random.getrandbits(64), random.getrandbits(64)) if HAVE_URANDOM: urand_longs = struct.unpack('=QQ', fast_urandom16()) byte_s = struct.pack('=QQ', rand_longs[0] ^ urand_longs[0] ^ ctokens[0], rand_longs[1] ^ urand_longs[1] ^ ctokens[1]) else: byte_s = struct.pack('=QQ', rand_longs[0] ^ ctokens[0], rand_longs[1] ^ ctokens[1]) return str(uuid.UUID(bytes=byte_s, version=4)) REGEX_IPv4 = re.compile('(\d+)\.(\d+)\.(\d+)\.(\d+)') def is_valid_ip_address(address): """ >>> is_valid_ip_address('127.0') False >>> is_valid_ip_address('127.0.0.1') True >>> is_valid_ip_address('2001:660::1') True """ # deal with special cases if address.lower() in ('127.0.0.1', 'localhost', '::1', '::ffff:127.0.0.1'): return True elif address.lower() in ('unknown', ''): return False elif address.count('.') == 3: # assume IPv4 if address.startswith('::ffff:'): address = address[7:] if hasattr(socket, 'inet_aton'): # try validate using the OS try: socket.inet_aton(address) return True except socket.error: # invalid address return False else: # try validate using Regex match = REGEX_IPv4.match(address) if match and all(0 <= int(match.group(i)) < 256 for i in (1, 2, 3, 4)): return True return False elif hasattr(socket, 'inet_pton'): # assume IPv6, try using the OS try: socket.inet_pton(socket.AF_INET6, address) return True except socket.error: # invalid address return False else: # do not know what to do? assume it is a valid address return True def is_loopback_ip_address(ip=None, addrinfo=None): """ Determines whether the address appears to be a loopback address. This assumes that the IP is valid. """ if addrinfo: # see socket.getaddrinfo() for layout of addrinfo tuple if addrinfo[0] == socket.AF_INET or addrinfo[0] == socket.AF_INET6: ip = addrinfo[4] if not isinstance(ip, basestring): return False # IPv4 or IPv6-embedded IPv4 or IPv4-compatible IPv6 if ip.count('.') == 3: return ip.lower().startswith(('127', '::127', '0:0:0:0:0:0:127', '::ffff:127', '0:0:0:0:0:ffff:127')) return ip == '::1' or ip == '0:0:0:0:0:0:0:1' # IPv6 loopback def getipaddrinfo(host): """ Filter out non-IP and bad IP addresses from getaddrinfo """ try: return [addrinfo for addrinfo in socket.getaddrinfo(host, None) if (addrinfo[0] == socket.AF_INET or addrinfo[0] == socket.AF_INET6) and isinstance(addrinfo[4][0], basestring)] except socket.error: return []
Python
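A short, hedged sketch of the security helpers above (all key material is illustrative): web2py_uuid() mixes urandom output with the machine tokens, simple_hash() with a key uses HMAC, and secure_dumps/secure_loads sign and encrypt a pickled payload, returning None on a signature mismatch.

from gluon.utils import web2py_uuid, simple_hash, secure_dumps, secure_loads

print(web2py_uuid())                                      # random UUID4-style string
print(simple_hash('secret', key='hmac-key', digest_alg='sha512'))

blob = secure_dumps({'user_id': 42}, encryption_key='my-encryption-key')
assert secure_loads(blob, encryption_key='my-encryption-key') == {'user_id': 42}
assert secure_loads(blob, encryption_key='wrong-key') is None   # signature mismatch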
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Provides: - List; like list but returns None instead of IndexOutOfBounds - Storage; like dictionary allowing also for `obj.foo` for `obj['foo']` """ import cPickle import portalocker __all__ = ['List', 'Storage', 'Settings', 'Messages', 'StorageList', 'load_storage', 'save_storage'] DEFAULT = lambda:0 class Storage(dict): """ A Storage object is like a dictionary except `obj.foo` can be used in addition to `obj['foo']`, and setting obj.foo = None deletes item foo. >>> o = Storage(a=1) >>> print o.a 1 >>> o['a'] 1 >>> o.a = 2 >>> print o['a'] 2 >>> del o.a >>> print o.a None """ __slots__ = () __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ __getitem__ = dict.get __getattr__ = dict.get __repr__ = lambda self: '<Storage %s>' % dict.__repr__(self) # http://stackoverflow.com/questions/5247250/why-does-pickle-getstate-accept-as-a-return-value-the-very-instance-it-requi __getstate__ = lambda self: None __copy__ = lambda self: Storage(self) def getlist(self, key): """ Return a Storage value as a list. If the value is a list it will be returned as-is. If object is None, an empty list will be returned. Otherwise, [value] will be returned. Example output for a query string of ?x=abc&y=abc&y=def >>> request = Storage() >>> request.vars = Storage() >>> request.vars.x = 'abc' >>> request.vars.y = ['abc', 'def'] >>> request.vars.getlist('x') ['abc'] >>> request.vars.getlist('y') ['abc', 'def'] >>> request.vars.getlist('z') [] """ value = self.get(key, []) if value is None or isinstance(value, (list, tuple)): return value else: return [value] def getfirst(self, key, default=None): """ Return the first or only value when given a request.vars-style key. If the value is a list, its first item will be returned; otherwise, the value will be returned as-is. Example output for a query string of ?x=abc&y=abc&y=def >>> request = Storage() >>> request.vars = Storage() >>> request.vars.x = 'abc' >>> request.vars.y = ['abc', 'def'] >>> request.vars.getfirst('x') 'abc' >>> request.vars.getfirst('y') 'abc' >>> request.vars.getfirst('z') """ values = self.getlist(key) return values[0] if values else default def getlast(self, key, default=None): """ Returns the last or only single value when given a request.vars-style key. If the value is a list, the last item will be returned; otherwise, the value will be returned as-is. 
Simulated output with a query string of ?x=abc&y=abc&y=def >>> request = Storage() >>> request.vars = Storage() >>> request.vars.x = 'abc' >>> request.vars.y = ['abc', 'def'] >>> request.vars.getlast('x') 'abc' >>> request.vars.getlast('y') 'def' >>> request.vars.getlast('z') """ values = self.getlist(key) return values[-1] if values else default PICKABLE = (str, int, long, float, bool, list, dict, tuple, set) class StorageList(Storage): """ like Storage but missing elements default to [] instead of None """ def __getitem__(self, key): return self.__getattr__(key) def __getattr__(self, key): if key in self: return getattr(self, key) else: r = [] setattr(self, key, r) return r def load_storage(filename): fp = None try: fp = portalocker.LockedFile(filename, 'rb') storage = cPickle.load(fp) finally: if fp: fp.close() return Storage(storage) def save_storage(storage, filename): fp = None try: fp = portalocker.LockedFile(filename, 'wb') cPickle.dump(dict(storage), fp) finally: if fp: fp.close() class Settings(Storage): def __setattr__(self, key, value): if key != 'lock_keys' and self['lock_keys'] and key not in self: raise SyntaxError('setting key \'%s\' does not exist' % key) if key != 'lock_values' and self['lock_values']: raise SyntaxError('setting value cannot be changed: %s' % key) self[key] = value class Messages(Settings): def __init__(self, T): Storage.__init__(self, T=T) def __getattr__(self, key): value = self[key] if isinstance(value, str): return str(self.T(value)) return value class FastStorage(dict): """ Eventually this should replace class Storage but causes memory leak because of http://bugs.python.org/issue1469629 >>> s = FastStorage() >>> s.a = 1 >>> s.a 1 >>> s['a'] 1 >>> s.b >>> s['b'] >>> s['b']=2 >>> s['b'] 2 >>> s.b 2 >>> isinstance(s,dict) True >>> dict(s) {'a': 1, 'b': 2} >>> dict(FastStorage(s)) {'a': 1, 'b': 2} >>> import pickle >>> s = pickle.loads(pickle.dumps(s)) >>> dict(s) {'a': 1, 'b': 2} >>> del s.b >>> del s.a >>> s.a >>> s.b >>> s['a'] >>> s['b'] """ def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) self.__dict__ = self def __getattr__(self, key): return getattr(self, key) if key in self else None def __getitem__(self, key): return dict.get(self, key, None) def copy(self): self.__dict__ = {} s = FastStorage(self) self.__dict__ = self return s def __repr__(self): return '<Storage %s>' % dict.__repr__(self) def __getstate__(self): return dict(self) def __setstate__(self, sdict): dict.__init__(self, sdict) self.__dict__ = self def update(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) self.__dict__ = self class List(list): """ Like a regular python list but a[i] if i is out of bounds return None instead of IndexOutOfBounds """ def __call__(self, i, default=DEFAULT, cast=None, otherwise=None): """ request.args(0,default=0,cast=int,otherwise='http://error_url') request.args(0,default=0,cast=int,otherwise=lambda:...) """ n = len(self) if 0 <= i < n or -n <= i < 0: value = self[i] elif default is DEFAULT: value = None else: value, cast = default, False if cast: try: value = cast(value) except (ValueError, TypeError): from http import HTTP, redirect if otherwise is None: raise HTTP(404) elif isinstance(otherwise, str): redirect(otherwise) elif callable(otherwise): return otherwise() else: raise RuntimeError("invalid otherwise") return value if __name__ == '__main__': import doctest doctest.testmod()
Python
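A brief sketch of the two containers defined above (values are illustrative): Storage allows attribute access and returns None for missing keys, while List returns a default instead of raising IndexError.

from gluon.storage import Storage, List

s = Storage(a=1)
print(s.a, s['a'])          # attribute and item access are interchangeable
print(s.missing)            # unknown keys yield None, not an AttributeError

args = List(['edit', '42'])
print(args(1, cast=int))             # 42
print(args(5, default='none'))       # out of range -> default, no IndexError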
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import re # pattern to find defined tables regex_tables = re.compile( """^[\w]+\.define_table\(\s*[\'\"](?P<name>\w+)[\'\"]""", flags=re.M) # pattern to find exposed functions in controller regex_expose = re.compile( '^def\s+(?P<name>_?[a-zA-Z0-9]\w*)\( *\)\s*:', flags=re.M) regex_include = re.compile( '(?P<all>\{\{\s*include\s+[\'"](?P<name>[^\'"]*)[\'"]\s*\}\})') regex_extend = re.compile( '^\s*(?P<all>\{\{\s*extend\s+[\'"](?P<name>[^\'"]+)[\'"]\s*\}\})', re.MULTILINE)
Python
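A hedged sketch of what these patterns match, with the compiled regexes above assumed to be in scope and the source strings invented for illustration.

controller_src = "def index():\n    return dict()\n\ndef _private():\n    pass\n"
model_src = "db.define_table('person', Field('name'))\n"

print(regex_expose.findall(controller_src))   # ['index', '_private']
print(regex_tables.findall(model_src))        # ['person']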
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop©gmail.com> for Web2py project Utilities and class for UTF8 strings managing =========================================== """ import __builtin__ __all__ = ['Utf8'] repr_escape_tab = {} for i in range(1, 32): repr_escape_tab[i] = ur'\x%02x' % i repr_escape_tab[7] = u'\\a' repr_escape_tab[8] = u'\\b' repr_escape_tab[9] = u'\\t' repr_escape_tab[10] = u'\\n' repr_escape_tab[11] = u'\\v' repr_escape_tab[12] = u'\\f' repr_escape_tab[13] = u'\\r' repr_escape_tab[ord('\\')] = u'\\\\' repr_escape_tab2 = repr_escape_tab.copy() repr_escape_tab2[ord('\'')] = u"\\'" def sort_key(s): """ Unicode Collation Algorithm (UCA) (http://www.unicode.org/reports/tr10/) is used for utf-8 and unicode strings sorting and for utf-8 strings comparison NOTE: pyuca is a very memory cost module! It loads the whole "allkey.txt" file (~2mb!) into the memory. But this functionality is needed only when sort_key() is called as a part of sort() function or when Utf8 strings are compared. So, it is a lazy "sort_key" function which (ONLY ONCE, ON ITS FIRST CALL) imports pyuca and replaces itself with a real sort_key() function """ global sort_key try: from contrib.pyuca import unicode_collator unicode_sort_key = unicode_collator.sort_key sort_key = lambda s: unicode_sort_key( unicode(s, 'utf-8') if isinstance(s, str) else s) except: sort_key = lambda s: ( unicode(s, 'utf-8') if isinstance(s, str) else s).lower() return sort_key(s) def ord(char): """ returns unicode id for utf8 or unicode *char* character SUPPOSE that *char* is an utf-8 or unicode character only """ if isinstance(char, unicode): return __builtin__.ord(char) return __builtin__.ord(unicode(char, 'utf-8')) def chr(code): """ return utf8-character with *code* unicode id """ return Utf8(unichr(code)) def size(string): """ return length of utf-8 string in bytes NOTE! The length of correspondent utf-8 string is returned for unicode string """ return Utf8(string).__size__() def truncate(string, length, dots='...'): """ returns string of length < *length* or truncate string with adding *dots* suffix to the string's end args: length (int): max length of string dots (str or unicode): string suffix, when string is cutted returns: (utf8-str): original or cutted string """ text = unicode(string, 'utf-8') dots = unicode(dots, 'utf-8') if isinstance(dots, str) else dots if len(text) > length: text = text[:length - len(dots)] + dots return str.__new__(Utf8, text.encode('utf-8')) class Utf8(str): """ Class for utf8 string storing and manipulations The base presupposition of this class usage is: "ALL strings in the application are either of utf-8 or unicode type, even when simple str type is used. UTF-8 is only a "packed" version of unicode, so Utf-8 and unicode strings are interchangeable." CAUTION! This class is slower than str/unicode! Do NOT use it inside intensive loops. Simply decode string(s) to unicode before loop and encode it back to utf-8 string(s) after intensive calculation. 
You can see the benefit of this class in doctests() below """ def __new__(cls, content='', codepage='utf-8'): if isinstance(content, unicode): return str.__new__(cls, unicode.encode(content, 'utf-8')) elif codepage in ('utf-8', 'utf8') or isinstance(content, cls): return str.__new__(cls, content) else: return str.__new__(cls, unicode(content, codepage).encode('utf-8')) def __repr__(self): r''' # note that we use raw strings to avoid having to use double back slashes below NOTE! This function is a clone of web2py:gluon.languages.utf_repl() function utf8.__repr__() works same as str.repr() when processing ascii string >>> repr(Utf8('abc')) == repr(Utf8("abc")) == repr('abc') == repr("abc") == "'abc'" True >>> repr(Utf8('a"b"c')) == repr('a"b"c') == '\'a"b"c\'' True >>> repr(Utf8("a'b'c")) == repr("a'b'c") == '"a\'b\'c"' True >>> repr(Utf8('a\'b"c')) == repr('a\'b"c') == repr(Utf8("a'b\"c")) == repr("a'b\"c") == '\'a\\\'b"c\'' True >>> repr(Utf8('a\r\nb')) == repr('a\r\nb') == "'a\\r\\nb'" # Test for \r, \n True Unlike str.repr(), Utf8.__repr__() remains utf8 content when processing utf8 string >>> repr(Utf8('中文字')) == repr(Utf8("中文字")) == "'中文字'" != repr('中文字') True >>> repr(Utf8('中"文"字')) == "'中\"文\"字'" != repr('中"文"字') True >>> repr(Utf8("中'文'字")) == '"中\'文\'字"' != repr("中'文'字") True >>> repr(Utf8('中\'文"字')) == repr(Utf8("中'文\"字")) == '\'中\\\'文"字\'' != repr('中\'文"字') == repr("中'文\"字") True >>> repr(Utf8('中\r\n文')) == "'中\\r\\n文'" != repr('中\r\n文') # Test for \r, \n True ''' if str.find(self, "'") >= 0 and str.find(self, '"') < 0: # only single quote exists return '"' + unicode(self, 'utf-8').translate(repr_escape_tab).encode('utf-8') + '"' else: return "'" + unicode(self, 'utf-8').translate(repr_escape_tab2).encode('utf-8') + "'" def __size__(self): """ length of utf-8 string in bytes """ return str.__len__(self) def __contains__(self, other): return str.__contains__(self, Utf8(other)) def __getitem__(self, index): return str.__new__(Utf8, unicode(self, 'utf-8')[index].encode('utf-8')) def __getslice__(self, begin, end): return str.__new__(Utf8, unicode(self, 'utf-8')[begin:end].encode('utf-8')) def __add__(self, other): return str.__new__(Utf8, str.__add__(self, unicode.encode(other, 'utf-8') if isinstance(other, unicode) else other)) def __len__(self): return len(unicode(self, 'utf-8')) def __mul__(self, integer): return str.__new__(Utf8, str.__mul__(self, integer)) def __eq__(self, string): return str.__eq__(self, Utf8(string)) def __ne__(self, string): return str.__ne__(self, Utf8(string)) def capitalize(self): return str.__new__(Utf8, unicode(self, 'utf-8').capitalize().encode('utf-8')) def center(self, length): return str.__new__(Utf8, unicode(self, 'utf-8').center(length).encode('utf-8')) def upper(self): return str.__new__(Utf8, unicode(self, 'utf-8').upper().encode('utf-8')) def lower(self): return str.__new__(Utf8, unicode(self, 'utf-8').lower().encode('utf-8')) def title(self): return str.__new__(Utf8, unicode(self, 'utf-8').title().encode('utf-8')) def index(self, string): return unicode(self, 'utf-8').index(string if isinstance(string, unicode) else unicode(string, 'utf-8')) def isalnum(self): return unicode(self, 'utf-8').isalnum() def isalpha(self): return unicode(self, 'utf-8').isalpha() def isdigit(self): return unicode(self, 'utf-8').isdigit() def islower(self): return unicode(self, 'utf-8').islower() def isspace(self): return unicode(self, 'utf-8').isspace() def istitle(self): return unicode(self, 'utf-8').istitle() def isupper(self): return unicode(self, 
'utf-8').isupper() def zfill(self, length): return str.__new__(Utf8, unicode(self, 'utf-8').zfill(length).encode('utf-8')) def join(self, iter): return str.__new__(Utf8, str.join(self, [Utf8(c) for c in list(unicode(iter, 'utf-8') if isinstance(iter, str) else iter)])) def lstrip(self, chars=None): return str.__new__(Utf8, str.lstrip(self, None if chars is None else Utf8(chars))) def rstrip(self, chars=None): return str.__new__(Utf8, str.rstrip(self, None if chars is None else Utf8(chars))) def strip(self, chars=None): return str.__new__(Utf8, str.strip(self, None if chars is None else Utf8(chars))) def swapcase(self): return str.__new__(Utf8, unicode(self, 'utf-8').swapcase().encode('utf-8')) def count(self, sub, start=0, end=None): unistr = unicode(self, 'utf-8') return unistr.count( unicode(sub, 'utf-8') if isinstance(sub, str) else sub, start, len(unistr) if end is None else end) def decode(self, encoding='utf-8', errors='strict'): return str.decode(self, encoding, errors) def encode(self, encoding, errors='strict'): return unicode(self, 'utf-8').encode(encoding, errors) def expandtabs(self, tabsize=8): return str.__new__(Utf8, unicode(self, 'utf-8').expandtabs(tabsize).encode('utf-8')) def find(self, sub, start=None, end=None): return unicode(self, 'utf-8').find(unicode(sub, 'utf-8') if isinstance(sub, str) else sub, start, end) def ljust(self, width, fillchar=' '): return str.__new__(Utf8, unicode(self, 'utf-8').ljust(width, unicode(fillchar, 'utf-8') if isinstance(fillchar, str) else fillchar).encode('utf-8')) def partition(self, sep): (head, sep, tail) = str.partition(self, Utf8(sep)) return (str.__new__(Utf8, head), str.__new__(Utf8, sep), str.__new__(Utf8, tail)) def replace(self, old, new, count=-1): return str.__new__(Utf8, str.replace(self, Utf8(old), Utf8(new), count)) def rfind(self, sub, start=None, end=None): return unicode(self, 'utf-8').rfind(unicode(sub, 'utf-8') if isinstance(sub, str) else sub, start, end) def rindex(self, string): return unicode(self, 'utf-8').rindex(string if isinstance(string, unicode) else unicode(string, 'utf-8')) def rjust(self, width, fillchar=' '): return str.__new__(Utf8, unicode(self, 'utf-8').rjust(width, unicode(fillchar, 'utf-8') if isinstance(fillchar, str) else fillchar).encode('utf-8')) def rpartition(self, sep): (head, sep, tail) = str.rpartition(self, Utf8(sep)) return (str.__new__(Utf8, head), str.__new__(Utf8, sep), str.__new__(Utf8, tail)) def rsplit(self, sep=None, maxsplit=-1): return [str.__new__(Utf8, part) for part in str.rsplit(self, None if sep is None else Utf8(sep), maxsplit)] def split(self, sep=None, maxsplit=-1): return [str.__new__(Utf8, part) for part in str.split(self, None if sep is None else Utf8(sep), maxsplit)] def splitlines(self, keepends=False): return [str.__new__(Utf8, part) for part in str.splitlines(self, keepends)] def startswith(self, prefix, start=0, end=None): unistr = unicode(self, 'utf-8') if isinstance(prefix, tuple): prefix = tuple(unicode( s, 'utf-8') if isinstance(s, str) else s for s in prefix) elif isinstance(prefix, str): prefix = unicode(prefix, 'utf-8') return unistr.startswith(prefix, start, len(unistr) if end is None else end) def translate(self, table, deletechars=''): if isinstance(table, dict): return str.__new__(Utf8, unicode(self, 'utf-8').translate(table).encode('utf-8')) else: return str.__new__(Utf8, str.translate(self, table, deletechars)) def endswith(self, prefix, start=0, end=None): unistr = unicode(self, 'utf-8') if isinstance(prefix, tuple): prefix = tuple(unicode( s, 
'utf-8') if isinstance(s, str) else s for s in prefix) elif isinstance(prefix, str): prefix = unicode(prefix, 'utf-8') return unistr.endswith(prefix, start, len(unistr) if end is None else end) if hasattr(str, 'format'): # Python 2.5 hasn't got str.format() method def format(self, *args, **kwargs): args = [unicode( s, 'utf-8') if isinstance(s, str) else s for s in args] kwargs = dict((unicode(k, 'utf-8') if isinstance(k, str) else k, unicode(v, 'utf-8') if isinstance(v, str) else v) for k, v in kwargs.iteritems()) return str.__new__(Utf8, unicode(self, 'utf-8'). format(*args, **kwargs).encode('utf-8')) def __mod__(self, right): if isinstance(right, tuple): right = tuple(unicode(v, 'utf-8') if isinstance(v, str) else v for v in right) elif isinstance(right, dict): right = dict((unicode(k, 'utf-8') if isinstance(k, str) else k, unicode(v, 'utf-8') if isinstance(v, str) else v) for k, v in right.iteritems()) elif isinstance(right, str): right = unicode(right, 'utf-8') return str.__new__(Utf8, unicode(self, 'utf-8').__mod__(right).encode('utf-8')) def __ge__(self, string): return sort_key(self) >= sort_key(string) def __gt__(self, string): return sort_key(self) > sort_key(string) def __le__(self, string): return sort_key(self) <= sort_key(string) def __lt__(self, string): return sort_key(self) < sort_key(string) if __name__ == '__main__': def doctests(): u""" doctests: >>> test_unicode=u'ПРоба Є PRobe' >>> test_unicode_word=u'ПРоба' >>> test_number_str='12345' >>> test_unicode u'\\u041f\\u0420\\u043e\\u0431\\u0430 \\u0404 PRobe' >>> print test_unicode ПРоба Є PRobe >>> test_word=test_unicode_word.encode('utf-8') >>> test_str=test_unicode.encode('utf-8') >>> s=Utf8(test_str) >>> s 'ПРоба Є PRobe' >>> type(s) <class '__main__.Utf8'> >>> s == test_str True >>> len(test_str) # wrong length of utf8-string! 19 >>> len(test_unicode) # RIGHT! 13 >>> len(s) # RIGHT! 13 >>> size(test_str) # size of utf-8 string (in bytes) == len(str) 19 >>> size(test_unicode) # size of unicode string in bytes (packed to utf-8 string) 19 >>> size(s) # size of utf-8 string in bytes 19 >>> try: # utf-8 is a multibyte string. Convert it to unicode for use with builtin ord() ... __builtin__.ord('б') # ascii string ... except Exception, e: ... print 'Exception:', e Exception: ord() expected a character, but string of length 2 found >>> ord('б') # utf8.ord() is used(!!!) 1073 >>> ord(u'б') # utf8.ord() is used(!!!) 1073 >>> ord(s[3]) # utf8.ord() is used(!!!) 1073 >>> chr(ord(s[3])) # utf8.chr() and utf8.chr() is used(!!!) 'б' >>> type(chr(1073)) # utf8.chr() is used(!!!) <class '__main__.Utf8'> >>> s=Utf8(test_unicode) >>> s 'ПРоба Є PRobe' >>> s == test_str True >>> test_str == s True >>> s == test_unicode True >>> test_unicode == s True >>> print test_str.upper() # only ASCII characters uppered ПРоба Є PROBE >>> print test_unicode.upper() # unicode gives right result ПРОБА Є PROBE >>> s.upper() # utf8 class use unicode.upper() 'ПРОБА Є PROBE' >>> type(s.upper()) <class '__main__.Utf8'> >>> s.lower() 'проба є probe' >>> type(s.lower()) <class '__main__.Utf8'> >>> s.capitalize() 'Проба є probe' >>> type(s.capitalize()) <class '__main__.Utf8'> >>> len(s) 13 >>> len(test_unicode) 13 >>> s+'. Probe is проба' 'ПРоба Є PRobe. Probe is проба' >>> type(s+'. Probe is проба') <class '__main__.Utf8'> >>> s+u'. Probe is проба' 'ПРоба Є PRobe. Probe is проба' >>> type(s+u'. 
Probe is проба') <class '__main__.Utf8'> >>> s+s 'ПРоба Є PRobeПРоба Є PRobe' >>> type(s+s) <class '__main__.Utf8'> >>> a=s >>> a+=s >>> a+=test_unicode >>> a+=test_str >>> a 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe' >>> type(a) <class '__main__.Utf8'> >>> s*3 'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe' >>> type(s*3) <class '__main__.Utf8'> >>> a=Utf8("-проба-") >>> a*=10 >>> a '-проба--проба--проба--проба--проба--проба--проба--проба--проба--проба-' >>> type(a) <class '__main__.Utf8'> >>> print "'"+test_str.center(17)+"'" # WRONG RESULT! 'ПРоба Є PRobe' >>> s.center(17) # RIGHT! ' ПРоба Є PRobe ' >>> type(s.center(17)) <class '__main__.Utf8'> >>> (test_word+test_number_str).isalnum() # WRONG RESULT! non ASCII chars are detected as non alpha False >>> Utf8(test_word+test_number_str).isalnum() True >>> s.isalnum() False >>> test_word.isalpha() # WRONG RESULT! Non ASCII characters are detected as non alpha False >>> Utf8(test_word).isalpha() # RIGHT! True >>> s.lower().islower() True >>> s.upper().isupper() True >>> print test_str.zfill(17) # WRONG RESULT! ПРоба Є PRobe >>> s.zfill(17) # RIGHT! '0000ПРоба Є PRobe' >>> type(s.zfill(17)) <class '__main__.Utf8'> >>> s.istitle() False >>> s.title().istitle() True >>> Utf8('1234').isdigit() True >>> Utf8(' \t').isspace() True >>> s.join('•|•') '•ПРоба Є PRobe|ПРоба Є PRobe•' >>> s.join((str('(utf8 тест1)'), unicode('(unicode тест2)','utf-8'), '(ascii test3)')) '(utf8 тест1)ПРоба Є PRobe(unicode тест2)ПРоба Є PRobe(ascii test3)' >>> type(s) <class '__main__.Utf8'> >>> s==test_str True >>> s==test_unicode True >>> s.swapcase() 'прОБА є prOBE' >>> type(s.swapcase()) <class '__main__.Utf8'> >>> truncate(s, 10) 'ПРоба Є...' >>> truncate(s, 20) 'ПРоба Є PRobe' >>> truncate(s, 10, '•••') # utf-8 string as *dots* 'ПРоба Є•••' >>> truncate(s, 10, u'®') # you can use unicode string as *dots* 'ПРоба Є P®' >>> type(truncate(s, 10)) <class '__main__.Utf8'> >>> Utf8(s.encode('koi8-u'), 'koi8-u') 'ПРоба Є PRobe' >>> s.decode() # convert utf-8 string to unicode u'\\u041f\\u0420\\u043e\\u0431\\u0430 \\u0404 PRobe' >>> a='про\\tba' >>> str_tmp=a.expandtabs() >>> utf8_tmp=Utf8(a).expandtabs() >>> utf8_tmp.replace(' ','.') # RIGHT! (default tabsize is 8) 'про.....ba' >>> utf8_tmp.index('b') 8 >>> print "'"+str_tmp.replace(' ','.')+"'" # WRONG STRING LENGTH! 'про..ba' >>> str_tmp.index('b') # WRONG index of 'b' character 8 >>> print "'"+a.expandtabs(4).replace(' ','.')+"'" # WRONG RESULT! 'про..ba' >>> Utf8(a).expandtabs(4).replace(' ','.') # RIGHT! 
'про.ba' >>> s.find('Є') 6 >>> s.find(u'Є') 6 >>> s.find(' ', 6) 7 >>> s.rfind(' ') 7 >>> s.partition('Є') ('ПРоба ', 'Є', ' PRobe') >>> s.partition(u'Є') ('ПРоба ', 'Є', ' PRobe') >>> (a,b,c) = s.partition('Є') >>> type(a), type(b), type(c) (<class '__main__.Utf8'>, <class '__main__.Utf8'>, <class '__main__.Utf8'>) >>> s.partition(' ') ('ПРоба', ' ', 'Є PRobe') >>> s.rpartition(' ') ('ПРоба Є', ' ', 'PRobe') >>> s.index('Є') 6 >>> s.rindex(u'Є') 6 >>> s.index(' ') 5 >>> s.rindex(' ') 7 >>> a=Utf8('а б ц д е а б ц д е а\\tб ц д е') >>> a.split() ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е'] >>> a.rsplit() ['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д', 'е'] >>> a.expandtabs().split('б') ['а ', ' ц д е а ', ' ц д е а ', ' ц д е'] >>> a.expandtabs().rsplit('б') ['а ', ' ц д е а ', ' ц д е а ', ' ц д е'] >>> a.expandtabs().split(u'б', 1) ['а ', ' ц д е а б ц д е а б ц д е'] >>> a.expandtabs().rsplit(u'б', 1) ['а б ц д е а б ц д е а ', ' ц д е'] >>> a=Utf8("рядок1\\nрядок2\\nрядок3") >>> a.splitlines() ['рядок1', 'рядок2', 'рядок3'] >>> a.splitlines(True) ['рядок1\\n', 'рядок2\\n', 'рядок3'] >>> s[6] 'Є' >>> s[0] 'П' >>> s[-1] 'e' >>> s[:10] 'ПРоба Є PR' >>> s[2:-2:2] 'оаЄPo' >>> s[::-1] 'eboRP Є абоРП' >>> s.startswith('ПР') True >>> s.startswith(('ПР', u'об'),0) True >>> s.startswith(u'об', 2, 4) True >>> s.endswith('be') True >>> s.endswith(('be', 'PR', u'Є')) True >>> s.endswith('PR', 8, 10) True >>> s.endswith('Є', -7, -6) True >>> s.count(' ') 2 >>> s.count(' ',6) 1 >>> s.count(u'Є') 1 >>> s.count('Є', 0, 5) 0 >>> Utf8( "Parameters: '%(проба)s', %(probe)04d, %(проба2)s") % { u"проба": s, ... "not used": "???", "probe": 2, "проба2": u"ПРоба Probe" } "Parameters: 'ПРоба Є PRobe', 0002, ПРоба Probe" >>> a=Utf8(u"Параметр: (%s)-(%s)-[%s]") >>> a%=(s, s[::-1], 1000) >>> a 'Параметр: (ПРоба Є PRobe)-(eboRP Є абоРП)-[1000]' >>> if hasattr(Utf8, 'format'): ... Utf8("Проба <{0}>, {1}, {param1}, {param2}").format(s, u"中文字", ... param1="барабан", param2=1000) == 'Проба <ПРоба Є PRobe>, 中文字, барабан, 1000' ... else: # format() method is not used in python with version <2.6: ... print True True >>> u'Б'<u'Ї' # WRONG ORDER! False >>> 'Б'<'Ї' # WRONG ORDER! False >>> Utf8('Б')<'Ї' # RIGHT! True >>> u'д'>u'ґ' # WRONG ORDER! False >>> Utf8('д')>Utf8('ґ') # RIGHT! True >>> u'є'<=u'ж' # WRONG ORDER! False >>> Utf8('є')<=u'ж' # RIGHT! True >>> Utf8('є')<=u'є' True >>> u'Ї'>=u'И' # WRONG ORDER! False >>> Utf8(u'Ї') >= u'И' # RIGHT True >>> Utf8('Є') >= 'Є' True >>> a="яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ" # str type >>> b=u"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ" # unicode type >>> c=Utf8("яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ") # utf8 class >>> result = "".join(sorted(a)) >>> result[0:20] # result is not utf8 string, because bytes, not utf8-characters were sorted '\\x80\\x81\\x82\\x83\\x84\\x84\\x85\\x86\\x86\\x87\\x87\\x88\\x89\\x8c\\x8e\\x8f\\x90\\x90\\x91\\x91' >>> try: ... unicode(result, 'utf-8') # try to convert result (utf-8?) to unicode ... except Exception, e: ... print 'Exception:', e Exception: 'utf8' codec can't decode byte 0x80 in position 0: unexpected code byte >>> try: # FAILED! (working with bytes, not with utf8-charactes) ... "".join( sorted(a, key=sort_key) ) # utf8.sort_key may be used with utf8 or unicode strings only! ... except Exception, e: ... 
print 'Exception:', e Exception: 'utf8' codec can't decode byte 0xd1 in position 0: unexpected end of data >>> print "".join( sorted(Utf8(a))) # converting *a* to unicode or utf8-string gives us correct result аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ >>> print u"".join( sorted(b) ) # WRONG ORDER! Default sort key is used ЄІЇАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯабвгдежзийклмнопрстуфхцчшщьюяєіїҐґ >>> print u"".join( sorted(b, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ >>> print "".join( sorted(c) ) # RIGHT ORDER! Utf8 "rich comparison" methods are used аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ >>> print "".join( sorted(c, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ >>> Utf8().join(sorted(c.decode(), key=sort_key)) # convert to unicode for better performance 'аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ' >>> for result in sorted( ["Іа", "Астро", u"гала", Utf8("Гоша"), "Єва", "шовк", "аякс", "Їжа", ... "ґанок", Utf8("Дар'я"), "білінг", "веб", u"Жужа", "проба", u"тест", ... "абетка", "яблуко", "Юляся", "Київ", "лимонад", "ложка", "Матриця", ... ], key=sort_key): ... print result.ljust(20), type(result) абетка <type 'str'> Астро <type 'str'> аякс <type 'str'> білінг <type 'str'> веб <type 'str'> гала <type 'unicode'> ґанок <type 'str'> Гоша <class '__main__.Utf8'> Дар'я <class '__main__.Utf8'> Єва <type 'str'> Жужа <type 'unicode'> Іа <type 'str'> Їжа <type 'str'> Київ <type 'str'> лимонад <type 'str'> ложка <type 'str'> Матриця <type 'str'> проба <type 'str'> тест <type 'unicode'> шовк <type 'str'> Юляся <type 'str'> яблуко <type 'str'> >>> a=Utf8("中文字") >>> L=list(a) >>> L ['中', '文', '字'] >>> a="".join(L) >>> print a 中文字 >>> type(a) <type 'str'> >>> a="中文字" # standard str type >>> L=list(a) >>> L ['\\xe4', '\\xb8', '\\xad', '\\xe6', '\\x96', '\\x87', '\\xe5', '\\xad', '\\x97'] >>> from string import maketrans >>> str_tab=maketrans('PRobe','12345') >>> unicode_tab={ord(u'П'):ord(u'Ж'), ... ord(u'Р') : u'Ш', ... ord(Utf8('о')) : None, # utf8.ord() is used ... ord('б') : None, # -//-//- ... ord(u'а') : u"中文字", ... ord(u'Є') : Utf8('•').decode(), # only unicode type is supported ... } >>> s.translate(unicode_tab).translate(str_tab, deletechars=' ') 'ЖШ中文字•12345' """ import sys reload(sys) sys.setdefaultencoding("UTF-8") import doctest print "DOCTESTS STARTED..." doctest.testmod() print "DOCTESTS FINISHED" doctests()
Python
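A minimal sketch of the Utf8 wrapper above (the gluon.utf8 import path is assumed from the web2py layout): length, slicing and case operations work on characters rather than bytes.

from gluon.utf8 import Utf8

s = Utf8('ПРоба Є PRobe')
print(len(s))          # 13 characters, although the byte string is 19 bytes long
print(s[:5])           # 'ПРоба' -- slicing counts characters, not bytes
print(s.upper())       # 'ПРОБА Є PROBE'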
#!/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Thanks to * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support * Denes * Chris Clark * clach05 * Denes Lengyel * and many others who have contributed to current and previous versions This file contains the DAL support for many relational databases, including: - SQLite & SpatiaLite - MySQL - Postgres - Firebird - Oracle - MS SQL - DB2 - Interbase - Ingres - Informix (9+ and SE) - SapDB (experimental) - Cubrid (experimental) - CouchDB (experimental) - MongoDB (in progress) - Google:nosql - Google:sql - Teradata - IMAP (experimental) Example of usage: >>> # from dal import DAL, Field ### create DAL connection (and create DB if it doesn't exist) >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), ... folder=None) ### define a table 'person' (create/alter as necessary) >>> person = db.define_table('person',Field('name','string')) ### insert a record >>> id = person.insert(name='James') ### retrieve it by id >>> james = person(id) ### retrieve it by name >>> james = person(name='James') ### retrieve it by arbitrary query >>> query = (person.name=='James') & (person.name.startswith('J')) >>> james = db(query).select(person.ALL)[0] ### update one record >>> james.update_record(name='Jim') <Row {'id': 1, 'name': 'Jim'}> ### update multiple records by query >>> db(person.name.like('J%')).update(name='James') 1 ### delete records by query >>> db(person.name.lower() == 'jim').delete() 0 ### retrieve multiple records (rows) >>> people = db(person).select(orderby=person.name, ... groupby=person.name, limitby=(0,100)) ### further filter them >>> james = people.find(lambda row: row.name == 'James').first() >>> print james.id, james.name 1 James ### check aggregates >>> counter = person.id.count() >>> print db(person).select(counter).first()(counter) 1 ### delete one record >>> james.delete_record() 1 ### delete (drop) entire database table >>> person.drop() Supported field types: id string text boolean integer double decimal password upload blob time date datetime Supported DAL URI strings: 'sqlite://test.db' 'spatialite://test.db' 'sqlite:memory' 'spatialite:memory' 'jdbc:sqlite://test.db' 'mysql://root:none@localhost/test' 'postgres://mdipierro:password@localhost/test' 'postgres:psycopg2://mdipierro:password@localhost/test' 'postgres:pg8000://mdipierro:password@localhost/test' 'jdbc:postgres://mdipierro:none@localhost/test' 'mssql://web2py:none@A64X2/web2py_test' 'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 'oracle://username:password@database' 'firebird://user:password@server:3050/database' 'db2://DSN=dsn;UID=user;PWD=pass' 'firebird://username:password@hostname/database' 'firebird_embedded://username:password@c://path' 'informix://user:password@server:3050/database' 'informixu://user:password@server:3050/database' # unicode informix 'ingres://database' # or use an ODBC connection string, e.g. 
'ingres://dsn=dsn_name' 'google:datastore' # for google app engine datastore 'google:sql' # for google app engine with sql (mysql compatible) 'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 'imap://user:password@server:port' # experimental 'mongodb://user:password@server:port/database' # experimental For more info: help(DAL) help(Field) """ ################################################################################### # this file only exposes DAL and Field ################################################################################### __all__ = ['DAL', 'Field'] MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length DEFAULTLENGTH = {'string':512, 'password':512, 'upload':512, 'text':2**15, 'blob':2**31} TIMINGSSIZE = 100 SPATIALLIBS = { 'Windows':'libspatialite', 'Linux':'libspatialite.so', 'Darwin':'libspatialite.dylib' } DEFAULT_URI = 'sqlite://dummy.db' import re import sys import locale import os import types import datetime import threading import time import csv import cgi import copy import socket import logging import base64 import shutil import marshal import decimal import struct import urllib import hashlib import uuid import glob import traceback import platform PYTHON_VERSION = sys.version_info[0] if PYTHON_VERSION == 2: import cPickle as pickle import cStringIO as StringIO import copy_reg as copyreg hashlib_md5 = hashlib.md5 bytes, unicode = str, unicode else: import pickle from io import StringIO as StringIO import copyreg long = int hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) bytes, unicode = bytes, str CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType) TABLE_ARGS = set( ('migrate','primarykey','fake_migrate','format','redefine', 'singular','plural','trigger_name','sequence_name', 'common_filter','polymodel','table_class','on_define','actual_name')) SELECT_ARGS = set( ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) ogetattr = object.__getattribute__ osetattr = object.__setattr__ exists = os.path.exists pjoin = os.path.join ################################################################################### # following checks allow the use of dal without web2py, as a standalone module ################################################################################### try: from utils import web2py_uuid except (ImportError, SystemError): import uuid def web2py_uuid(): return str(uuid.uuid4()) try: import portalocker have_portalocker = True except ImportError: have_portalocker = False try: import serializers have_serializers = True except ImportError: have_serializers = False try: import json as simplejson except ImportError: try: import gluon.contrib.simplejson as simplejson except ImportError: simplejson = None try: import validators have_validators = True except (ImportError, SyntaxError): have_validators = False LOGGER = logging.getLogger("web2py.dal") DEFAULT = lambda:0 GLOBAL_LOCKER = threading.RLock() THREAD_LOCAL = threading.local() # internal representation of tables with field # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] REGEX_TYPE = re.compile('^([\w\_\:]+)') REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') REGEX_W = re.compile('^\w+$') REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') REGEX_CLEANUP_FN 
= re.compile('[\'"\s;]+') REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') REGEX_QUOTES = re.compile("'[^']*'") REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') REGEX_PASSWORD = re.compile('\://([^:@]*)\:') REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' # list of drivers will be built on the fly # and lists only what is available DRIVERS = [] try: from new import classobj from google.appengine.ext import db as gae from google.appengine.api import namespace_manager, rdbms from google.appengine.api.datastore_types import Key ### for belongs on ID from google.appengine.ext.db.polymodel import PolyModel DRIVERS.append('google') except ImportError: pass if not 'google' in DRIVERS: try: from pysqlite2 import dbapi2 as sqlite2 DRIVERS.append('SQLite(sqlite2)') except ImportError: LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') try: from sqlite3 import dbapi2 as sqlite3 DRIVERS.append('SQLite(sqlite3)') except ImportError: LOGGER.debug('no SQLite drivers sqlite3') try: # first try contrib driver, then from site-packages (if installed) try: import contrib.pymysql as pymysql # monkeypatch pymysql because they havent fixed the bug: # https://github.com/petehunt/PyMySQL/issues/86 pymysql.ESCAPE_REGEX = re.compile("'") pymysql.ESCAPE_MAP = {"'": "''"} # end monkeypatch except ImportError: import pymysql DRIVERS.append('MySQL(pymysql)') except ImportError: LOGGER.debug('no MySQL driver pymysql') try: import MySQLdb DRIVERS.append('MySQL(MySQLdb)') except ImportError: LOGGER.debug('no MySQL driver MySQLDB') try: import psycopg2 from psycopg2.extensions import adapt as psycopg2_adapt DRIVERS.append('PostgreSQL(psycopg2)') except ImportError: LOGGER.debug('no PostgreSQL driver psycopg2') try: # first try contrib driver, then from site-packages (if installed) try: import contrib.pg8000.dbapi as pg8000 except ImportError: import pg8000.dbapi as pg8000 DRIVERS.append('PostgreSQL(pg8000)') except ImportError: LOGGER.debug('no PostgreSQL driver pg8000') try: import cx_Oracle DRIVERS.append('Oracle(cx_Oracle)') except ImportError: LOGGER.debug('no Oracle driver cx_Oracle') try: try: import pyodbc except ImportError: try: import contrib.pypyodbc as pyodbc except Exception, e: raise ImportError(str(e)) DRIVERS.append('MSSQL(pyodbc)') DRIVERS.append('DB2(pyodbc)') DRIVERS.append('Teradata(pyodbc)') DRIVERS.append('Ingres(pyodbc)') except ImportError: LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') try: import Sybase DRIVERS.append('Sybase(Sybase)') except ImportError: LOGGER.debug('no Sybase driver') try: import kinterbasdb DRIVERS.append('Interbase(kinterbasdb)') DRIVERS.append('Firebird(kinterbasdb)') except ImportError: LOGGER.debug('no Firebird/Interbase driver kinterbasdb') try: import fdb DRIVERS.append('Firebird(fdb)') except ImportError: LOGGER.debug('no Firebird driver fdb') ##### try: import firebirdsql DRIVERS.append('Firebird(firebirdsql)') except ImportError: LOGGER.debug('no Firebird 
driver firebirdsql') try: import informixdb DRIVERS.append('Informix(informixdb)') LOGGER.warning('Informix support is experimental') except ImportError: LOGGER.debug('no Informix driver informixdb') try: import sapdb DRIVERS.append('SQL(sapdb)') LOGGER.warning('SAPDB support is experimental') except ImportError: LOGGER.debug('no SAP driver sapdb') try: import cubriddb DRIVERS.append('Cubrid(cubriddb)') LOGGER.warning('Cubrid support is experimental') except ImportError: LOGGER.debug('no Cubrid driver cubriddb') try: from com.ziclix.python.sql import zxJDBC import java.sql # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ from org.sqlite import JDBC # required by java.sql; ensure we have it zxJDBC_sqlite = java.sql.DriverManager DRIVERS.append('PostgreSQL(zxJDBC)') DRIVERS.append('SQLite(zxJDBC)') LOGGER.warning('zxJDBC support is experimental') is_jdbc = True except ImportError: LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') is_jdbc = False try: import couchdb DRIVERS.append('CouchDB(couchdb)') except ImportError: LOGGER.debug('no Couchdb driver couchdb') try: import pymongo DRIVERS.append('MongoDB(pymongo)') except: LOGGER.debug('no MongoDB driver pymongo') try: import imaplib DRIVERS.append('IMAP(imaplib)') except: LOGGER.debug('no IMAP driver imaplib') PLURALIZE_RULES = [ (re.compile('child$'), re.compile('child$'), 'children'), (re.compile('oot$'), re.compile('oot$'), 'eet'), (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), (re.compile('sis$'), re.compile('sis$'), 'ses'), (re.compile('man$'), re.compile('man$'), 'men'), (re.compile('ife$'), re.compile('ife$'), 'ives'), (re.compile('eau$'), re.compile('eau$'), 'eaux'), (re.compile('lf$'), re.compile('lf$'), 'lves'), (re.compile('[sxz]$'), re.compile('$'), 'es'), (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), (re.compile('$'), re.compile('$'), 's'), ] def pluralize(singular, rules=PLURALIZE_RULES): for line in rules: re_search, re_sub, replace = line plural = re_search.search(singular) and re_sub.sub(replace, singular) if plural: return plural def hide_password(uri): if isinstance(uri,(list,tuple)): return [hide_password(item) for item in uri] return REGEX_NOPASSWD.sub('******',uri) def OR(a,b): return a|b def AND(a,b): return a&b def IDENTITY(x): return x def varquote_aux(name,quotestr='%s'): return name if REGEX_W.match(name) else quotestr % name def quote_keyword(a,keyword='timestamp'): regex = re.compile('\.keyword(?=\w)') a = regex.sub('."%s"' % keyword,a) return a if 'google' in DRIVERS: is_jdbc = False class GAEDecimalProperty(gae.Property): """ GAE decimal implementation """ data_type = decimal.Decimal def __init__(self, precision, scale, **kwargs): super(GAEDecimalProperty, self).__init__(self, **kwargs) d = '1.' 
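        # Note (added comment, not in the original source): the loop below builds a
        # quantization template from the requested scale, e.g. scale=2 gives d == '1.00',
        # so make_value_from_datastore() can round stored strings with
        # decimal.Decimal(value).quantize(self.round).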
for x in range(scale): d += '0' self.round = decimal.Decimal(d) def get_value_for_datastore(self, model_instance): value = super(GAEDecimalProperty, self)\ .get_value_for_datastore(model_instance) if value is None or value == '': return None else: return str(value) def make_value_from_datastore(self, value): if value is None or value == '': return None else: return decimal.Decimal(value).quantize(self.round) def validate(self, value): value = super(GAEDecimalProperty, self).validate(value) if value is None or isinstance(value, decimal.Decimal): return value elif isinstance(value, basestring): return decimal.Decimal(value) raise gae.BadValueError("Property %s must be a Decimal or string."\ % self.name) ################################################################################### # class that handles connection pooling (all adapters are derived from this one) ################################################################################### class ConnectionPool(object): POOLS = {} check_active_connection = True @staticmethod def set_folder(folder): THREAD_LOCAL.folder = folder # ## this allows gluon to commit/rollback all dbs in this thread def close(self,action='commit',really=True): if action: if callable(action): action(self) else: getattr(self, action)() # ## if you want pools, recycle this connection if self.pool_size: GLOBAL_LOCKER.acquire() pool = ConnectionPool.POOLS[self.uri] if len(pool) < self.pool_size: pool.append(self.connection) really = False GLOBAL_LOCKER.release() if really: self.close_connection() self.connection = None @staticmethod def close_all_instances(action): """ to close cleanly databases in a multithreaded environment """ dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() for db_uid, db_group in dbs: for db in db_group: if hasattr(db,'_adapter'): db._adapter.close(action) getattr(THREAD_LOCAL,'db_instances',{}).clear() getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() if callable(action): action(None) return def find_or_make_work_folder(self): """ this actually does not make the folder. 
it has to be there """ self.folder = getattr(THREAD_LOCAL,'folder','') # Creating the folder if it does not exist if False and self.folder and not exists(self.folder): os.mkdir(self.folder) def after_connection_hook(self): """hook for the after_connection parameter""" if callable(self._after_connection): self._after_connection(self) self.after_connection() def after_connection(self): """ this it is supposed to be overloaded by adapters""" pass def reconnect(self, f=None, cursor=True): """ this function defines: self.connection and self.cursor (iff cursor is True) if self.pool_size>0 it will try pull the connection from the pool if the connection is not active (closed by db server) it will loop if not self.pool_size or no active connections in pool makes a new one """ if getattr(self,'connection', None) != None: return if f is None: f = self.connector if not hasattr(self, "driver") or self.driver is None: LOGGER.debug("Skipping connection since there's no driver") return if not self.pool_size: self.connection = f() self.cursor = cursor and self.connection.cursor() else: uri = self.uri POOLS = ConnectionPool.POOLS while True: GLOBAL_LOCKER.acquire() if not uri in POOLS: POOLS[uri] = [] if POOLS[uri]: self.connection = POOLS[uri].pop() GLOBAL_LOCKER.release() self.cursor = cursor and self.connection.cursor() try: if self.cursor and self.check_active_connection: self.execute('SELECT 1;') break except: pass else: GLOBAL_LOCKER.release() self.connection = f() self.cursor = cursor and self.connection.cursor() break self.after_connection_hook() ################################################################################### # this is a generic adapter that does nothing; all others are derived from this one ################################################################################### class BaseAdapter(ConnectionPool): native_json = False driver = None driver_name = None drivers = () # list of drivers from which to pick connection = None maxcharlength = MAXCHARLENGTH commit_on_alter_table = False support_distributed_transaction = False uploads_in_blob = False can_select_for_update = True TRUE = 'T' FALSE = 'F' T_SEP = ' ' types = { 'boolean': 'CHAR(1)', 'string': 'CHAR(%(length)s)', 'text': 'TEXT', 'json': 'TEXT', 'password': 'CHAR(%(length)s)', 'blob': 'BLOB', 'upload': 'CHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'INTEGER', 'float':'DOUBLE', 'double': 'DOUBLE', 'decimal': 'DOUBLE', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def id_query(self, table): return table._id != None def adapt(self, obj): return "'%s'" % obj.replace("'", "''") def smart_adapt(self, obj): if isinstance(obj,(int,float)): return str(obj) return self.adapt(str(obj)) def integrity_error(self): return self.driver.IntegrityError def operational_error(self): return self.driver.OperationalError def file_exists(self, filename): """ to be used ONLY for files that on GAE may not be on filesystem """ return exists(filename) def file_open(self, filename, mode='rb', lock=True): """ to be used ONLY for files that on GAE may not be on filesystem """ if have_portalocker and lock: fileobj = 
portalocker.LockedFile(filename,mode) else: fileobj = open(filename,mode) return fileobj def file_close(self, fileobj): """ to be used ONLY for files that on GAE may not be on filesystem """ if fileobj: fileobj.close() def file_delete(self, filename): os.unlink(filename) def find_driver(self,adapter_args,uri=None): if getattr(self,'driver',None) != None: return drivers_available = [driver for driver in self.drivers if driver in globals()] if uri: items = uri.split('://',1)[0].split(':') request_driver = items[1] if len(items)>1 else None else: request_driver = None request_driver = request_driver or adapter_args.get('driver') if request_driver: if request_driver in drivers_available: self.driver_name = request_driver self.driver = globals().get(request_driver) else: raise RuntimeError("driver %s not available" % request_driver) elif drivers_available: self.driver_name = drivers_available[0] self.driver = globals().get(self.driver_name) else: raise RuntimeError("no driver available %s" % str(self.drivers)) def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={},do_connect=True, after_connection=None): self.db = db self.dbengine = "None" self.uri = uri self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection class Dummy(object): lastrowid = 1 def __getattr__(self, value): return lambda *a, **b: [] self.connection = Dummy() self.cursor = Dummy() def sequence_name(self,tablename): return '%s_sequence' % tablename def trigger_name(self,tablename): return '%s_sequence' % tablename def varquote(self,name): return name def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): db = table._db fields = [] # PostGIS geo fields are added after the table has been created postcreation_fields = [] sql_fields = {} sql_fields_aux = {} TFK = {} tablename = table._tablename sortable = 0 types = self.types for field in table: sortable += 1 field_name = field.name field_type = field.type if isinstance(field_type,SQLCustomType): ftype = field_type.native or field_type.type elif field_type.startswith('reference'): referenced = field_type[10:].strip() if referenced == '.': referenced = tablename constraint_name = self.constraint_name(tablename, field_name) if not '.' in referenced \ and referenced != tablename \ and hasattr(table,'_primarykey'): ftype = types['integer'] else: if hasattr(table,'_primarykey'): rtablename,rfieldname = referenced.split('.') rtable = db[rtablename] rfield = rtable[rfieldname] # must be PK reference or unique if rfieldname in rtable._primarykey or \ rfield.unique: ftype = types[rfield.type[:9]] % \ dict(length=rfield.length) # multicolumn primary key reference? 
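                            # (added note, a sketch of the intent: TFK maps the referenced
                            #  table name to {referenced pk column: local fk column}; it is
                            #  turned into a single table-level FOREIGN KEY clause further
                            #  down, once all participating columns have been collected)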
if not rfield.unique and len(rtable._primarykey)>1: # then it has to be a table level FK if rtablename not in TFK: TFK[rtablename] = {} TFK[rtablename][rfieldname] = field_name else: ftype = ftype + \ types['reference FK'] % dict( constraint_name = constraint_name, # should be quoted foreign_key = '%s (%s)' % (rtablename, rfieldname), table_name = tablename, field_name = field_name, on_delete_action=field.ondelete) else: # make a guess here for circular references if referenced in db: id_fieldname = db[referenced]._id.name elif referenced == tablename: id_fieldname = table._id.name else: #make a guess id_fieldname = 'id' ftype = types[field_type[:9]] % dict( index_name = field_name+'__idx', field_name = field_name, constraint_name = constraint_name, foreign_key = '%s (%s)' % (referenced, id_fieldname), on_delete_action=field.ondelete) elif field_type.startswith('list:reference'): ftype = types[field_type[:14]] elif field_type.startswith('decimal'): precision, scale = map(int,field_type[8:-1].split(',')) ftype = types[field_type[:7]] % \ dict(precision=precision,scale=scale) elif field_type.startswith('geo'): if not hasattr(self,'srid'): raise RuntimeError('Adapter does not support geometry') srid = self.srid geotype, parms = field_type[:-1].split('(') if not geotype in types: raise SyntaxError( 'Field: unknown field type: %s for %s' \ % (field_type, field_name)) ftype = types[geotype] if self.dbengine == 'postgres' and geotype == 'geometry': # parameters: schema, srid, dimension dimension = 2 # GIS.dimension ??? parms = parms.split(',') if len(parms) == 3: schema, srid, dimension = parms elif len(parms) == 2: schema, srid = parms else: schema = parms[0] ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype] ftype = ftype % dict(schema=schema, tablename=tablename, fieldname=field_name, srid=srid, dimension=dimension) postcreation_fields.append(ftype) elif not field_type in types: raise SyntaxError('Field: unknown field type: %s for %s' % \ (field_type, field_name)) else: ftype = types[field_type]\ % dict(length=field.length) if not field_type.startswith('id') and \ not field_type.startswith('reference'): if field.notnull: ftype += ' NOT NULL' else: ftype += self.ALLOW_NULL() if field.unique: ftype += ' UNIQUE' if field.custom_qualifier: ftype += ' %s' % field.custom_qualifier # add to list of fields sql_fields[field_name] = dict( length=field.length, unique=field.unique, notnull=field.notnull, sortable=sortable, type=str(field_type), sql=ftype) if field.notnull and not field.default is None: # Caveat: sql_fields and sql_fields_aux # differ for default values. # sql_fields is used to trigger migrations and sql_fields_aux # is used for create tables. # The reason is that we do not want to trigger # a migration simply because a default value changes. 
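                # (added note: e.g. a notnull string field with default 'x' produces
                #  "... NOT NULL DEFAULT 'x'" in the CREATE TABLE built from
                #  sql_fields_aux, while sql_fields keeps the plain "NOT NULL" form
                #  used only for change detection)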
not_null = self.NOT_NULL(field.default, field_type) ftype = ftype.replace('NOT NULL', not_null) sql_fields_aux[field_name] = dict(sql=ftype) # Postgres - PostGIS: # geometry fields are added after the table has been created, not now if not (self.dbengine == 'postgres' and \ field_type.startswith('geom')): fields.append('%s %s' % (field_name, ftype)) other = ';' # backend-specific extensions to fields if self.dbengine == 'mysql': if not hasattr(table, "_primarykey"): fields.append('PRIMARY KEY(%s)' % table._id.name) other = ' ENGINE=InnoDB CHARACTER SET utf8;' fields = ',\n '.join(fields) for rtablename in TFK: rfields = TFK[rtablename] pkeys = db[rtablename]._primarykey fkeys = [ rfields[k] for k in pkeys ] fields = fields + ',\n ' + \ types['reference TFK'] % dict( table_name = tablename, field_name=', '.join(fkeys), foreign_table = rtablename, foreign_key = ', '.join(pkeys), on_delete_action = field.ondelete) if getattr(table,'_primarykey',None): query = "CREATE TABLE %s(\n %s,\n %s) %s" % \ (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other) else: query = "CREATE TABLE %s(\n %s\n)%s" % \ (tablename, fields, other) if self.uri.startswith('sqlite:///') \ or self.uri.startswith('spatialite:///'): path_encoding = sys.getfilesystemencoding() \ or locale.getdefaultlocale()[1] or 'utf8' dbpath = self.uri[9:self.uri.rfind('/')]\ .decode('utf8').encode(path_encoding) else: dbpath = self.folder if not migrate: return query elif self.uri.startswith('sqlite:memory')\ or self.uri.startswith('spatialite:memory'): table._dbt = None elif isinstance(migrate, str): table._dbt = pjoin(dbpath, migrate) else: table._dbt = pjoin( dbpath, '%s_%s.table' % (table._db._uri_hash, tablename)) if table._dbt: table._loggername = pjoin(dbpath, 'sql.log') logfile = self.file_open(table._loggername, 'a') else: logfile = None if not table._dbt or not self.file_exists(table._dbt): if table._dbt: logfile.write('timestamp: %s\n' % datetime.datetime.today().isoformat()) logfile.write(query + '\n') if not fake_migrate: self.create_sequence_and_triggers(query,table) table._db.commit() # Postgres geom fields are added now, # after the table has been created for query in postcreation_fields: self.execute(query) table._db.commit() if table._dbt: tfile = self.file_open(table._dbt, 'w') pickle.dump(sql_fields, tfile) self.file_close(tfile) if fake_migrate: logfile.write('faked!\n') else: logfile.write('success!\n') else: tfile = self.file_open(table._dbt, 'r') try: sql_fields_old = pickle.load(tfile) except EOFError: self.file_close(tfile) self.file_close(logfile) raise RuntimeError('File %s appears corrupted' % table._dbt) self.file_close(tfile) if sql_fields != sql_fields_old: self.migrate_table(table, sql_fields, sql_fields_old, sql_fields_aux, logfile, fake_migrate=fake_migrate) self.file_close(logfile) return query def migrate_table( self, table, sql_fields, sql_fields_old, sql_fields_aux, logfile, fake_migrate=False, ): db = table._db db._migrated.append(table._tablename) tablename = table._tablename def fix(item): k,v=item if not isinstance(v,dict): v=dict(type='unkown',sql=v) return k.lower(),v # make sure all field names are lower case to avoid # migrations because of case cahnge sql_fields = dict(map(fix,sql_fields.iteritems())) sql_fields_old = dict(map(fix,sql_fields_old.iteritems())) sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems())) if db._debug: logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields)) keys = sql_fields.keys() for key in sql_fields_old: if not key in keys: 
keys.append(key) new_add = self.concat_add(tablename) metadata_change = False sql_fields_current = copy.copy(sql_fields_old) for key in keys: query = None if not key in sql_fields_old: sql_fields_current[key] = sql_fields[key] if self.dbengine in ('postgres',) and \ sql_fields[key]['type'].startswith('geometry'): # 'sql' == ftype in sql query = [ sql_fields[key]['sql'] ] else: query = ['ALTER TABLE %s ADD %s %s;' % \ (tablename, key, sql_fields_aux[key]['sql'].replace(', ', new_add))] metadata_change = True elif self.dbengine in ('sqlite', 'spatialite'): if key in sql_fields: sql_fields_current[key] = sql_fields[key] metadata_change = True elif not key in sql_fields: del sql_fields_current[key] ftype = sql_fields_old[key]['type'] if self.dbengine in ('postgres',) \ and ftype.startswith('geometry'): geotype, parms = ftype[:-1].split('(') schema = parms.split(',')[0] query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ] elif not self.dbengine in ('firebird',): query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)] else: query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] metadata_change = True elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ and not (key in table.fields and isinstance(table[key].type, SQLCustomType)) \ and not sql_fields[key]['type'].startswith('reference')\ and not sql_fields[key]['type'].startswith('double')\ and not sql_fields[key]['type'].startswith('id'): sql_fields_current[key] = sql_fields[key] t = tablename tt = sql_fields_aux[key]['sql'].replace(', ', new_add) if not self.dbengine in ('firebird',): query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), 'ALTER TABLE %s DROP COLUMN %s;' % (t, key), 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), 'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)] else: query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), 'ALTER TABLE %s DROP %s;' % (t, key), 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), 'ALTER TABLE %s DROP %s__tmp;' % (t, key)] metadata_change = True elif sql_fields[key]['type'] != sql_fields_old[key]['type']: sql_fields_current[key] = sql_fields[key] metadata_change = True if query: logfile.write('timestamp: %s\n' % datetime.datetime.today().isoformat()) db['_lastsql'] = '\n'.join(query) for sub_query in query: logfile.write(sub_query + '\n') if not fake_migrate: self.execute(sub_query) # Caveat: mysql, oracle and firebird do not allow multiple alter table # in one transaction so we must commit partial transactions and # update table._dbt after alter table. 
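                    # (added note: e.g. MySQLAdapter sets commit_on_alter_table = True,
                    #  so each ALTER above is committed and the .table metadata file is
                    #  rewritten immediately rather than at the end of the migration)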
if db._adapter.commit_on_alter_table: db.commit() tfile = self.file_open(table._dbt, 'w') pickle.dump(sql_fields_current, tfile) self.file_close(tfile) logfile.write('success!\n') else: logfile.write('faked!\n') elif metadata_change: tfile = self.file_open(table._dbt, 'w') pickle.dump(sql_fields_current, tfile) self.file_close(tfile) if metadata_change and \ not (query and self.dbengine in ('mysql','oracle','firebird')): db.commit() tfile = self.file_open(table._dbt, 'w') pickle.dump(sql_fields_current, tfile) self.file_close(tfile) def LOWER(self, first): return 'LOWER(%s)' % self.expand(first) def UPPER(self, first): return 'UPPER(%s)' % self.expand(first) def COUNT(self, first, distinct=None): return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ % self.expand(first) def EXTRACT(self, first, what): return "EXTRACT(%s FROM %s)" % (what, self.expand(first)) def EPOCH(self, first): return self.EXTRACT(first, 'epoch') def LENGTH(self, first): return "LENGTH(%s)" % self.expand(first) def AGGREGATE(self, first, what): return "%s(%s)" % (what, self.expand(first)) def JOIN(self): return 'JOIN' def LEFT_JOIN(self): return 'LEFT JOIN' def RANDOM(self): return 'Random()' def NOT_NULL(self, default, field_type): return 'NOT NULL DEFAULT %s' % self.represent(default,field_type) def COALESCE(self, first, second): expressions = [self.expand(first)]+[self.expand(e) for e in second] return 'COALESCE(%s)' % ','.join(expressions) def COALESCE_ZERO(self, first): return 'COALESCE(%s,0)' % self.expand(first) def RAW(self, first): return first def ALLOW_NULL(self): return '' def SUBSTRING(self, field, parameters): return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) def PRIMARY_KEY(self, key): return 'PRIMARY KEY(%s)' % key def _drop(self, table, mode): return ['DROP TABLE %s;' % table] def drop(self, table, mode=''): db = table._db if table._dbt: logfile = self.file_open(table._loggername, 'a') queries = self._drop(table, mode) for query in queries: if table._dbt: logfile.write(query + '\n') self.execute(query) db.commit() del db[table._tablename] del db.tables[db.tables.index(table._tablename)] db._remove_references_to(table) if table._dbt: self.file_delete(table._dbt) logfile.write('success!\n') def _insert(self, table, fields): if fields: keys = ','.join(f.name for f, v in fields) values = ','.join(self.expand(v, f.type) for f, v in fields) return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) else: return self._insert_empty(table) def _insert_empty(self, table): return 'INSERT INTO %s DEFAULT VALUES;' % table def insert(self, table, fields): query = self._insert(table,fields) try: self.execute(query) except Exception: e = sys.exc_info()[1] if isinstance(e,self.integrity_error_class()): return None raise e if hasattr(table,'_primarykey'): return dict([(k[0].name, k[1]) for k in fields \ if k[0].name in table._primarykey]) id = self.lastrowid(table) if not isinstance(id,int): return id rid = Reference(id) (rid._table, rid._record) = (table, None) return rid def bulk_insert(self, table, items): return [self.insert(table,item) for item in items] def NOT(self, first): return '(NOT %s)' % self.expand(first) def AND(self, first, second): return '(%s AND %s)' % (self.expand(first), self.expand(second)) def OR(self, first, second): return '(%s OR %s)' % (self.expand(first), self.expand(second)) def BELONGS(self, first, second): if isinstance(second, str): return '(%s IN (%s))' % (self.expand(first), second[:-1]) elif not second: return '(1=0)' items = 
','.join(self.expand(item, first.type) for item in second) return '(%s IN (%s))' % (self.expand(first), items) def REGEXP(self, first, second): "regular expression operator" raise NotImplementedError def LIKE(self, first, second): "case sensitive like operator" raise NotImplementedError def ILIKE(self, first, second): "case in-sensitive like operator" return '(%s LIKE %s)' % (self.expand(first), self.expand(second, 'string')) def STARTSWITH(self, first, second): return '(%s LIKE %s)' % (self.expand(first), self.expand(second+'%', 'string')) def ENDSWITH(self, first, second): return '(%s LIKE %s)' % (self.expand(first), self.expand('%'+second, 'string')) def CONTAINS(self,first,second,case_sensitive=False): if first.type in ('string','text', 'json'): second = Expression(None,self.CONCAT('%',Expression( None,self.REPLACE(second,('%','%%'))),'%')) elif first.type.startswith('list:'): second = Expression(None,self.CONCAT('%|',Expression(None,self.REPLACE( Expression(None,self.REPLACE(second,('%','%%'))),('|','||'))),'|%')) op = case_sensitive and self.LIKE or self.ILIKE return op(first,second) def EQ(self, first, second=None): if second is None: return '(%s IS NULL)' % self.expand(first) return '(%s = %s)' % (self.expand(first), self.expand(second, first.type)) def NE(self, first, second=None): if second is None: return '(%s IS NOT NULL)' % self.expand(first) return '(%s <> %s)' % (self.expand(first), self.expand(second, first.type)) def LT(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s < None" % first) return '(%s < %s)' % (self.expand(first), self.expand(second,first.type)) def LE(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s <= None" % first) return '(%s <= %s)' % (self.expand(first), self.expand(second,first.type)) def GT(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s > None" % first) return '(%s > %s)' % (self.expand(first), self.expand(second,first.type)) def GE(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s >= None" % first) return '(%s >= %s)' % (self.expand(first), self.expand(second,first.type)) def is_numerical_type(self, ftype): return ftype in ('integer','boolean','double','bigint') or \ ftype.startswith('decimal') def REPLACE(self, first, (second, third)): return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), self.expand(second,'string'), self.expand(third,'string')) def CONCAT(self, *items): return '(%s)' % ' || '.join(self.expand(x,'string') for x in items) def ADD(self, first, second): if self.is_numerical_type(first.type): return '(%s + %s)' % (self.expand(first), self.expand(second, first.type)) else: return self.CONCAT(first, second) def SUB(self, first, second): return '(%s - %s)' % (self.expand(first), self.expand(second, first.type)) def MUL(self, first, second): return '(%s * %s)' % (self.expand(first), self.expand(second, first.type)) def DIV(self, first, second): return '(%s / %s)' % (self.expand(first), self.expand(second, first.type)) def MOD(self, first, second): return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type)) def AS(self, first, second): return '%s AS %s' % (self.expand(first), second) def ON(self, first, second): if use_common_filters(second): second = self.common_filter(second,[first._tablename]) return '%s ON %s' % (self.expand(first), self.expand(second)) def INVERT(self, first): return '%s DESC' % self.expand(first) def COMMA(self, first, second): return '%s, %s' % (self.expand(first), 
self.expand(second)) def expand(self, expression, field_type=None): if isinstance(expression, Field): return '%s.%s' % (expression.tablename, expression.name) elif isinstance(expression, (Expression, Query)): first = expression.first second = expression.second op = expression.op optional_args = expression.optional_args or {} if not second is None: return op(first, second, **optional_args) elif not first is None: return op(first,**optional_args) elif isinstance(op, str): if op.endswith(';'): op=op[:-1] return '(%s)' % op else: return op() elif field_type: return str(self.represent(expression,field_type)) elif isinstance(expression,(list,tuple)): return ','.join(self.represent(item,field_type) \ for item in expression) elif isinstance(expression, bool): return '1' if expression else '0' else: return str(expression) def table_alias(self,name): return str(name if isinstance(name,Table) else self.db[name]) def alias(self, table, alias): """ Given a table object, makes a new table object with alias name. """ other = copy.copy(table) other['_ot'] = other._ot or other._tablename other['ALL'] = SQLALL(other) other['_tablename'] = alias for fieldname in other.fields: other[fieldname] = copy.copy(other[fieldname]) other[fieldname]._tablename = alias other[fieldname].tablename = alias other[fieldname].table = other table._db[alias] = other return other def _truncate(self, table, mode=''): tablename = table._tablename return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')] def truncate(self, table, mode= ' '): # Prepare functions "write_to_logfile" and "close_logfile" if table._dbt: logfile = self.file_open(table._loggername, 'a') else: class Logfile(object): def write(self, value): pass def close(self): pass logfile = Logfile() try: queries = table._db._adapter._truncate(table, mode) for query in queries: logfile.write(query + '\n') self.execute(query) table._db.commit() logfile.write('success!\n') finally: logfile.close() def _update(self, tablename, query, fields): if query: if use_common_filters(query): query = self.common_filter(query, [tablename]) sql_w = ' WHERE ' + self.expand(query) else: sql_w = '' sql_v = ','.join(['%s=%s' % (field.name, self.expand(value, field.type)) \ for (field, value) in fields]) return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w) def update(self, tablename, query, fields): sql = self._update(tablename, query, fields) self.execute(sql) try: return self.cursor.rowcount except: return None def _delete(self, tablename, query): if query: if use_common_filters(query): query = self.common_filter(query, [tablename]) sql_w = ' WHERE ' + self.expand(query) else: sql_w = '' return 'DELETE FROM %s%s;' % (tablename, sql_w) def delete(self, tablename, query): sql = self._delete(tablename, query) ### special code to handle CASCADE in SQLite & SpatiaLite db = self.db table = db[tablename] if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by: deleted = [x[table._id.name] for x in db(query).select(table._id)] ### end special code to handle CASCADE in SQLite & SpatiaLite self.execute(sql) try: counter = self.cursor.rowcount except: counter = None ### special code to handle CASCADE in SQLite & SpatiaLite if self.dbengine in ('sqlite', 'spatialite') and counter: for field in table._referenced_by: if field.type=='reference '+table._tablename \ and field.ondelete=='CASCADE': db(field.belongs(deleted)).delete() ### end special code to handle CASCADE in SQLite & SpatiaLite return counter def get_table(self, query): tablenames = self.tables(query) if len(tablenames)==1: 
return tablenames[0] elif len(tablenames)<1: raise RuntimeError("No table selected") else: raise RuntimeError("Too many tables selected") def expand_all(self, fields, tablenames): db = self.db new_fields = [] append = new_fields.append for item in fields: if isinstance(item,SQLALL): new_fields += item._table elif isinstance(item,str): if REGEX_TABLE_DOT_FIELD.match(item): tablename,fieldname = item.split('.') append(db[tablename][fieldname]) else: append(Expression(db,lambda item=item:item)) else: append(item) # ## if no fields specified take them all from the requested tables if not new_fields: for table in tablenames: for field in db[table]: append(field) return new_fields def _select(self, query, fields, attributes): tables = self.tables for key in set(attributes.keys())-SELECT_ARGS: raise SyntaxError('invalid select attribute: %s' % key) args_get = attributes.get tablenames = tables(query) tablenames_for_common_filters = tablenames for field in fields: if isinstance(field, basestring) \ and REGEX_TABLE_DOT_FIELD.match(field): tn,fn = field.split('.') field = self.db[tn][fn] for tablename in tables(field): if not tablename in tablenames: tablenames.append(tablename) if len(tablenames) < 1: raise SyntaxError('Set: no tables selected') self._colnames = map(self.expand, fields) def geoexpand(field): if isinstance(field.type,str) and field.type.startswith('geometry'): field = field.st_astext() return self.expand(field) sql_f = ', '.join(map(geoexpand, fields)) sql_o = '' sql_s = '' left = args_get('left', False) inner_join = args_get('join', False) distinct = args_get('distinct', False) groupby = args_get('groupby', False) orderby = args_get('orderby', False) having = args_get('having', False) limitby = args_get('limitby', False) orderby_on_limitby = args_get('orderby_on_limitby', True) for_update = args_get('for_update', False) if self.can_select_for_update is False and for_update is True: raise SyntaxError('invalid select attribute: for_update') if distinct is True: sql_s += 'DISTINCT' elif distinct: sql_s += 'DISTINCT ON (%s)' % distinct if inner_join: icommand = self.JOIN() if not isinstance(inner_join, (tuple, list)): inner_join = [inner_join] ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)] ijoinon = [t for t in inner_join if isinstance(t, Expression)] itables_to_merge={} #issue 490 [itables_to_merge.update( dict.fromkeys(tables(t))) for t in ijoinon] ijoinont = [t.first._tablename for t in ijoinon] [itables_to_merge.pop(t) for t in ijoinont if t in itables_to_merge] #issue 490 iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys() iexcluded = [t for t in tablenames if not t in iimportant_tablenames] if left: join = attributes['left'] command = self.LEFT_JOIN() if not isinstance(join, (tuple, list)): join = [join] joint = [t._tablename for t in join if not isinstance(t, Expression)] joinon = [t for t in join if isinstance(t, Expression)] #patch join+left patch (solves problem with ordering in left joins) tables_to_merge={} [tables_to_merge.update( dict.fromkeys(tables(t))) for t in joinon] joinont = [t.first._tablename for t in joinon] [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge] tablenames_for_common_filters = [t for t in tablenames if not t in joinont ] important_tablenames = joint + joinont + tables_to_merge.keys() excluded = [t for t in tablenames if not t in important_tablenames ] else: excluded = tablenames if use_common_filters(query): query = self.common_filter(query,tablenames_for_common_filters) sql_w = ' WHERE ' + 
self.expand(query) if query else '' if inner_join and not left: sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \ itables_to_merge.keys()]) for t in ijoinon: sql_t += ' %s %s' % (icommand, t) elif not inner_join and left: sql_t = ', '.join([self.table_alias(t) for t in excluded + \ tables_to_merge.keys()]) if joint: sql_t += ' %s %s' % (command, ','.join([self.table_alias(t) for t in joint])) for t in joinon: sql_t += ' %s %s' % (command, t) elif inner_join and left: all_tables_in_query = set(important_tablenames + \ iimportant_tablenames + \ tablenames) tables_in_joinon = set(joinont + ijoinont) tables_not_in_joinon = \ all_tables_in_query.difference(tables_in_joinon) sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon]) for t in ijoinon: sql_t += ' %s %s' % (icommand, t) if joint: sql_t += ' %s %s' % (command, ','.join([self.table_alias(t) for t in joint])) for t in joinon: sql_t += ' %s %s' % (command, t) else: sql_t = ', '.join(self.table_alias(t) for t in tablenames) if groupby: if isinstance(groupby, (list, tuple)): groupby = xorify(groupby) sql_o += ' GROUP BY %s' % self.expand(groupby) if having: sql_o += ' HAVING %s' % attributes['having'] if orderby: if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) if str(orderby) == '<random>': sql_o += ' ORDER BY %s' % self.RANDOM() else: sql_o += ' ORDER BY %s' % self.expand(orderby) if limitby: if orderby_on_limitby and not orderby and tablenames: sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])]) # oracle does not support limitby sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby) if for_update and self.can_select_for_update is True: sql = sql.rstrip(';') + ' FOR UPDATE;' return sql def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) def _fetchall(self): return self.cursor.fetchall() def _select_aux(self,sql,fields,attributes): args_get = attributes.get cache = args_get('cache',None) if not cache: self.execute(sql) rows = self._fetchall() else: (cache_model, time_expire) = cache key = self.uri + '/' + sql + '/rows' if len(key)>200: key = hashlib_md5(key).hexdigest() def _select_aux2(): self.execute(sql) return self._fetchall() rows = cache_model(key,_select_aux2,time_expire) if isinstance(rows,tuple): rows = list(rows) limitby = args_get('limitby', None) or (0,) rows = self.rowslice(rows,limitby[0],None) processor = args_get('processor',self.parse) cacheable = args_get('cacheable',False) return processor(rows,fields,self._colnames,cacheable=cacheable) def select(self, query, fields, attributes): """ Always returns a Rows object, possibly empty. 
""" sql = self._select(query, fields, attributes) cache = attributes.get('cache', None) if cache and attributes.get('cacheable',False): del attributes['cache'] (cache_model, time_expire) = cache key = self.uri + '/' + sql if len(key)>200: key = hashlib_md5(key).hexdigest() args = (sql,fields,attributes) return cache_model( key, lambda self=self,args=args:self._select_aux(*args), time_expire) else: return self._select_aux(sql,fields,attributes) def _count(self, query, distinct=None): tablenames = self.tables(query) if query: if use_common_filters(query): query = self.common_filter(query, tablenames) sql_w = ' WHERE ' + self.expand(query) else: sql_w = '' sql_t = ','.join(self.table_alias(t) for t in tablenames) if distinct: if isinstance(distinct,(list, tuple)): distinct = xorify(distinct) sql_d = self.expand(distinct) return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ (sql_d, sql_t, sql_w) return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w) def count(self, query, distinct=None): self.execute(self._count(query, distinct)) return self.cursor.fetchone()[0] def tables(self, *queries): tables = set() for query in queries: if isinstance(query, Field): tables.add(query.tablename) elif isinstance(query, (Expression, Query)): if not query.first is None: tables = tables.union(self.tables(query.first)) if not query.second is None: tables = tables.union(self.tables(query.second)) return list(tables) def commit(self): if self.connection: return self.connection.commit() def rollback(self): if self.connection: return self.connection.rollback() def close_connection(self): if self.connection: return self.connection.close() def distributed_transaction_begin(self, key): return def prepare(self, key): if self.connection: self.connection.prepare() def commit_prepared(self, key): if self.connection: self.connection.commit() def rollback_prepared(self, key): if self.connection: self.connection.rollback() def concat_add(self, tablename): return ', ADD ' def constraint_name(self, table, fieldname): return '%s_%s__constraint' % (table,fieldname) def create_sequence_and_triggers(self, query, table, **args): self.execute(query) def log_execute(self, *a, **b): if not self.connection: return None command = a[0] if hasattr(self,'filter_sql_command'): command = self.filter_sql_command(command) if self.db._debug: LOGGER.debug('SQL: %s' % command) self.db._lastsql = command t0 = time.time() ret = self.cursor.execute(command, *a[1:], **b) self.db._timings.append((command,time.time()-t0)) del self.db._timings[:-TIMINGSSIZE] return ret def execute(self, *a, **b): return self.log_execute(*a, **b) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if isinstance(obj, CALLABLETYPES): obj = obj() if isinstance(fieldtype, SQLCustomType): value = fieldtype.encoder(obj) if fieldtype.type in ('string','text', 'json'): return self.adapt(value) return value if isinstance(obj, (Expression, Field)): return str(obj) if field_is_type('list:'): if not obj: obj = [] elif not isinstance(obj, (list, tuple)): obj = [obj] if field_is_type('list:string'): obj = map(str,obj) else: obj = map(int,obj) # we don't want to bar_encode json objects if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): obj = bar_encode(obj) if obj is None: return 'NULL' if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: return 'NULL' r = self.represent_exceptions(obj, fieldtype) if not r is None: return r if fieldtype == 'boolean': if obj and not str(obj)[:1].upper() in '0F': return self.smart_adapt(self.TRUE) else: 
                return self.smart_adapt(self.FALSE)
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(long(obj))
        if field_is_type('decimal'):
            return str(obj)
        elif field_is_type('reference'): # reference
            if fieldtype.find('.')>0:
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(long(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat(self.T_SEP)[:19]
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'json':
            if not self.native_json:
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
                    obj = simplejson.dumps(obj)
                else:
                    raise RuntimeError("missing simplejson")
        if not isinstance(obj,bytes):
            obj = bytes(obj)
        try:
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)

    def represent_exceptions(self, obj, fieldtype):
        return None

    def lastrowid(self, table):
        return None

    def integrity_error_class(self):
        return type(None)

    def rowslice(self, rows, minimum=0, maximum=None):
        """
        By default this function does nothing;
        overload when db does not do slicing.
        """
        return rows

    def parse_value(self, value, field_type, blob_decode=True):
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)

    def parse_reference(self, value, field_type):
        referee = field_type[10:].strip()
        if not '.'
in referee: value = Reference(value) value._table, value._record = self.db[referee], None return value def parse_boolean(self, value, field_type): return value == True or str(value)[:1].lower() == 't' def parse_date(self, value, field_type): if isinstance(value, datetime.datetime): return value.date() if not isinstance(value, (datetime.date,datetime.datetime)): (y, m, d) = map(int, str(value)[:10].strip().split('-')) value = datetime.date(y, m, d) return value def parse_time(self, value, field_type): if not isinstance(value, datetime.time): time_items = map(int,str(value)[:8].strip().split(':')[:3]) if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] value = datetime.time(h, mi, s) return value def parse_datetime(self, value, field_type): if not isinstance(value, datetime.datetime): value = str(value) date_part,time_part,timezone = value[:10],value[11:19],value[19:] if '+' in timezone: ms,tz = timezone.split('+') h,m = tz.split(':') dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) elif '-' in timezone: ms,tz = timezone.split('-') h,m = tz.split(':') dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) else: dt = None (y, m, d) = map(int,date_part.split('-')) time_parts = time_part and time_part.split(':')[:3] or (0,0,0) while len(time_parts)<3: time_parts.append(0) time_items = map(int,time_parts) (h, mi, s) = time_items value = datetime.datetime(y, m, d, h, mi, s) if dt: value = value + dt return value def parse_blob(self, value, field_type): return base64.b64decode(str(value)) def parse_decimal(self, value, field_type): decimals = int(field_type[8:-1].split(',')[-1]) if self.dbengine in ('sqlite', 'spatialite'): value = ('%.' + str(decimals) + 'f') % value if not isinstance(value, decimal.Decimal): value = decimal.Decimal(str(value)) return value def parse_list_integers(self, value, field_type): if not isinstance(self, NoSQLAdapter): value = bar_decode_integer(value) return value def parse_list_references(self, value, field_type): if not isinstance(self, NoSQLAdapter): value = bar_decode_integer(value) return [self.parse_reference(r, field_type[5:]) for r in value] def parse_list_strings(self, value, field_type): if not isinstance(self, NoSQLAdapter): value = bar_decode_string(value) return value def parse_id(self, value, field_type): return long(value) def parse_integer(self, value, field_type): return long(value) def parse_double(self, value, field_type): return float(value) def parse_json(self, value, field_type): if not self.native_json: if not isinstance(value, basestring): raise RuntimeError('json data not a string') if isinstance(value, unicode): value = value.encode('utf-8') if have_serializers: value = serializers.loads_json(value) elif simplejson: value = simplejson.loads(value) else: raise RuntimeError("missing simplejson") return value def build_parsemap(self): self.parsemap = { 'id':self.parse_id, 'integer':self.parse_integer, 'bigint':self.parse_integer, 'float':self.parse_double, 'double':self.parse_double, 'reference':self.parse_reference, 'boolean':self.parse_boolean, 'date':self.parse_date, 'time':self.parse_time, 'datetime':self.parse_datetime, 'blob':self.parse_blob, 'decimal':self.parse_decimal, 'json':self.parse_json, 'list:integer':self.parse_list_integers, 'list:reference':self.parse_list_references, 'list:string':self.parse_list_strings, } def parse(self, rows, fields, colnames, blob_decode=True, cacheable = False): self.build_parsemap() db = self.db virtualtables = [] new_rows = [] tmps = [] for colname in colnames: if 
not REGEX_TABLE_DOT_FIELD.match(colname): tmps.append(None) else: (tablename, fieldname) = colname.split('.') table = db[tablename] field = table[fieldname] ft = field.type tmps.append((tablename,fieldname,table,field,ft)) for (i,row) in enumerate(rows): new_row = Row() for (j,colname) in enumerate(colnames): value = row[j] tmp = tmps[j] if tmp: (tablename,fieldname,table,field,ft) = tmp if tablename in new_row: colset = new_row[tablename] else: colset = new_row[tablename] = Row() if tablename not in virtualtables: virtualtables.append(tablename) value = self.parse_value(value,ft,blob_decode) if field.filter_out: value = field.filter_out(value) colset[fieldname] = value # for backward compatibility if ft=='id' and fieldname!='id' and \ not 'id' in table.fields: colset['id'] = value if ft == 'id' and not cacheable: # temporary hack to deal with # GoogleDatastoreAdapter # references if isinstance(self, GoogleDatastoreAdapter): id = value.key().id_or_name() colset[fieldname] = id colset.gae_item = value else: id = value colset.update_record = RecordUpdater(colset,table,id) colset.delete_record = RecordDeleter(table,id) for rfield in table._referenced_by: referee_link = db._referee_name and \ db._referee_name % dict( table=rfield.tablename,field=rfield.name) if referee_link and not referee_link in colset: colset[referee_link] = LazySet(rfield,id) else: if not '_extra' in new_row: new_row['_extra'] = Row() new_row['_extra'][colname] = \ self.parse_value(value, fields[j].type,blob_decode) new_column_name = \ REGEX_SELECT_AS_PARSER.search(colname) if not new_column_name is None: column_name = new_column_name.groups(0) setattr(new_row,column_name[0],value) new_rows.append(new_row) rowsobj = Rows(db, new_rows, colnames, rawrows=rows) for tablename in virtualtables: ### new style virtual fields table = db[tablename] fields_virtual = [(f,v) for (f,v) in table.iteritems() if isinstance(v,FieldVirtual)] fields_lazy = [(f,v) for (f,v) in table.iteritems() if isinstance(v,FieldMethod)] if fields_virtual or fields_lazy: for row in rowsobj.records: box = row[tablename] for f,v in fields_virtual: box[f] = v.f(row) for f,v in fields_lazy: box[f] = (v.handler or VirtualCommand)(v.f,row) ### old style virtual fields for item in table.virtualfields: try: rowsobj = rowsobj.setvirtualfields(**{tablename:item}) except (KeyError, AttributeError): # to avoid breaking virtualfields when partial select pass return rowsobj def common_filter(self, query, tablenames): tenant_fieldname = self.db._request_tenant for tablename in tablenames: table = self.db[tablename] # deal with user provided filters if table._common_filter != None: query = query & table._common_filter(query) # deal with multi_tenant filters if tenant_fieldname in table: default = table[tenant_fieldname].default if not default is None: newquery = table[tenant_fieldname] == default if query is None: query = newquery else: query = query & newquery return query def CASE(self,query,t,f): def represent(x): types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} if x is None: return 'NULL' elif isinstance(x,Expression): return str(x) else: return self.represent(x,types.get(type(x),'string')) return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ (self.expand(query),represent(t),represent(f))) ################################################################################### # List of all the available adapters; they all extend BaseAdapter. 
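# A rough usage sketch (added, not executed here): the scheme part of the DAL URI
# selects one of the adapter classes defined below, along the lines of
#
#     db = DAL('sqlite://storage.db')             # -> SQLiteAdapter
#     db = DAL('spatialite://storage.db')         # -> SpatiaLiteAdapter
#     db = DAL('mysql://user:pw@host/test')       # -> MySQLAdapter
#     db = DAL('postgres://user:pw@host/test')    # -> PostgreSQLAdapter
#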
################################################################################### class SQLiteAdapter(BaseAdapter): drivers = ('sqlite2','sqlite3') can_select_for_update = None # support ourselves with BEGIN TRANSACTION def EXTRACT(self,field,what): return "web2py_extract('%s',%s)" % (what, self.expand(field)) @staticmethod def web2py_extract(lookup, s): table = { 'year': (0, 4), 'month': (5, 7), 'day': (8, 10), 'hour': (11, 13), 'minute': (14, 16), 'second': (17, 19), } try: if lookup != 'epoch': (i, j) = table[lookup] return int(s[i:j]) else: return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple()) except: return None @staticmethod def web2py_regexp(expression, item): return re.compile(expression).search(item) is not None def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "sqlite" self.uri = uri if do_connect: self.find_driver(adapter_args) self.pool_size = 0 self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() path_encoding = sys.getfilesystemencoding() \ or locale.getdefaultlocale()[1] or 'utf8' if uri.startswith('sqlite:memory'): dbpath = ':memory:' else: dbpath = uri.split('://',1)[1] if dbpath[0] != '/': if PYTHON_VERSION == 2: dbpath = pjoin( self.folder.decode(path_encoding).encode('utf8'), dbpath) else: dbpath = pjoin(self.folder, dbpath) if not 'check_same_thread' in driver_args: driver_args['check_same_thread'] = False if not 'detect_types' in driver_args and do_connect: driver_args['detect_types'] = self.driver.PARSE_DECLTYPES def connector(dbpath=dbpath, driver_args=driver_args): return self.driver.Connection(dbpath, **driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) self.connection.create_function("REGEXP", 2, SQLiteAdapter.web2py_regexp) def _truncate(self, table, mode=''): tablename = table._tablename return ['DELETE FROM %s;' % tablename, "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename] def lastrowid(self, table): return self.cursor.lastrowid def REGEXP(self,first,second): return '(%s REGEXP %s)' % (self.expand(first), self.expand(second,'string')) def select(self, query, fields, attributes): """ Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION. Note that the entire database, rather than one record, is locked (it will be locked eventually anyway by the following UPDATE). 
""" if attributes.get('for_update', False) and not 'cache' in attributes: self.execute('BEGIN IMMEDIATE TRANSACTION;') return super(SQLiteAdapter, self).select(query, fields, attributes) class SpatiaLiteAdapter(SQLiteAdapter): drivers = ('sqlite3','sqlite2') types = copy.copy(BaseAdapter.types) types.update(geometry='GEOMETRY') def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "spatialite" self.uri = uri if do_connect: self.find_driver(adapter_args) self.pool_size = 0 self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() self.srid = srid path_encoding = sys.getfilesystemencoding() \ or locale.getdefaultlocale()[1] or 'utf8' if uri.startswith('spatialite:memory'): dbpath = ':memory:' else: dbpath = uri.split('://',1)[1] if dbpath[0] != '/': dbpath = pjoin( self.folder.decode(path_encoding).encode('utf8'), dbpath) if not 'check_same_thread' in driver_args: driver_args['check_same_thread'] = False if not 'detect_types' in driver_args and do_connect: driver_args['detect_types'] = self.driver.PARSE_DECLTYPES def connector(dbpath=dbpath, driver_args=driver_args): return self.driver.Connection(dbpath, **driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.connection.enable_load_extension(True) # for Windows, rename libspatialite-2.dll to libspatialite.dll # Linux uses libspatialite.so # Mac OS X uses libspatialite.dylib libspatialite = SPATIALLIBS[platform.system()] self.execute(r'SELECT load_extension("%s");' % libspatialite) self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) self.connection.create_function("REGEXP", 2, SQLiteAdapter.web2py_regexp) # GIS functions def ST_ASGEOJSON(self, first, second): return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first), second['precision'], second['options']) def ST_ASTEXT(self, first): return 'AsText(%s)' %(self.expand(first)) def ST_CONTAINS(self, first, second): return 'Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_DISTANCE(self, first, second): return 'Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_EQUALS(self, first, second): return 'Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): return 'Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): return 'Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_SIMPLIFY(self, first, second): return 'Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double')) def ST_TOUCHES(self, first, second): return 'Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_WITHIN(self, first, second): return 'Within(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if field_is_type('geo'): srid = 4326 # Spatialite default srid for geometry geotype, parms = fieldtype[:-1].split('(') parms = parms.split(',') if len(parms) >= 2: schema, srid = parms[:2] # if field_is_type('geometry'): value = "ST_GeomFromText('%s',%s)" %(obj, srid) # elif field_is_type('geography'): # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) # else: # raise SyntaxError, 'Invalid field type %s' %fieldtype return value return 
BaseAdapter.represent(self, obj, fieldtype) class JDBCSQLiteAdapter(SQLiteAdapter): drivers = ('zxJDBC_sqlite',) def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "sqlite" self.uri = uri if do_connect: self.find_driver(adapter_args) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() path_encoding = sys.getfilesystemencoding() \ or locale.getdefaultlocale()[1] or 'utf8' if uri.startswith('sqlite:memory'): dbpath = ':memory:' else: dbpath = uri.split('://',1)[1] if dbpath[0] != '/': dbpath = pjoin( self.folder.decode(path_encoding).encode('utf8'), dbpath) def connector(dbpath=dbpath,driver_args=driver_args): return self.driver.connect( self.driver.getConnection('jdbc:sqlite:'+dbpath), **driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) def execute(self, a): return self.log_execute(a) class MySQLAdapter(BaseAdapter): drivers = ('MySQLdb','pymysql') maxcharlength = 255 commit_on_alter_table = True support_distributed_transaction = True types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'LONGTEXT', 'json': 'LONGTEXT', 'password': 'VARCHAR(%(length)s)', 'blob': 'LONGBLOB', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'DATETIME', 'id': 'INT AUTO_INCREMENT NOT NULL', 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'LONGTEXT', 'list:string': 'LONGTEXT', 'list:reference': 'LONGTEXT', 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL', 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def varquote(self,name): return varquote_aux(name,'`%s`') def RANDOM(self): return 'RAND()' def SUBSTRING(self,field,parameters): return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) def EPOCH(self, first): return "UNIX_TIMESTAMP(%s)" % self.expand(first) def CONCAT(self, *items): return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items) def REGEXP(self,first,second): return '(%s REGEXP %s)' % (self.expand(first), self.expand(second,'string')) def _drop(self,table,mode): # breaks db integrity but without this mysql does not drop table return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table, 'SET FOREIGN_KEY_CHECKS=1;'] def _insert_empty(self, table): return 'INSERT INTO %s VALUES (DEFAULT);' % table def distributed_transaction_begin(self,key): self.execute('XA START;') def prepare(self,key): self.execute("XA END;") self.execute("XA PREPARE;") def commit_prepared(self,ley): self.execute("XA COMMIT;") def rollback_prepared(self,key): self.execute("XA ROLLBACK;") REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, 
do_connect=True, after_connection=None): self.db = db self.dbengine = "mysql" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = int(m.group('port') or '3306') charset = m.group('charset') or 'utf8' driver_args.update(db=db, user=credential_decoder(user), passwd=credential_decoder(password), host=host, port=port, charset=charset) def connector(driver_args=driver_args): return self.driver.connect(**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.execute('SET FOREIGN_KEY_CHECKS=1;') self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") def lastrowid(self,table): self.execute('select last_insert_id();') return int(self.cursor.fetchone()[0]) class PostgreSQLAdapter(BaseAdapter): drivers = ('psycopg2','pg8000') support_distributed_transaction = True types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'TEXT', 'json': 'TEXT', 'password': 'VARCHAR(%(length)s)', 'blob': 'BYTEA', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'SERIAL PRIMARY KEY', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', 'geometry': 'GEOMETRY', 'geography': 'GEOGRAPHY', 'big-id': 'BIGSERIAL PRIMARY KEY', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def varquote(self,name): return varquote_aux(name,'"%s"') def adapt(self,obj): if self.driver_name == 'psycopg2': return psycopg2_adapt(obj).getquoted() elif self.driver_name == 'pg8000': return "'%s'" % str(obj).replace("%","%%").replace("'","''") else: return "'%s'" % str(obj).replace("'","''") def sequence_name(self,table): return '%s_id_Seq' % table def RANDOM(self): return 'RANDOM()' def ADD(self, first, second): t = first.type if t in ('text','string','password', 'json', 'upload','blob'): return '(%s || %s)' % (self.expand(first), self.expand(second, t)) else: return '(%s + %s)' % (self.expand(first), self.expand(second, t)) def distributed_transaction_begin(self,key): return def prepare(self,key): self.execute("PREPARE TRANSACTION '%s';" % key) def commit_prepared(self,key): self.execute("COMMIT PREPARED '%s';" % key) def rollback_prepared(self,key): self.execute("ROLLBACK PREPARED '%s';" % key) def create_sequence_and_triggers(self, query, table, **args): # following lines should only be executed if table._sequence_name does not exist # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ # % (table._tablename, table._fieldname, table._sequence_name)) self.execute(query) REGEX_URI = 
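#
# Connection sketch (illustrative only): the URI form accepted by
# MySQLAdapter.REGEX_URI is user:password@host[:port]/dbname with an optional
# ?set_encoding=<charset>.  Host, credentials and table names below are
# hypothetical; MySQLdb or pymysql must be installed.
def _mysql_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db = DAL('mysql://web2py:secret@127.0.0.1:3306/testdb?set_encoding=utf8')
    db.define_table('thing', Field('name', 'string', length=80))
    db.thing.insert(name='example')   # id is read back via last_insert_id()
    db.commit()
#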
re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "postgres" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.srid = srid self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = m.group('port') or '5432' sslmode = m.group('sslmode') if sslmode: msg = ("dbname='%s' user='%s' host='%s' " "port=%s password='%s' sslmode='%s'") \ % (db, user, host, port, password, sslmode) else: msg = ("dbname='%s' user='%s' host='%s' " "port=%s password='%s'") \ % (db, user, host, port, password) # choose diver according uri if self.driver: self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__) else: self.__version__ = None def connector(msg=msg,driver_args=driver_args): return self.driver.connect(msg,**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.connection.set_client_encoding('UTF8') self.execute("SET standard_conforming_strings=on;") self.try_json() def lastrowid(self,table): self.execute("select currval('%s')" % table._sequence_name) return int(self.cursor.fetchone()[0]) def try_json(self): # check JSON data type support # (to be added to after_connection) if self.driver_name == "pg8000": supports_json = self.connection.server_version >= "9.2.0" elif (self.driver_name == "psycopg2") and \ (self.driver.__version__ >= "2.0.12"): supports_json = self.connection.server_version >= 90200 elif self.driver_name == "zxJDBC": supports_json = self.connection.dbversion >= "9.2.0" else: supports_json = None if supports_json: self.types["json"] = "JSON" else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)") def LIKE(self,first,second): args = (self.expand(first), self.expand(second,'string')) if not first.type in ('string', 'text', 'json'): return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1]) else: return '(%s LIKE %s)' % args def ILIKE(self,first,second): args = (self.expand(first), self.expand(second,'string')) if not first.type in ('string', 'text', 'json'): return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1]) else: return '(%s ILIKE %s)' % args def REGEXP(self,first,second): return '(%s ~ %s)' % (self.expand(first), self.expand(second,'string')) def STARTSWITH(self,first,second): return '(%s ILIKE %s)' % (self.expand(first), self.expand(second+'%','string')) def ENDSWITH(self,first,second): return '(%s ILIKE %s)' % (self.expand(first), self.expand('%'+second,'string')) # GIS functions def ST_ASGEOJSON(self, first, second): """ http://postgis.org/docs/ST_AsGeoJSON.html """ return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'], self.expand(first), second['precision'], second['options']) def ST_ASTEXT(self, 
first): """ http://postgis.org/docs/ST_AsText.html """ return 'ST_AsText(%s)' %(self.expand(first)) def ST_X(self, first): """ http://postgis.org/docs/ST_X.html """ return 'ST_X(%s)' %(self.expand(first)) def ST_Y(self, first): """ http://postgis.org/docs/ST_Y.html """ return 'ST_Y(%s)' %(self.expand(first)) def ST_CONTAINS(self, first, second): """ http://postgis.org/docs/ST_Contains.html """ return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_DISTANCE(self, first, second): """ http://postgis.org/docs/ST_Distance.html """ return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_EQUALS(self, first, second): """ http://postgis.org/docs/ST_Equals.html """ return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): """ http://postgis.org/docs/ST_Intersects.html """ return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): """ http://postgis.org/docs/ST_Overlaps.html """ return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_SIMPLIFY(self, first, second): """ http://postgis.org/docs/ST_Simplify.html """ return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double')) def ST_TOUCHES(self, first, second): """ http://postgis.org/docs/ST_Touches.html """ return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def ST_WITHIN(self, first, second): """ http://postgis.org/docs/ST_Within.html """ return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type)) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if field_is_type('geo'): srid = 4326 # postGIS default srid for geometry geotype, parms = fieldtype[:-1].split('(') parms = parms.split(',') if len(parms) >= 2: schema, srid = parms[:2] if field_is_type('geometry'): value = "ST_GeomFromText('%s',%s)" %(obj, srid) elif field_is_type('geography'): value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) # else: # raise SyntaxError('Invalid field type %s' %fieldtype) return value return BaseAdapter.represent(self, obj, fieldtype) class NewPostgreSQLAdapter(PostgreSQLAdapter): drivers = ('psycopg2','pg8000') types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'TEXT', 'json': 'TEXT', 'password': 'VARCHAR(%(length)s)', 'blob': 'BYTEA', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'SERIAL PRIMARY KEY', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BIGINT[]', 'list:string': 'TEXT[]', 'list:reference': 'BIGINT[]', 'geometry': 'GEOMETRY', 'geography': 'GEOGRAPHY', 'big-id': 'BIGSERIAL PRIMARY KEY', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def parse_list_integers(self, value, field_type): return value def parse_list_references(self, value, field_type): return [self.parse_reference(r, field_type[5:]) for r in value] def parse_list_strings(self, value, field_type): return value def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if field_is_type('list:'): if not obj: obj = [] elif not isinstance(obj, (list, tuple)): obj = [obj] if field_is_type('list:string'): obj = map(str,obj) else: obj = map(int,obj) return 'ARRAY[%s]' % ','.join(repr(item) 
for item in obj) return BaseAdapter.represent(self, obj, fieldtype) class JDBCPostgreSQLAdapter(PostgreSQLAdapter): drivers = ('zxJDBC',) REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None ): self.db = db self.dbengine = "postgres" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = m.group('port') or '5432' msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) def connector(msg=msg,driver_args=driver_args): return self.driver.connect(*msg,**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.connection.set_client_encoding('UTF8') self.execute('BEGIN;') self.execute("SET CLIENT_ENCODING TO 'UNICODE';") self.try_json() class OracleAdapter(BaseAdapter): drivers = ('cx_Oracle',) commit_on_alter_table = False types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR2(%(length)s)', 'text': 'CLOB', 'json': 'CLOB', 'password': 'VARCHAR2(%(length)s)', 'blob': 'CLOB', 'upload': 'VARCHAR2(%(length)s)', 'integer': 'INT', 'bigint': 'NUMBER', 'float': 'FLOAT', 'double': 'BINARY_DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'CHAR(8)', 'datetime': 'DATE', 'id': 'NUMBER PRIMARY KEY', 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', 'big-id': 'NUMBER PRIMARY KEY', 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', } def sequence_name(self,tablename): return '%s_sequence' % tablename def trigger_name(self,tablename): return '%s_trigger' % tablename def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'dbms_random.value' def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def _drop(self,table,mode): sequence_name = table._sequence_name return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name] def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby if len(sql_w) > 1: sql_w_row = sql_w + ' AND w_row > %i' % lmin else: sql_w_row = 'WHERE w_row > %i' % lmin return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, 
sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def constraint_name(self, tablename, fieldname): constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) if len(constraint_name)>30: constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) return constraint_name def represent_exceptions(self, obj, fieldtype): if fieldtype == 'blob': obj = base64.b64encode(str(obj)) return ":CLOB('%s')" % obj elif fieldtype == 'date': if isinstance(obj, (datetime.date, datetime.datetime)): obj = obj.isoformat()[:10] else: obj = str(obj) return "to_date('%s','yyyy-mm-dd')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T',' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: obj = str(obj) return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj return None def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "oracle" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] if not 'threaded' in driver_args: driver_args['threaded']=True def connector(uri=ruri,driver_args=driver_args): return self.driver.connect(uri,**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") def execute(self, command, args=None): args = args or [] i = 1 while True: m = self.oracle_fix.match(command) if not m: break command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] args.append(m.group('clob')[6:-2].replace("''", "'")) i += 1 if command[-1:]==';': command = command[:-1] return self.log_execute(command, args) def create_sequence_and_triggers(self, query, table, **args): tablename = table._tablename sequence_name = table._sequence_name trigger_name = table._trigger_name self.execute(query) self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name) self.execute(""" CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW DECLARE curr_val NUMBER; diff_val NUMBER; PRAGMA autonomous_transaction; BEGIN IF :NEW.id IS NOT NULL THEN EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; diff_val := :NEW.id - curr_val - 1; IF diff_val != 0 THEN EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val; EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1'; END IF; END IF; SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL; END; """ % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name)) def lastrowid(self,table): sequence_name = table._sequence_name self.execute('SELECT %s.currval FROM dual;' % sequence_name) return long(self.cursor.fetchone()[0]) #def parse_value(self, value, field_type, blob_decode=True): # if blob_decode and isinstance(value, cx_Oracle.LOB): # try: # value 
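#
# Connection sketch (illustrative only): OracleAdapter hands everything after
# 'oracle://' straight to cx_Oracle.connect(), so the usual user/password@tnsname
# form applies, and row ids come from the per-table sequence plus BEFORE INSERT
# trigger created above.  Credentials and the TNS alias below are hypothetical.
def _oracle_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db = DAL('oracle://scott/tiger@orcl')
    db.define_table('thing', Field('name', 'string', length=80))
    return db.thing.insert(name='example')   # read back via <sequence>.currval
#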
= value.read() # except self.driver.ProgrammingError: # # After a subsequent fetch the LOB value is not valid anymore # pass # return BaseAdapter.parse_value(self, value, field_type, blob_decode) def _fetchall(self): if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description): return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ for c in r]) for r in self.cursor] else: return self.cursor.fetchall() class MSSQLAdapter(BaseAdapter): drivers = ('pyodbc',) T_SEP = 'T' types = { 'boolean': 'BIT', 'string': 'VARCHAR(%(length)s)', 'text': 'TEXT', 'json': 'TEXT', 'password': 'VARCHAR(%(length)s)', 'blob': 'IMAGE', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATETIME', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'INT IDENTITY PRIMARY KEY', 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', 'geometry': 'geometry', 'geography': 'geography', 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', } def concat_add(self,tablename): return '; ALTER TABLE %s ADD ' % tablename def varquote(self,name): return varquote_aux(name,'[%s]') def EXTRACT(self,field,what): return "DATEPART(%s,%s)" % (what, self.expand(field)) def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'NEWID()' def ALLOW_NULL(self): return ' NULL' def SUBSTRING(self,field,parameters): return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) def PRIMARY_KEY(self,key): return 'PRIMARY KEY CLUSTERED (%s)' % key def AGGREGATE(self, first, what): if what == 'LENGTH': what = 'LEN' return "%s(%s)" % (what, self.expand(first)) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_s += ' TOP %i' % lmax if 'GROUP BY' in sql_o: orderfound = sql_o.find('ORDER BY ') if orderfound >= 0: sql_o = sql_o[:orderfound] return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) TRUE = 1 FALSE = 0 REGEX_DSN = re.compile('^(?P<dsn>.+)$') REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "mssql" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.srid = srid self.find_or_make_work_folder() # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 ruri = uri.split('://',1)[1] if '@' not in ruri: try: m = self.REGEX_DSN.match(ruri) if not m: raise SyntaxError( 'Parsing uri string(%s) has no result' % self.uri) dsn = 
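#
# Connection sketch (illustrative only): MSSQLAdapter accepts either a raw ODBC
# connection string without '@' (typically 'DSN=...;UID=...;PWD=...') or a full
# user:password@host[:port]/dbname URI whose trailing ?key=value pairs are folded
# into the ODBC string (DRIVER defaults to {SQL Server}).  All values below are
# hypothetical; pyodbc is required.
def _mssql_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db_dsn = DAL('mssql://DSN=my_dsn;UID=web2py;PWD=secret')   # DSN form
    db_uri = DAL('mssql://web2py:secret@dbhost:1433/testdb'
                 '?DRIVER={SQL Server Native Client 11.0}')    # URI form
    db_uri.define_table('thing', Field('name', 'string', length=80))
    return db_dsn, db_uri
#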
m.group('dsn') if not dsn: raise SyntaxError('DSN required') except SyntaxError: e = sys.exc_info()[1] LOGGER.error('NdGpatch error') raise e # was cnxn = 'DSN=%s' % dsn cnxn = dsn else: m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = m.group('port') or '1433' # Parse the optional url name-value arg pairs after the '?' # (in the form of arg1=value1&arg2=value2&...) # Default values (drivers like FreeTDS insist on uppercase parameter keys) argsdict = { 'DRIVER':'{SQL Server}' } urlargs = m.group('urlargs') or '' for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs): argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()]) cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ % (host, port, db, user, password, urlargs) def connector(cnxn=cnxn,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.connector = connector if do_connect: self.reconnect() def lastrowid(self,table): #self.execute('SELECT @@IDENTITY;') self.execute('SELECT SCOPE_IDENTITY();') return long(self.cursor.fetchone()[0]) def integrity_error_class(self): return pyodbc.IntegrityError def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] def EPOCH(self, first): return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first) def CONCAT(self, *items): return '(%s)' % ' + '.join(self.expand(x,'string') for x in items) # GIS Spatial Extensions # No STAsGeoJSON in MSSQL def ST_ASTEXT(self, first): return '%s.STAsText()' %(self.expand(first)) def ST_CONTAINS(self, first, second): return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type)) def ST_DISTANCE(self, first, second): return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type)) def ST_EQUALS(self, first, second): return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type)) # no STSimplify in MSSQL def ST_TOUCHES(self, first, second): return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type)) def ST_WITHIN(self, first, second): return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type)) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if field_is_type('geometry'): srid = 0 # MS SQL default srid for geometry geotype, parms = fieldtype[:-1].split('(') if parms: srid = parms return "geometry::STGeomFromText('%s',%s)" %(obj, srid) elif fieldtype == 'geography': srid = 4326 # MS SQL default srid for geography geotype, parms = fieldtype[:-1].split('(') if parms: srid = parms return "geography::STGeomFromText('%s',%s)" %(obj, srid) # else: # raise SyntaxError('Invalid field type %s' %fieldtype) return "geometry::STGeomFromText('%s',%s)" %(obj, srid) return BaseAdapter.represent(self, obj, fieldtype) class 
MSSQL3Adapter(MSSQLAdapter): """ experimental support for pagination in MSSQL""" def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby if lmin == 0: sql_s += ' TOP %i' % lmax return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) lmin += 1 sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:] sql_g_inner = sql_o[:sql_o.find('ORDER BY ')] sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))] sql_f_inner = [f for f in sql_f.split(',')] sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] sql_f_iproxy = ', '.join(sql_f_iproxy) sql_f_oproxy = ', '.join(sql_f_outer) return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o) def rowslice(self,rows,minimum=0,maximum=None): return rows class MSSQL2Adapter(MSSQLAdapter): drivers = ('pyodbc',) types = { 'boolean': 'CHAR(1)', 'string': 'NVARCHAR(%(length)s)', 'text': 'NTEXT', 'json': 'NTEXT', 'password': 'NVARCHAR(%(length)s)', 'blob': 'IMAGE', 'upload': 'NVARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATETIME', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'INT IDENTITY PRIMARY KEY', 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'NTEXT', 'list:string': 'NTEXT', 'list:reference': 'NTEXT', 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', } def represent(self, obj, fieldtype): value = BaseAdapter.represent(self, obj, fieldtype) if fieldtype in ('string','text', 'json') and value[:1]=="'": value = 'N'+value return value def execute(self,a): return self.log_execute(a.decode('utf8')) class VerticaAdapter(MSSQLAdapter): drivers = ('pyodbc',) T_SEP = ' ' types = { 'boolean': 'BOOLEAN', 'string': 'VARCHAR(%(length)s)', 'text': 'BYTEA', 'json': 'VARCHAR(%(length)s)', 'password': 'VARCHAR(%(length)s)', 'blob': 'BYTEA', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE PRECISION', 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'DATETIME', 'id': 'IDENTITY', 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BYTEA', 'list:string': 'BYTEA', 'list:reference': 'BYTEA', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def EXTRACT(self, first, what): return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first)) def _truncate(self, table, mode=''): tablename = table._tablename return ['TRUNCATE %s %s;' % (tablename, mode or '')] def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) return 'SELECT %s %s FROM 
%s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) def lastrowid(self,table): self.execute('SELECT LAST_INSERT_ID();') return long(self.cursor.fetchone()[0]) def execute(self, a): return self.log_execute(a) class SybaseAdapter(MSSQLAdapter): drivers = ('Sybase',) types = { 'boolean': 'BIT', 'string': 'CHAR VARYING(%(length)s)', 'text': 'TEXT', 'json': 'TEXT', 'password': 'CHAR VARYING(%(length)s)', 'blob': 'IMAGE', 'upload': 'CHAR VARYING(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATETIME', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'INT IDENTITY PRIMARY KEY', 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'TEXT', 'list:string': 'TEXT', 'list:reference': 'TEXT', 'geometry': 'geometry', 'geography': 'geography', 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', } def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "sybase" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.srid = srid self.find_or_make_work_folder() # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 ruri = uri.split('://',1)[1] if '@' not in ruri: try: m = self.REGEX_DSN.match(ruri) if not m: raise SyntaxError( 'Parsing uri string(%s) has no result' % self.uri) dsn = m.group('dsn') if not dsn: raise SyntaxError('DSN required') except SyntaxError: e = sys.exc_info()[1] LOGGER.error('NdGpatch error') raise e else: m = self.REGEX_URI.match(uri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = m.group('port') or '1433' dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db) driver_args.update(user = credential_decoder(user), password = credential_decoder(password)) def connector(dsn=dsn,driver_args=driver_args): return self.driver.connect(dsn,**driver_args) self.connector = connector if do_connect: self.reconnect() def integrity_error_class(self): return RuntimeError # FIX THIS class FireBirdAdapter(BaseAdapter): drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') commit_on_alter_table = False support_distributed_transaction = True types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'BLOB SUB_TYPE 1', 'json': 'BLOB SUB_TYPE 1', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB SUB_TYPE 0', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE 
PRECISION', 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INTEGER PRIMARY KEY', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BLOB SUB_TYPE 1', 'list:string': 'BLOB SUB_TYPE 1', 'list:reference': 'BLOB SUB_TYPE 1', 'big-id': 'BIGINT PRIMARY KEY', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def sequence_name(self,tablename): return 'genid_%s' % tablename def trigger_name(self,tablename): return 'trg_id_%s' % tablename def RANDOM(self): return 'RAND()' def EPOCH(self, first): return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first) def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def SUBSTRING(self,field,parameters): return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1]) def LENGTH(self, first): return "CHAR_LENGTH(%s)" % self.expand(first) def CONTAINS(self,first,second,case_sensitive=False): if first.type.startswith('list:'): second = Expression(None,self.CONCAT('|',Expression( None,self.REPLACE(second,('|','||'))),'|')) return '(%s CONTAINING %s)' % (self.expand(first), self.expand(second, 'string')) def _drop(self,table,mode): sequence_name = table._sequence_name return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name] def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def _truncate(self,table,mode = ''): return ['DELETE FROM %s;' % table._tablename, 'SET GENERATOR %s TO 0;' % table._sequence_name] REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "firebird" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') port = int(m.group('port') or 3050) db = m.group('db') if not db: raise SyntaxError('Database name required') charset = m.group('charset') or 'UTF8' driver_args.update(dsn='%s/%s:%s' % (host,port,db), user = credential_decoder(user), password = credential_decoder(password), charset = charset) def connector(driver_args=driver_args): return self.driver.connect(**driver_args) self.connector = connector if do_connect: self.reconnect() def create_sequence_and_triggers(self, query, table, **args): tablename = table._tablename sequence_name = table._sequence_name trigger_name = table._trigger_name self.execute(query) self.execute('create generator %s;' % sequence_name) self.execute('set generator %s to 0;' % sequence_name) self.execute('create trigger %s for %s active before insert 
position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name)) def lastrowid(self,table): sequence_name = table._sequence_name self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) return long(self.cursor.fetchone()[0]) class FireBirdEmbeddedAdapter(FireBirdAdapter): drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "firebird" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' pathdb = m.group('path') if not pathdb: raise SyntaxError('Path required') charset = m.group('charset') if not charset: charset = 'UTF8' host = '' driver_args.update(host=host, database=pathdb, user=credential_decoder(user), password=credential_decoder(password), charset=charset) def connector(driver_args=driver_args): return self.driver.connect(**driver_args) self.connector = connector if do_connect: self.reconnect() class InformixAdapter(BaseAdapter): drivers = ('informixdb',) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'BLOB SUB_TYPE 1', 'json': 'BLOB SUB_TYPE 1', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB SUB_TYPE 0', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE PRECISION', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'SERIAL', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BLOB SUB_TYPE 1', 'list:string': 'BLOB SUB_TYPE 1', 'list:reference': 'BLOB SUB_TYPE 1', 'big-id': 'BIGSERIAL', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', } def RANDOM(self): return 'Random()' def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby fetch_amt = lmax - lmin dbms_version = int(self.connection.dbms_version.split('.')[0]) if lmin and (dbms_version >= 10): # Requires Informix 10.0+ sql_s += ' SKIP %d' % (lmin, ) if fetch_amt and (dbms_version >= 9): # Requires Informix 9.0+ sql_s += ' FIRST %d' % (fetch_amt, ) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'date': if isinstance(obj, (datetime.date, datetime.datetime)): obj = obj.isoformat()[:10] else: obj = str(obj) return 
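#
# Connection sketch (illustrative only): FireBirdAdapter URIs follow
# user:password@host[:port]/database with an optional ?set_encoding=<charset>
# (UTF8 by default); ids come from the per-table generator and trigger created
# above.  Credentials and the database alias are hypothetical; one of
# kinterbasdb, firebirdsql, fdb or pyodbc must be installed.
def _firebird_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db = DAL('firebird://sysdba:masterkey@localhost:3050/employee')
    db.define_table('thing', Field('name', 'string', length=80))
    return db.thing.insert(name='example')   # fetched via gen_id(<generator>, 0)
#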
"to_date('%s','%%Y-%%m-%%d')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T',' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: obj = str(obj) return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj return None REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "informix" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') user = credential_decoder(user) password = credential_decoder(password) dsn = '%s@%s' % (db,host) driver_args.update(user=user,password=password,autocommit=True) def connector(dsn=dsn,driver_args=driver_args): return self.driver.connect(dsn,**driver_args) self.connector = connector if do_connect: self.reconnect() def execute(self,command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) def lastrowid(self,table): return self.cursor.sqlerrd[1] def integrity_error_class(self): return informixdb.IntegrityError class InformixSEAdapter(InformixAdapter): """ work in progress """ def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class DB2Adapter(BaseAdapter): drivers = ('pyodbc',) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'json': 'CLOB', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'REAL', 'double': 'DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', } def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'RAND()' def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_o += ' FETCH FIRST %i ROWS 
ONLY' % lmax return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'blob': obj = base64.b64encode(str(obj)) return "BLOB('%s')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T','-').replace(':','.') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+'-00.00.00' return "'%s'" % obj return None def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "db2" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://', 1)[1] def connector(cnxn=ruri,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.connector = connector if do_connect: self.reconnect() def execute(self,command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) def lastrowid(self,table): self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) return long(self.cursor.fetchone()[0]) def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class TeradataAdapter(BaseAdapter): drivers = ('pyodbc',) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'json': 'CLOB', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'REAL', 'double': 'DOUBLE', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', # Modified Constraint syntax for Teradata. # Teradata does not support ON DELETE. 
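#
# Connection sketch (illustrative only): DB2Adapter, and TeradataAdapter below,
# pass everything after 'db2://' / 'teradata://' straight to pyodbc.connect() as
# an ODBC connection string, so either a DSN or a full key=value string works.
# DSNs and credentials below are hypothetical.
def _odbc_passthrough_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db2 = DAL('db2://DSN=sample;UID=db2inst1;PWD=secret')
    teradata = DAL('teradata://DSN=tdprod;UID=web2py;PWD=secret')
    db2.define_table('thing', Field('name', 'string', length=80))
    return db2, teradata
#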
'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 'reference': 'INT', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 'big-reference': 'BIGINT', 'reference FK': ' REFERENCES %(foreign_key)s', 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)', } def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "teradata" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://', 1)[1] def connector(cnxn=ruri,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.connector = connector if do_connect: self.reconnect() def LEFT_JOIN(self): return 'LEFT OUTER JOIN' # Similar to MSSQL, Teradata can't specify a range (for Pageby) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby sql_s += ' TOP %i' % lmax return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def _truncate(self, table, mode=''): tablename = table._tablename return ['DELETE FROM %s ALL;' % (tablename)] INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name # (ANSI-SQL wants this form of name # to be a delimited identifier) class IngresAdapter(BaseAdapter): drivers = ('pyodbc',) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'CLOB', 'json': 'CLOB', 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 'integer': 'INTEGER4', # or int8... 
'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'ANSIDATE', 'time': 'TIME WITHOUT TIME ZONE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME, 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'CLOB', 'list:string': 'CLOB', 'list:reference': 'CLOB', 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME, 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO } def LEFT_JOIN(self): return 'LEFT OUTER JOIN' def RANDOM(self): return 'RANDOM()' def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby fetch_amt = lmax - lmin if fetch_amt: sql_s += ' FIRST %d ' % (fetch_amt, ) if lmin: # Requires Ingres 9.2+ sql_o += ' OFFSET %d' % (lmin, ) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "ingres" self._driver = pyodbc self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() connstr = uri.split(':', 1)[1] # Simple URI processing connstr = connstr.lstrip() while connstr.startswith('/'): connstr = connstr[1:] if '=' in connstr: # Assume we have a regular ODBC connection string and just use it ruri = connstr else: # Assume only (local) dbname is passed in with OS auth database_name = connstr default_driver_name = 'Ingres' vnode = '(local)' servertype = 'ingres' ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name) def connector(cnxn=ruri,driver_args=driver_args): return self.driver.connect(cnxn,**driver_args) self.connector = connector # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns if do_connect: self.reconnect() def create_sequence_and_triggers(self, query, table, **args): # post create table auto inc code (if needed) # modify table to btree for performance.... # Older Ingres releases could use rule/trigger like Oracle above. if hasattr(table,'_primarykey'): modify_tbl_sql = 'modify %s to btree unique on %s' % \ (table._tablename, ', '.join(["'%s'" % x for x in table.primarykey])) self.execute(modify_tbl_sql) else: tmp_seqname='%s_iisq' % table._tablename query=query.replace(INGRES_SEQNAME, tmp_seqname) self.execute('create sequence %s' % tmp_seqname) self.execute(query) self.execute('modify %s to btree unique on %s' % (table._tablename, 'id')) def lastrowid(self,table): tmp_seqname='%s_iisq' % table self.execute('select current value for %s' % tmp_seqname) return long(self.cursor.fetchone()[0]) # don't really need int type cast here... 
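#
# Connection sketch (illustrative only): IngresAdapter accepts either a full ODBC
# connection string ('ingres://Driver={Ingres};Server=...;Database=...') or just a
# local database name ('ingres://mydb'), for which a default
# 'Driver={Ingres};Server=(local)' string is built; each table gets a
# <tablename>_iisq sequence as shown above.  Names below are hypothetical and
# pyodbc is required.
def _ingres_usage_sketch():
    from dal import DAL, Field   # assumed import path for this module
    db = DAL('ingres://demodb')   # local database, OS authentication
    db.define_table('thing', Field('name', 'string', length=80))
    return db.thing.insert(name='example')   # current value of thing_iisq
#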
def integrity_error_class(self): return self._driver.IntegrityError class IngresUnicodeAdapter(IngresAdapter): drivers = ('pyodbc',) types = { 'boolean': 'CHAR(1)', 'string': 'NVARCHAR(%(length)s)', 'text': 'NCLOB', 'json': 'NCLOB', 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 'blob': 'BLOB', 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 'integer': 'INTEGER4', # or int8... 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'FLOAT8', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'ANSIDATE', 'time': 'TIME WITHOUT TIME ZONE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME, 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'NCLOB', 'list:string': 'NCLOB', 'list:reference': 'NCLOB', 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME, 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO } class SAPDBAdapter(BaseAdapter): drivers = ('sapdb',) support_distributed_transaction = False types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'LONG', 'json': 'LONG', 'password': 'VARCHAR(%(length)s)', 'blob': 'LONG', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INT', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE PRECISION', 'decimal': 'FIXED(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'TIME', 'datetime': 'TIMESTAMP', 'id': 'INT PRIMARY KEY', 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'LONG', 'list:string': 'LONG', 'list:reference': 'LONG', 'big-id': 'BIGINT PRIMARY KEY', 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', } def sequence_name(self,table): return '%s_id_Seq' % table def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby if len(sql_w) > 1: sql_w_row = sql_w + ' AND w_row > %i' % lmin else: sql_w_row = 'WHERE w_row > %i' % lmin return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def create_sequence_and_triggers(self, query, table, **args): # following lines should only be executed if table._sequence_name does not exist self.execute('CREATE SEQUENCE %s;' % table._sequence_name) self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ % (table._tablename, table._id.name, table._sequence_name)) self.execute(query) REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "sapdb" self.uri = uri if 
do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') def connector(user=user, password=password, database=db, host=host, driver_args=driver_args): return self.driver.Connection(user, password, database, host, **driver_args) self.connector = connector if do_connect: self.reconnect() def lastrowid(self,table): self.execute("select %s.NEXTVAL from dual" % table._sequence_name) return long(self.cursor.fetchone()[0]) class CubridAdapter(MySQLAdapter): drivers = ('cubriddb',) REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "cubrid" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') port = int(m.group('port') or '30000') charset = m.group('charset') or 'utf8' user = credential_decoder(user) passwd = credential_decoder(password) def connector(host=host,port=port,db=db, user=user,passwd=password,driver_args=driver_args): return self.driver.connect(host,port,db,user,passwd,**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): self.execute('SET FOREIGN_KEY_CHECKS=1;') self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") ######## GAE MySQL ########## class DatabaseStoredFile: web2py_filesystem = False def escape(self,obj): return self.db._adapter.escape(obj) def __init__(self,db,filename,mode): if not db._adapter.dbengine in ('mysql', 'postgres'): raise RuntimeError("only MySQL/Postgres can store metadata .table files in database for now") self.db = db self.filename = filename self.mode = mode if not self.web2py_filesystem: if db._adapter.dbengine == 'mysql': sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;" elif db._adapter.dbengine == 'postgres': sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));" self.db.executesql(sql) DatabaseStoredFile.web2py_filesystem = True self.p=0 self.data = '' if mode in ('r','rw','a'): query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \ % filename rows = self.db.executesql(query) if rows: self.data = 
rows[0][0] elif exists(filename): datafile = open(filename, 'r') try: self.data = datafile.read() finally: datafile.close() elif mode in ('r','rw'): raise RuntimeError("File %s does not exist" % filename) def read(self, bytes): data = self.data[self.p:self.p+bytes] self.p += len(data) return data def readline(self): i = self.data.find('\n',self.p)+1 if i>0: data, self.p = self.data[self.p:i], i else: data, self.p = self.data[self.p:], len(self.data) return data def write(self,data): self.data += data def close_connection(self): if self.db is not None: self.db.executesql( "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename) query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\ % (self.filename, self.data.replace("'","''")) self.db.executesql(query) self.db.commit() self.db = None def close(self): self.close_connection() @staticmethod def exists(db, filename): if exists(filename): return True query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename if db.executesql(query): return True return False class UseDatabaseStoredFile: def file_exists(self, filename): return DatabaseStoredFile.exists(self.db,filename) def file_open(self, filename, mode='rb', lock=True): return DatabaseStoredFile(self.db,filename,mode) def file_close(self, fileobj): fileobj.close_connection() def file_delete(self,filename): query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename self.db.executesql(query) self.db.commit() class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): uploads_in_blob = True REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$') def __init__(self, db, uri='google:sql://realm:domain/database', pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "mysql" self.uri = uri self.pool_size = pool_size self.db_codec = db_codec self._after_connection = after_connection self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split( os.sep+'applications'+os.sep,1)[1]) ruri = uri.split("://")[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri) instance = credential_decoder(m.group('instance')) self.dbstring = db = credential_decoder(m.group('db')) driver_args['instance'] = instance if not 'charset' in driver_args: driver_args['charset'] = 'utf8' self.createdb = createdb = adapter_args.get('createdb',True) if not createdb: driver_args['database'] = db def connector(driver_args=driver_args): return rdbms.connect(**driver_args) self.connector = connector if do_connect: self.reconnect() def after_connection(self): if self.createdb: # self.execute('DROP DATABASE %s' % self.dbstring) self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring) self.execute('USE %s' % self.dbstring) self.execute("SET FOREIGN_KEY_CHECKS=1;") self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") def execute(self, command, *a, **b): return self.log_execute(command.decode('utf8'), *a, **b) class NoSQLAdapter(BaseAdapter): can_select_for_update = False @staticmethod def to_unicode(obj): if isinstance(obj, str): return obj.decode('utf8') elif not isinstance(obj, unicode): return unicode(obj) return obj def id_query(self, table): return table._id > 0 def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if isinstance(obj, CALLABLETYPES): obj = obj() if isinstance(fieldtype, SQLCustomType): return fieldtype.encoder(obj) if isinstance(obj, (Expression, Field)): raise 
SyntaxError("non supported on GAE") if self.dbengine == 'google:datastore': if isinstance(fieldtype, gae.Property): return obj is_string = isinstance(fieldtype,str) is_list = is_string and field_is_type('list:') if is_list: if not obj: obj = [] if not isinstance(obj, (list, tuple)): obj = [obj] if obj == '' and not \ (is_string and fieldtype[:2] in ['st','te', 'pa','up']): return None if not obj is None: if isinstance(obj, list) and not is_list: obj = [self.represent(o, fieldtype) for o in obj] elif fieldtype in ('integer','bigint','id'): obj = long(obj) elif fieldtype == 'double': obj = float(obj) elif is_string and field_is_type('reference'): if isinstance(obj, (Row, Reference)): obj = obj['id'] obj = long(obj) elif fieldtype == 'boolean': if obj and not str(obj)[0].upper() in '0F': obj = True else: obj = False elif fieldtype == 'date': if not isinstance(obj, datetime.date): (y, m, d) = map(int,str(obj).strip().split('-')) obj = datetime.date(y, m, d) elif isinstance(obj,datetime.datetime): (y, m, d) = (obj.year, obj.month, obj.day) obj = datetime.date(y, m, d) elif fieldtype == 'time': if not isinstance(obj, datetime.time): time_items = map(int,str(obj).strip().split(':')[:3]) if len(time_items) == 3: (h, mi, s) = time_items else: (h, mi, s) = time_items + [0] obj = datetime.time(h, mi, s) elif fieldtype == 'datetime': if not isinstance(obj, datetime.datetime): (y, m, d) = map(int,str(obj)[:10].strip().split('-')) time_items = map(int,str(obj)[11:].strip().split(':')[:3]) while len(time_items)<3: time_items.append(0) (h, mi, s) = time_items obj = datetime.datetime(y, m, d, h, mi, s) elif fieldtype == 'blob': pass elif fieldtype == 'json': if isinstance(obj, basestring): obj = self.to_unicode(obj) if have_serializers: obj = serializers.loads_json(obj) elif simplejson: obj = simplejson.loads(obj) else: raise RuntimeError("missing simplejson") elif is_string and field_is_type('list:string'): return map(self.to_unicode,obj) elif is_list: return map(int,obj) else: obj = self.to_unicode(obj) return obj def _insert(self,table,fields): return 'insert %s in %s' % (fields, table) def _count(self,query,distinct=None): return 'count %s' % repr(query) def _select(self,query,fields,attributes): return 'select %s where %s' % (repr(fields), repr(query)) def _delete(self,tablename, query): return 'delete %s where %s' % (repr(tablename),repr(query)) def _update(self,tablename,query,fields): return 'update %s (%s) where %s' % (repr(tablename), repr(fields),repr(query)) def commit(self): """ remember: no transactions on many NoSQL """ pass def rollback(self): """ remember: no transactions on many NoSQL """ pass def close_connection(self): """ remember: no transactions on many NoSQL """ pass # these functions should never be called! 
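    # ------------------------------------------------------------------
    # Illustrative note (an assumption, not adapter API): represent()
    # above is the single point where Python values are coerced for a
    # NoSQL backend, e.g. roughly
    #
    #   adapter.represent('5', 'integer')             # -> 5L
    #   adapter.represent('2012-01-31', 'date')       # -> datetime.date(2012, 1, 31)
    #   adapter.represent('T', 'boolean')             # -> True
    #   adapter.represent(['a', 'b'], 'list:string')  # -> [u'a', u'b']
    #
    # The methods that follow cover SQL-only syntax (joins, string
    # functions, DDL, transactions); NoSQL backends have no generic
    # equivalent, so each one simply raises SyntaxError("Not supported").
    # ------------------------------------------------------------------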
def OR(self,first,second): raise SyntaxError("Not supported") def AND(self,first,second): raise SyntaxError("Not supported") def AS(self,first,second): raise SyntaxError("Not supported") def ON(self,first,second): raise SyntaxError("Not supported") def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported") def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported") def ADD(self,first,second): raise SyntaxError("Not supported") def SUB(self,first,second): raise SyntaxError("Not supported") def MUL(self,first,second): raise SyntaxError("Not supported") def DIV(self,first,second): raise SyntaxError("Not supported") def LOWER(self,first): raise SyntaxError("Not supported") def UPPER(self,first): raise SyntaxError("Not supported") def EXTRACT(self,first,what): raise SyntaxError("Not supported") def LENGTH(self, first): raise SyntaxError("Not supported") def AGGREGATE(self,first,what): raise SyntaxError("Not supported") def LEFT_JOIN(self): raise SyntaxError("Not supported") def RANDOM(self): raise SyntaxError("Not supported") def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported") def PRIMARY_KEY(self,key): raise SyntaxError("Not supported") def ILIKE(self,first,second): raise SyntaxError("Not supported") def drop(self,table,mode): raise SyntaxError("Not supported") def alias(self,table,alias): raise SyntaxError("Not supported") def migrate_table(self,*a,**b): raise SyntaxError("Not supported") def distributed_transaction_begin(self,key): raise SyntaxError("Not supported") def prepare(self,key): raise SyntaxError("Not supported") def commit_prepared(self,key): raise SyntaxError("Not supported") def rollback_prepared(self,key): raise SyntaxError("Not supported") def concat_add(self,table): raise SyntaxError("Not supported") def constraint_name(self, table, fieldname): raise SyntaxError("Not supported") def create_sequence_and_triggers(self, query, table, **args): pass def log_execute(self,*a,**b): raise SyntaxError("Not supported") def execute(self,*a,**b): raise SyntaxError("Not supported") def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported") def lastrowid(self,table): raise SyntaxError("Not supported") def integrity_error_class(self): raise SyntaxError("Not supported") def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported") class GAEF(object): def __init__(self,name,op,value,apply): self.name=name=='id' and '__key__' or name self.op=op self.value=value self.apply=apply def __repr__(self): return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value)) class GoogleDatastoreAdapter(NoSQLAdapter): uploads_in_blob = True types = {} def file_exists(self, filename): pass def file_open(self, filename, mode='rb', lock=True): pass def file_close(self, fileobj): pass REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.types.update({ 'boolean': gae.BooleanProperty, 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)), 'text': gae.TextProperty, 'json': gae.TextProperty, 'password': gae.StringProperty, 'blob': gae.BlobProperty, 'upload': gae.StringProperty, 'integer': gae.IntegerProperty, 'bigint': gae.IntegerProperty, 'float': gae.FloatProperty, 'double': gae.FloatProperty, 'decimal': GAEDecimalProperty, 'date': gae.DateProperty, 'time': gae.TimeProperty, 'datetime': gae.DateTimeProperty, 
'id': None, 'reference': gae.IntegerProperty, 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)), 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), }) self.db = db self.uri = uri self.dbengine = 'google:datastore' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self._after_connection = after_connection self.pool_size = 0 match = self.REGEX_NAMESPACE.match(uri) if match: namespace_manager.set_namespace(match.group('namespace')) def parse_id(self, value, field_type): return value def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None): myfields = {} for field in table: if isinstance(polymodel,Table) and field.name in polymodel.fields(): continue attr = {} if isinstance(field.custom_qualifier, dict): #this is custom properties to add to the GAE field declartion attr = field.custom_qualifier field_type = field.type if isinstance(field_type, SQLCustomType): ftype = self.types[field_type.native or field_type.type](**attr) elif isinstance(field_type, gae.Property): ftype = field_type elif field_type.startswith('id'): continue elif field_type.startswith('decimal'): precision, scale = field_type[7:].strip('()').split(',') precision = int(precision) scale = int(scale) ftype = GAEDecimalProperty(precision, scale, **attr) elif field_type.startswith('reference'): if field.notnull: attr = dict(required=True) referenced = field_type[10:].strip() ftype = self.types[field_type[:9]](referenced, **attr) elif field_type.startswith('list:reference'): if field.notnull: attr['required'] = True referenced = field_type[15:].strip() ftype = self.types[field_type[:14]](**attr) elif field_type.startswith('list:'): ftype = self.types[field_type](**attr) elif not field_type in self.types\ or not self.types[field_type]: raise SyntaxError('Field: unknown field type: %s' % field_type) else: ftype = self.types[field_type](**attr) myfields[field.name] = ftype if not polymodel: table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) elif polymodel==True: table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) elif isinstance(polymodel,Table): table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) else: raise SyntaxError("polymodel must be None, True, a table or a tablename") return None def expand(self,expression,field_type=None): if isinstance(expression,Field): if expression.type in ('text', 'blob', 'json'): raise SyntaxError('AppEngine does not index by: %s' % expression.type) return expression.name elif isinstance(expression, (Expression, Query)): if not expression.second is None: return expression.op(expression.first, expression.second) elif not expression.first is None: return expression.op(expression.first) else: return expression.op() elif field_type: return self.represent(expression,field_type) elif isinstance(expression,(list,tuple)): return ','.join([self.represent(item,field_type) for item in expression]) else: return str(expression) ### TODO from gql.py Expression def AND(self,first,second): a = self.expand(first) b = self.expand(second) if b[0].name=='__key__' and a[0].name!='__key__': return b+a return a+b def EQ(self,first,second=None): if isinstance(second, Key): return [GAEF(first.name,'=',second,lambda a,b:a==b)] return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)] def NE(self,first,second=None): if first.type != 'id': return 
[GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] else: if not second is None: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'!=',second,lambda a,b:a!=b)] def LT(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'<',second,lambda a,b:a<b)] def LE(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'<=',second,lambda a,b:a<=b)] def GT(self,first,second=None): if first.type != 'id' or second==0 or second == '0': return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'>',second,lambda a,b:a>b)] def GE(self,first,second=None): if first.type != 'id': return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] else: second = Key.from_path(first._tablename, long(second)) return [GAEF(first.name,'>=',second,lambda a,b:a>=b)] def INVERT(self,first): return '-%s' % first.name def COMMA(self,first,second): return '%s, %s' % (self.expand(first),self.expand(second)) def BELONGS(self,first,second=None): if not isinstance(second,(list, tuple)): raise SyntaxError("Not supported") if first.type != 'id': return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] else: second = [Key.from_path(first._tablename, int(i)) for i in second] return [GAEF(first.name,'in',second,lambda a,b:a in b)] def CONTAINS(self,first,second,case_sensitive=False): # silently ignoring: GAE can only do case sensitive matches! 
if not first.type.startswith('list:'): raise SyntaxError("Not supported") return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)] def NOT(self,first): nops = { self.EQ: self.NE, self.NE: self.EQ, self.LT: self.GE, self.GT: self.LE, self.LE: self.GT, self.GE: self.LT} if not isinstance(first,Query): raise SyntaxError("Not suported") nop = nops.get(first.op,None) if not nop: raise SyntaxError("Not suported %s" % first.op.__name__) first.op = nop return self.expand(first) def truncate(self,table,mode): self.db(self.db._adapter.id_query(table)).delete() def select_raw(self,query,fields=None,attributes=None): db = self.db fields = fields or [] attributes = attributes or {} args_get = attributes.get new_fields = [] for item in fields: if isinstance(item,SQLALL): new_fields += item._table else: new_fields.append(item) fields = new_fields if query: tablename = self.get_table(query) elif fields: tablename = fields[0].tablename query = db._adapter.id_query(fields[0].table) else: raise SyntaxError("Unable to determine a tablename") if query: if use_common_filters(query): query = self.common_filter(query,[tablename]) #tableobj is a GAE Model class (or subclass) tableobj = db[tablename]._tableobj filters = self.expand(query) projection = None if len(db[tablename].fields) == len(fields): #getting all fields, not a projection query projection = None elif args_get('projection') == True: projection = [] for f in fields: if f.type in ['text', 'blob', 'json']: raise SyntaxError( "text and blob field types not allowed in projection queries") else: projection.append(f.name) elif args_get('filterfields') == True: projection = [] for f in fields: projection.append(f.name) # real projection's can't include 'id'. # it will be added to the result later query_projection = [ p for p in projection if \ p != db[tablename]._id.name] if projection and \ args_get('projection') == True\ else None cursor = None if isinstance(args_get('reusecursor'), str): cursor = args_get('reusecursor') items = gae.Query(tableobj, projection=query_projection, cursor=cursor) for filter in filters: if args_get('projection') == True and \ filter.name in query_projection and \ filter.op in ['=', '<=', '>=']: raise SyntaxError( "projection fields cannot have equality filters") if filter.name=='__key__' and filter.op=='>' and filter.value==0: continue elif filter.name=='__key__' and filter.op=='=': if filter.value==0: items = [] elif isinstance(filter.value, Key): # key qeuries return a class instance, # can't use projection # extra values will be ignored in post-processing later item = tableobj.get(filter.value) items = (item and [item]) or [] else: # key qeuries return a class instance, # can't use projection # extra values will be ignored in post-processing later item = tableobj.get_by_id(filter.value) items = (item and [item]) or [] elif isinstance(items,list): # i.e. there is a single record! items = [i for i in items if filter.apply( getattr(item,filter.name),filter.value)] else: if filter.name=='__key__' and filter.op != 'in': items.order('__key__') items = items.filter('%s %s' % (filter.name,filter.op), filter.value) if not isinstance(items,list): if args_get('left', None): raise SyntaxError('Set: no left join in appengine') if args_get('groupby', None): raise SyntaxError('Set: no groupby in appengine') orderby = args_get('orderby', False) if orderby: ### THIS REALLY NEEDS IMPROVEMENT !!! 
if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) if isinstance(orderby,Expression): orderby = self.expand(orderby) orders = orderby.split(', ') for order in orders: order={'-id':'-__key__','id':'__key__'}.get(order,order) items = items.order(order) if args_get('limitby', None): (lmin, lmax) = attributes['limitby'] (limit, offset) = (lmax - lmin, lmin) rows = items.fetch(limit,offset=offset) #cursor is only useful if there was a limit and we didn't return # all results if args_get('reusecursor'): db['_lastcursor'] = items.cursor() items = rows return (items, tablename, projection or db[tablename].fields) def select(self,query,fields,attributes): """ This is the GAE version of select. some notes to consider: - db['_lastsql'] is not set because there is not SQL statement string for a GAE query - 'nativeRef' is a magical fieldname used for self references on GAE - optional attribute 'projection' when set to True will trigger use of the GAE projection queries. note that there are rules for what is accepted imposed by GAE: each field must be indexed, projection queries cannot contain blob or text fields, and you cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection - optional attribute 'filterfields' when set to True web2py will only parse the explicitly listed fields into the Rows object, even though all fields are returned in the query. This can be used to reduce memory usage in cases where true projection queries are not usable. - optional attribute 'reusecursor' allows use of cursor with queries that have the limitby attribute. Set the attribute to True for the first query, set it to the value of db['_lastcursor'] to continue a previous query. The user must save the cursor value between requests, and the filters must be identical. It is up to the user to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors """ (items, tablename, fields) = self.select_raw(query,fields,attributes) # self.db['_lastsql'] = self._select(query,fields,attributes) rows = [[(t==self.db[tablename]._id.name and item) or \ (t=='nativeRef' and item) or getattr(item, t) \ for t in fields] for item in items] colnames = ['%s.%s' % (tablename, t) for t in fields] processor = attributes.get('processor',self.parse) return processor(rows,fields,colnames,False) def count(self,query,distinct=None,limit=None): if distinct: raise RuntimeError("COUNT DISTINCT not supported") (items, tablename, fields) = self.select_raw(query) # self.db['_lastsql'] = self._count(query) try: return len(items) except TypeError: return items.count(limit=limit) def delete(self,tablename, query): """ This function was changed on 2010-05-04 because according to http://code.google.com/p/googleappengine/issues/detail?id=3119 GAE no longer supports deleting more than 1000 records. 
""" # self.db['_lastsql'] = self._delete(tablename,query) (items, tablename, fields) = self.select_raw(query) # items can be one item or a query if not isinstance(items,list): #use a keys_only query to ensure that this runs as a datastore # small operations leftitems = items.fetch(1000, keys_only=True) counter = 0 while len(leftitems): counter += len(leftitems) gae.delete(leftitems) leftitems = items.fetch(1000, keys_only=True) else: counter = len(items) gae.delete(items) return counter def update(self,tablename,query,update_fields): # self.db['_lastsql'] = self._update(tablename,query,update_fields) (items, tablename, fields) = self.select_raw(query) counter = 0 for item in items: for field, value in update_fields: setattr(item, field.name, self.represent(value,field.type)) item.put() counter += 1 LOGGER.info(str(counter)) return counter def insert(self,table,fields): dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) # table._db['_lastsql'] = self._insert(table,fields) tmp = table._tableobj(**dfields) tmp.put() rid = Reference(tmp.key().id()) (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key()) return rid def bulk_insert(self,table,items): parsed_items = [] for item in items: dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) parsed_items.append(table._tableobj(**dfields)) gae.put(parsed_items) return True def uuid2int(uuidv): return uuid.UUID(uuidv).int def int2uuid(n): return str(uuid.UUID(int=n)) class CouchDBAdapter(NoSQLAdapter): drivers = ('couchdb',) uploads_in_blob = True types = { 'boolean': bool, 'string': str, 'text': str, 'json': str, 'password': str, 'blob': str, 'upload': str, 'integer': long, 'bigint': long, 'float': float, 'double': float, 'date': datetime.date, 'time': datetime.time, 'datetime': datetime.datetime, 'id': long, 'reference': long, 'list:string': list, 'list:integer': list, 'list:reference': list, } def file_exists(self, filename): pass def file_open(self, filename, mode='rb', lock=True): pass def file_close(self, fileobj): pass def expand(self,expression,field_type=None): if isinstance(expression,Field): if expression.type=='id': return "%s._id" % expression.tablename return BaseAdapter.expand(self,expression,field_type) def AND(self,first,second): return '(%s && %s)' % (self.expand(first),self.expand(second)) def OR(self,first,second): return '(%s || %s)' % (self.expand(first),self.expand(second)) def EQ(self,first,second): if second is None: return '(%s == null)' % self.expand(first) return '(%s == %s)' % (self.expand(first),self.expand(second,first.type)) def NE(self,first,second): if second is None: return '(%s != null)' % self.expand(first) return '(%s != %s)' % (self.expand(first),self.expand(second,first.type)) def COMMA(self,first,second): return '%s + %s' % (self.expand(first),self.expand(second)) def represent(self, obj, fieldtype): value = NoSQLAdapter.represent(self, obj, fieldtype) if fieldtype=='id': return repr(str(long(value))) elif fieldtype in ('date','time','datetime','boolean'): return serializers.json(value) return repr(not isinstance(value,unicode) and value \ or value and value.encode('utf8')) def __init__(self,db,uri='couchdb://127.0.0.1:5984', pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.uri = uri if do_connect: self.find_driver(adapter_args) self.dbengine = 'couchdb' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self._after_connection = after_connection 
self.pool_size = pool_size url='http://'+uri[10:] def connector(url=url,driver_args=driver_args): return self.driver.Server(url,**driver_args) self.reconnect(connector,cursor=False) def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): if migrate: try: self.connection.create(table._tablename) except: pass def insert(self,table,fields): id = uuid2int(web2py_uuid()) ctable = self.connection[table._tablename] values = dict((k.name,self.represent(v,k.type)) for k,v in fields) values['_id'] = str(id) ctable.save(values) return id def _select(self,query,fields,attributes): if not isinstance(query,Query): raise SyntaxError("Not Supported") for key in set(attributes.keys())-SELECT_ARGS: raise SyntaxError('invalid select attribute: %s' % key) new_fields=[] for item in fields: if isinstance(item,SQLALL): new_fields += item._table else: new_fields.append(item) def uid(fd): return fd=='id' and '_id' or fd def get(row,fd): return fd=='id' and long(row['_id']) or row.get(fd,None) fields = new_fields tablename = self.get_table(query) fieldnames = [f.name for f in (fields or self.db[tablename])] colnames = ['%s.%s' % (tablename,k) for k in fieldnames] fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\ dict(t=tablename, query=self.expand(query), order='%s._id' % tablename, fields=fields) return fn, colnames def select(self,query,fields,attributes): if not isinstance(query,Query): raise SyntaxError("Not Supported") fn, colnames = self._select(query,fields,attributes) tablename = colnames[0].split('.')[0] ctable = self.connection[tablename] rows = [cols['value'] for cols in ctable.query(fn)] processor = attributes.get('processor',self.parse) return processor(rows,fields,colnames,False) def delete(self,tablename,query): if not isinstance(query,Query): raise SyntaxError("Not Supported") if query.first.type=='id' and query.op==self.EQ: id = query.second tablename = query.first.tablename assert(tablename == query.first.tablename) ctable = self.connection[tablename] try: del ctable[str(id)] return 1 except couchdb.http.ResourceNotFound: return 0 else: tablename = self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) ctable = self.connection[tablename] for row in rows: del ctable[str(row.id)] return len(rows) def update(self,tablename,query,fields): if not isinstance(query,Query): raise SyntaxError("Not Supported") if query.first.type=='id' and query.op==self.EQ: id = query.second tablename = query.first.tablename ctable = self.connection[tablename] try: doc = ctable[str(id)] for key,value in fields: doc[key.name] = self.represent(value,self.db[tablename][key.name].type) ctable.save(doc) return 1 except couchdb.http.ResourceNotFound: return 0 else: tablename = self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) ctable = self.connection[tablename] table = self.db[tablename] for row in rows: doc = ctable[str(row.id)] for key,value in fields: doc[key.name] = self.represent(value,table[key.name].type) ctable.save(doc) return len(rows) def count(self,query,distinct=None): if distinct: raise RuntimeError("COUNT DISTINCT not supported") if not isinstance(query,Query): raise SyntaxError("Not Supported") tablename = self.get_table(query) rows = self.select(query,[self.db[tablename]._id],{}) return len(rows) def cleanup(text): """ validates that the given text is clean: only contains [0-9a-zA-Z_] """ if not REGEX_ALPHANUMERIC.match(text): raise SyntaxError('invalid 
table or field name: %s' % text) return text class MongoDBAdapter(NoSQLAdapter): native_json = True drivers = ('pymongo',) uploads_in_blob = True types = { 'boolean': bool, 'string': str, 'text': str, 'json': str, 'password': str, 'blob': str, 'upload': str, 'integer': long, 'bigint': long, 'float': float, 'double': float, 'date': datetime.date, 'time': datetime.time, 'datetime': datetime.datetime, 'id': long, 'reference': long, 'list:string': list, 'list:integer': list, 'list:reference': list, } error_messages = {"javascript_needed": "This must yet be replaced" + " with javascript in order to work."} def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.uri = uri if do_connect: self.find_driver(adapter_args) import random from bson.objectid import ObjectId from bson.son import SON import pymongo.uri_parser m = pymongo.uri_parser.parse_uri(uri) self.SON = SON self.ObjectId = ObjectId self.random = random self.dbengine = 'mongodb' self.folder = folder db['_lastsql'] = '' self.db_codec = 'UTF-8' self._after_connection = after_connection self.pool_size = pool_size #this is the minimum amount of replicates that it should wait # for on insert/update self.minimumreplication = adapter_args.get('minimumreplication',0) # by default all inserts and selects are performand asynchronous, # but now the default is # synchronous, except when overruled by either this default or # function parameter self.safe = adapter_args.get('safe',True) if isinstance(m,tuple): m = {"database" : m[1]} if m.get('database')==None: raise SyntaxError("Database is required!") def connector(uri=self.uri,m=m): try: # Connection() is deprecated if hasattr(self.driver, "MongoClient"): Connection = self.driver.MongoClient else: Connection = self.driver.Connection return Connection(uri)[m.get('database')] except self.driver.errors.ConnectionFailure: inst = sys.exc_info()[1] raise SyntaxError("The connection to " + uri + " could not be made") self.reconnect(connector,cursor=False) def object_id(self, arg=None): """ Convert input to a valid Mongodb ObjectId instance self.object_id("<random>") -> ObjectId (not unique) instance """ if not arg: arg = 0 if isinstance(arg, basestring): # we assume an integer as default input rawhex = len(arg.replace("0x", "").replace("L", "")) == 24 if arg.isdigit() and (not rawhex): arg = int(arg) elif arg == "<random>": arg = int("0x%sL" % \ "".join([self.random.choice("0123456789abcdef") \ for x in range(24)]), 0) elif arg.isalnum(): if not arg.startswith("0x"): arg = "0x%s" % arg try: arg = int(arg, 0) except ValueError, e: raise ValueError( "invalid objectid argument string: %s" % e) else: raise ValueError("Invalid objectid argument string. 
" + "Requires an integer or base 16 value") elif isinstance(arg, self.ObjectId): return arg if not isinstance(arg, (int, long)): raise TypeError("object_id argument must be of type " + "ObjectId or an objectid representable integer") if arg == 0: hexvalue = "".zfill(24) else: hexvalue = hex(arg)[2:].replace("L", "") return self.ObjectId(hexvalue) def parse_reference(self, value, field_type): # here we have to check for ObjectID before base parse if isinstance(value, self.ObjectId): value = long(str(value), 16) return super(MongoDBAdapter, self).parse_reference(value, field_type) def parse_id(self, value, field_type): if isinstance(value, self.ObjectId): value = long(str(value), 16) return super(MongoDBAdapter, self).parse_id(value, field_type) def represent(self, obj, fieldtype): # the base adatpter does not support MongoDB ObjectId if isinstance(obj, self.ObjectId): value = obj else: value = NoSQLAdapter.represent(self, obj, fieldtype) # reference types must be convert to ObjectID if fieldtype =='date': if value == None: return value # this piece of data can be stripped off based on the fieldtype t = datetime.time(0, 0, 0) # mongodb doesn't has a date object and so it must datetime, # string or integer return datetime.datetime.combine(value, t) elif fieldtype == 'time': if value == None: return value # this piece of data can be stripped of based on the fieldtype d = datetime.date(2000, 1, 1) # mongodb doesn't has a time object and so it must datetime, # string or integer return datetime.datetime.combine(d, value) elif (isinstance(fieldtype, basestring) and fieldtype.startswith('list:')): if fieldtype.startswith('list:reference'): newval = [] for v in value: newval.append(self.object_id(v)) return newval return value elif ((isinstance(fieldtype, basestring) and fieldtype.startswith("reference")) or (isinstance(fieldtype, Table))): value = self.object_id(value) return value # Safe determines whether a asynchronious request is done or a # synchronious action is done # For safety, we use by default synchronous requests def insert(self, table, fields, safe=None): if safe==None: safe = self.safe ctable = self.connection[table._tablename] values = dict() for k, v in fields: if not k.name in ["id", "safe"]: fieldname = k.name fieldtype = table[k.name].type if ("reference" in fieldtype) or (fieldtype=="id"): values[fieldname] = self.object_id(v) else: values[fieldname] = self.represent(v, fieldtype) ctable.insert(values, safe=safe) return long(str(values['_id']), 16) def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None, isCapped=False): if isCapped: raise RuntimeError("Not implemented") def count(self, query, distinct=None, snapshot=True): if distinct: raise RuntimeError("COUNT DISTINCT not supported") if not isinstance(query,Query): raise SyntaxError("Not Supported") tablename = self.get_table(query) return long(self.select(query,[self.db[tablename]._id], {}, count=True,snapshot=snapshot)['count']) # Maybe it would be faster if we just implemented the pymongo # .count() function which is probably quicker? # therefor call __select() connection[table].find(query).count() # Since this will probably reduce the return set? 
def expand(self, expression, field_type=None): if isinstance(expression, Query): # any query using 'id':= # set name as _id (as per pymongo/mongodb primary key) # convert second arg to an objectid field # (if its not already) # if second arg is 0 convert to objectid if isinstance(expression.first,Field) and \ ((expression.first.type == 'id') or \ ("reference" in expression.first.type)): if expression.first.type == 'id': expression.first.name = '_id' # cast to Mongo ObjectId if isinstance(expression.second, (tuple, list, set)): expression.second = [self.object_id(item) for item in expression.second] else: expression.second = self.object_id(expression.second) result = expression.op(expression.first, expression.second) if isinstance(expression, Field): if expression.type=='id': result = "_id" else: result = expression.name elif isinstance(expression, (Expression, Query)): if not expression.second is None: result = expression.op(expression.first, expression.second) elif not expression.first is None: result = expression.op(expression.first) elif not isinstance(expression.op, str): result = expression.op() else: result = expression.op elif field_type: result = self.represent(expression,field_type) elif isinstance(expression,(list,tuple)): result = ','.join(self.represent(item,field_type) for item in expression) else: result = expression return result def _select(self, query, fields, attributes): if 'for_update' in attributes: logging.warn('mongodb does not support for_update') for key in set(attributes.keys())-set(('limitby', 'orderby','for_update')): if attributes[key]!=None: logging.warn('select attribute not implemented: %s' % key) new_fields=[] mongosort_list = [] # try an orderby attribute orderby = attributes.get('orderby', False) limitby = attributes.get('limitby', False) # distinct = attributes.get('distinct', False) if orderby: if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) # !!!! 
need to add 'random' for f in self.expand(orderby).split(','): if f.startswith('-'): mongosort_list.append((f[1:], -1)) else: mongosort_list.append((f, 1)) if limitby: limitby_skip, limitby_limit = limitby else: limitby_skip = limitby_limit = 0 mongofields_dict = self.SON() mongoqry_dict = {} for item in fields: if isinstance(item, SQLALL): new_fields += item._table else: new_fields.append(item) fields = new_fields if isinstance(query,Query): tablename = self.get_table(query) elif len(fields) != 0: tablename = fields[0].tablename else: raise SyntaxError("The table name could not be found in " + "the query nor from the select statement.") mongoqry_dict = self.expand(query) fields = fields or self.db[tablename] for field in fields: mongofields_dict[field.name] = 1 return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ limitby_limit, limitby_skip def select(self, query, fields, attributes, count=False, snapshot=False): # TODO: support joins tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ limitby_limit, limitby_skip = self._select(query, fields, attributes) ctable = self.connection[tablename] if count: return {'count' : ctable.find( mongoqry_dict, mongofields_dict, skip=limitby_skip, limit=limitby_limit, sort=mongosort_list, snapshot=snapshot).count()} else: # pymongo cursor object mongo_list_dicts = ctable.find(mongoqry_dict, mongofields_dict, skip=limitby_skip, limit=limitby_limit, sort=mongosort_list, snapshot=snapshot) rows = [] # populate row in proper order # Here we replace ._id with .id to follow the standard naming colnames = [] newnames = [] for field in fields: colname = str(field) colnames.append(colname) tablename, fieldname = colname.split(".") if fieldname == "_id": # Mongodb reserved uuid key field.name = "id" newnames.append(".".join((tablename, field.name))) for record in mongo_list_dicts: row=[] for colname in colnames: tablename, fieldname = colname.split(".") # switch to Mongo _id uuids for retrieving # record id's if fieldname == "id": fieldname = "_id" if fieldname in record: value = record[fieldname] else: value = None row.append(value) rows.append(row) processor = attributes.get('processor', self.parse) result = processor(rows, fields, newnames, False) return result def INVERT(self, first): #print "in invert first=%s" % first return '-%s' % self.expand(first) def drop(self, table, mode=''): ctable = self.connection[table._tablename] ctable.drop() def truncate(self, table, mode, safe=None): if safe == None: safe=self.safe ctable = self.connection[table._tablename] ctable.remove(None, safe=True) def oupdate(self, tablename, query, fields): if not isinstance(query, Query): raise SyntaxError("Not Supported") filter = None if query: filter = self.expand(query) modify = {'$set': dict((k.name, self.represent(v, k.type)) for k, v in fields)} return modify, filter def update(self, tablename, query, fields, safe=None): if safe == None: safe = self.safe # return amount of adjusted rows or zero, but no exceptions # @ related not finding the result if not isinstance(query, Query): raise RuntimeError("Not implemented") amount = self.count(query, False) modify, filter = self.oupdate(tablename, query, fields) try: result = self.connection[tablename].update(filter, modify, multi=True, safe=safe) if safe: try: # if result count is available fetch it return result["n"] except (KeyError, AttributeError, TypeError): return amount else: return amount except Exception, e: # TODO Reverse update query to verifiy that the query succeded raise RuntimeError("uncaught 
exception when updating rows: %s" % e) #this function returns a dict with the where clause and update fields def _update(self,tablename,query,fields): return str(self.oupdate(tablename, query, fields)) def delete(self, tablename, query, safe=None): if safe is None: safe = self.safe amount = 0 amount = self.count(query, False) if not isinstance(query, Query): raise RuntimeError("query type %s is not supported" % \ type(query)) filter = self.expand(query) self._delete(tablename, filter, safe=safe) return amount def _delete(self, tablename, filter, safe=None): return self.connection[tablename].remove(filter, safe=safe) def bulk_insert(self, table, items): return [self.insert(table,item) for item in items] # TODO This will probably not work:( def NOT(self, first): result = {} result["$not"] = self.expand(first) return result def AND(self,first,second): f = self.expand(first) s = self.expand(second) f.update(s) return f def OR(self,first,second): # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) result = {} f = self.expand(first) s = self.expand(second) result['$or'] = [f,s] return result def BELONGS(self, first, second): if isinstance(second, str): return {self.expand(first) : {"$in" : [ second[:-1]]} } elif second==[] or second==() or second==set(): return {1:0} items = [self.expand(item, first.type) for item in second] return {self.expand(first) : {"$in" : items} } def EQ(self,first,second): result = {} result[self.expand(first)] = self.expand(second) return result def NE(self, first, second=None): result = {} result[self.expand(first)] = {'$ne': self.expand(second)} return result def LT(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s < None" % first) result = {} result[self.expand(first)] = {'$lt': self.expand(second)} return result def LE(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s <= None" % first) result = {} result[self.expand(first)] = {'$lte': self.expand(second)} return result def GT(self,first,second): result = {} result[self.expand(first)] = {'$gt': self.expand(second)} return result def GE(self,first,second=None): if second is None: raise RuntimeError("Cannot compare %s >= None" % first) result = {} result[self.expand(first)] = {'$gte': self.expand(second)} return result def ADD(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '%s + %s' % (self.expand(first), self.expand(second, first.type)) def SUB(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '(%s - %s)' % (self.expand(first), self.expand(second, first.type)) def MUL(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '(%s * %s)' % (self.expand(first), self.expand(second, first.type)) def DIV(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '(%s / %s)' % (self.expand(first), self.expand(second, first.type)) def MOD(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type)) def AS(self, first, second): raise NotImplementedError(self.error_messages["javascript_needed"]) return '%s AS %s' % (self.expand(first), second) # We could implement an option that simulates a full featured SQL # database. But I think the option should be set explicit or # implemented as another library. 
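    # ------------------------------------------------------------------
    # Illustrative sketch (an assumption, not adapter API; table and
    # field names below are hypothetical): the operator methods above
    # build ordinary pymongo filter documents.  A DAL query such as
    #
    #   (db.person.age > 18) & (db.person.name == 'Max')
    #
    # expands through GT, EQ and AND to roughly
    #
    #   {'age': {'$gt': 18}, 'name': 'Max'}
    #
    # while OR wraps both sides as {'$or': [<first>, <second>]}.  These
    # dictionaries are what select() ultimately passes to
    # connection[tablename].find().
    # ------------------------------------------------------------------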
def ON(self, first, second): raise NotImplementedError("This is not possible in NoSQL" + " but can be simulated with a wrapper.") return '%s ON %s' % (self.expand(first), self.expand(second)) # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS # WHICH ONE IS BEST? def COMMA(self, first, second): return '%s, %s' % (self.expand(first), self.expand(second)) def LIKE(self, first, second): #escaping regex operators? return {self.expand(first): ('%s' % \ self.expand(second, 'string').replace('%','/'))} def STARTSWITH(self, first, second): #escaping regex operators? return {self.expand(first): ('/^%s/' % \ self.expand(second, 'string'))} def ENDSWITH(self, first, second): #escaping regex operators? return {self.expand(first): ('/%s^/' % \ self.expand(second, 'string'))} def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive # There is a technical difference, but mongodb doesn't support # that, but the result will be the same return {self.expand(first) : ('/%s/' % \ self.expand(second, 'string'))} def LIKE(self, first, second): import re return {self.expand(first): {'$regex': \ re.escape(self.expand(second, 'string')).replace('%','.*')}} #TODO verify full compatibilty with official SQL Like operator def STARTSWITH(self, first, second): #TODO Solve almost the same problem as with endswith import re return {self.expand(first): {'$regex' : '^' + re.escape(self.expand(second, 'string'))}} #TODO verify full compatibilty with official SQL Like operator def ENDSWITH(self, first, second): #escaping regex operators? #TODO if searched for a name like zsa_corbitt and the function # is endswith('a') then this is also returned. # Aldo it end with a t import re return {self.expand(first): {'$regex': \ re.escape(self.expand(second, 'string')) + '$'}} #TODO verify full compatibilty with official oracle contains operator def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive #There is a technical difference, but mongodb doesn't support # that, but the result will be the same #TODO contains operators need to be transformed to Regex return {self.expand(first) : {' $regex': \ ".*" + re.escape(self.expand(second, 'string')) + ".*"}} class IMAPAdapter(NoSQLAdapter): drivers = ('imaplib',) """ IMAP server adapter This class is intended as an interface with email IMAP servers to perform simple queries in the web2py DAL query syntax, so email read, search and other related IMAP mail services (as those implemented by brands like Google(r), and Yahoo!(r) can be managed from web2py applications. The code uses examples by Yuji Tomita on this post: http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 and is based in docs for Python imaplib, python email and email IETF's (i.e. RFC2060 and RFC3501) This adapter was tested with a small set of operations with Gmail(r). Other services requests could raise command syntax and response data issues. It creates its table and field names "statically", meaning that the developer should leave the table and field definitions to the DAL instance by calling the adapter's .define_tables() method. The tables are defined with the IMAP server mailbox list information. 
.define_tables() returns a dictionary mapping dal tablenames to the server mailbox names with the following structure: {<tablename>: str <server mailbox name>} Here is a list of supported fields: Field Type Description ################################################################ uid string answered boolean Flag created date content list:string A list of text or html parts to string cc string bcc string size integer the amount of octets of the message* deleted boolean Flag draft boolean Flag flagged boolean Flag sender string recent boolean Flag seen boolean Flag subject string mime string The mime header declaration email string The complete RFC822 message** attachments <type list> Each non text part as dict encoding string The main detected encoding *At the application side it is measured as the length of the RFC822 message string WARNING: As row id's are mapped to email sequence numbers, make sure your imap client web2py app does not delete messages during select or update actions, to prevent updating or deleting different messages. Sequence numbers change whenever the mailbox is updated. To avoid this sequence numbers issues, it is recommended the use of uid fields in query references (although the update and delete in separate actions rule still applies). # This is the code recommended to start imap support # at the app's model: imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl imapdb.define_tables() Here is an (incomplete) list of possible imap commands: # Count today's unseen messages # smaller than 6000 octets from the # inbox mailbox q = imapdb.INBOX.seen == False q &= imapdb.INBOX.created == datetime.date.today() q &= imapdb.INBOX.size < 6000 unread = imapdb(q).count() # Fetch last query messages rows = imapdb(q).select() # it is also possible to filter query select results with limitby and # sequences of mailbox fields set.select(<fields sequence>, limitby=(<int>, <int>)) # Mark last query messages as seen messages = [row.uid for row in rows] seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) # Delete messages in the imap database that have mails from mr. 
Gumby deleted = 0 for mailbox in imapdb.tables deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() # It is possible also to mark messages for deletion instead of ereasing them # directly with set.update(deleted=True) # This object give access # to the adapter auto mailbox # mapped names (which native # mailbox has what table name) imapdb.mailboxes <dict> # tablename, server native name pairs # To retrieve a table native mailbox name use: imapdb.<table>.mailbox ### New features v2.4.1: # Declare mailboxes statically with tablename, name pairs # This avoids the extra server names retrieval imapdb.define_tables({"inbox": "INBOX"}) # Selects without content/attachments/email columns will only # fetch header and flags imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) """ types = { 'string': str, 'text': str, 'date': datetime.date, 'datetime': datetime.datetime, 'id': long, 'boolean': bool, 'integer': int, 'bigint': long, 'blob': str, 'list:string': str, } dbengine = 'imap' REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): # db uri: user@example.com:password@imap.server.com:123 # TODO: max size adapter argument for preventing large mail transfers self.db = db self.uri = uri if do_connect: self.find_driver(adapter_args) self.pool_size=pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.credential_decoder = credential_decoder self.driver_args = driver_args self.adapter_args = adapter_args self.mailbox_size = None self.static_names = None self.charset = sys.getfilesystemencoding() # imap class self.imap4 = None uri = uri.split("://")[1] """ MESSAGE is an identifier for sequence number""" self.flags = ['\\Deleted', '\\Draft', '\\Flagged', '\\Recent', '\\Seen', '\\Answered'] self.search_fields = { 'id': 'MESSAGE', 'created': 'DATE', 'uid': 'UID', 'sender': 'FROM', 'to': 'TO', 'cc': 'CC', 'bcc': 'BCC', 'content': 'TEXT', 'size': 'SIZE', 'deleted': '\\Deleted', 'draft': '\\Draft', 'flagged': '\\Flagged', 'recent': '\\Recent', 'seen': '\\Seen', 'subject': 'SUBJECT', 'answered': '\\Answered', 'mime': None, 'email': None, 'attachments': None } db['_lastsql'] = '' m = self.REGEX_URI.match(uri) user = m.group('user') password = m.group('password') host = m.group('host') port = int(m.group('port')) over_ssl = False if port==993: over_ssl = True driver_args.update(host=host,port=port, password=password, user=user) def connector(driver_args=driver_args): # it is assumed sucessful authentication alLways # TODO: support direct connection and login tests if over_ssl: self.imap4 = self.driver.IMAP4_SSL else: self.imap4 = self.driver.IMAP4 connection = self.imap4(driver_args["host"], driver_args["port"]) data = connection.login(driver_args["user"], driver_args["password"]) # static mailbox list connection.mailbox_names = None # dummy cursor function connection.cursor = lambda : True return connection self.db.define_tables = self.define_tables self.connector = connector if do_connect: self.reconnect() def reconnect(self, f=None, cursor=True): """ IMAP4 Pool connection method imap connection lacks of self cursor command. 
A custom command should be provided as a replacement for connection pooling to prevent uncaught remote session closing """ if getattr(self,'connection',None) != None: return if f is None: f = self.connector if not self.pool_size: self.connection = f() self.cursor = cursor and self.connection.cursor() else: POOLS = ConnectionPool.POOLS uri = self.uri while True: GLOBAL_LOCKER.acquire() if not uri in POOLS: POOLS[uri] = [] if POOLS[uri]: self.connection = POOLS[uri].pop() GLOBAL_LOCKER.release() self.cursor = cursor and self.connection.cursor() if self.cursor and self.check_active_connection: try: # check if connection is alive or close it result, data = self.connection.list() except: # Possible connection reset error # TODO: read exception class self.connection = f() break else: GLOBAL_LOCKER.release() self.connection = f() self.cursor = cursor and self.connection.cursor() break self.after_connection_hook() def get_last_message(self, tablename): last_message = None # request mailbox list to the server # if needed if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() try: result = self.connection.select(self.connection.mailbox_names[tablename]) last_message = int(result[1][0]) except (IndexError, ValueError, TypeError, KeyError): e = sys.exc_info()[1] LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) return last_message def get_uid_bounds(self, tablename): if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() # fetch first and last messages # return (first, last) messages uid's last_message = self.get_last_message(tablename) result, data = self.connection.uid("search", None, "(ALL)") uid_list = data[0].strip().split() if len(uid_list) <= 0: return None else: return (uid_list[0], uid_list[-1]) def convert_date(self, date, add=None): if add is None: add = datetime.timedelta() """ Convert a date object to a string with d-Mon-Y style for IMAP or the inverse case add <timedelta> adds to the date object """ months = [None, "Jan","Feb","Mar","Apr","May","Jun", "Jul", "Aug","Sep","Oct","Nov","Dec"] if isinstance(date, basestring): # Prevent unexpected date response format try: dayname, datestring = date.split(",") except (ValueError): LOGGER.debug("Could not parse date text: %s" % date) return None date_list = datestring.strip().split() year = int(date_list[2]) month = months.index(date_list[1]) day = int(date_list[0]) hms = map(int, date_list[3].split(":")) return datetime.datetime(year, month, day, hms[0], hms[1], hms[2]) + add elif isinstance(date, (datetime.datetime, datetime.date)): return (date + add).strftime("%d-%b-%Y") else: return None @staticmethod def header_represent(f, r): from email.header import decode_header text, encoding = decode_header(f)[0] return text def encode_text(self, text, charset, errors="replace"): """ convert text for mail to unicode""" if text is None: text = "" else: if isinstance(text, str): if charset is None: text = unicode(text, "utf-8", errors) else: text = unicode(text, charset, errors) else: raise Exception("Unsupported mail text type %s" % type(text)) return text.encode("utf-8") def get_charset(self, message): charset = message.get_content_charset() return charset def get_mailboxes(self): """ Query the mail database for mailbox names """ if self.static_names: # statically defined mailbox names self.connection.mailbox_names = self.static_names return self.static_names.keys() mailboxes_list = self.connection.list() self.connection.mailbox_names = dict() mailboxes = list() x = 0 for item 
in mailboxes_list[1]: x = x + 1 item = item.strip() if not "NOSELECT" in item.upper(): sub_items = item.split("\"") sub_items = [sub_item for sub_item in sub_items \ if len(sub_item.strip()) > 0] # mailbox = sub_items[len(sub_items) -1] mailbox = sub_items[-1] # remove unwanted characters and store original names # Don't allow leading non alphabetic characters mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) mailboxes.append(mailbox_name) self.connection.mailbox_names[mailbox_name] = mailbox return mailboxes def get_query_mailbox(self, query): nofield = True tablename = None attr = query while nofield: if hasattr(attr, "first"): attr = attr.first if isinstance(attr, Field): return attr.tablename elif isinstance(attr, Query): pass else: return None else: return None return tablename def is_flag(self, flag): if self.search_fields.get(flag, None) in self.flags: return True else: return False def define_tables(self, mailbox_names=None): """ Auto create common IMAP fileds This function creates fields definitions "statically" meaning that custom fields as in other adapters should not be supported and definitions handled on a service/mode basis (local syntax for Gmail(r), Ymail(r) Returns a dictionary with tablename, server native mailbox name pairs. """ if mailbox_names: # optional statically declared mailboxes self.static_names = mailbox_names else: self.static_names = None if not isinstance(self.connection.mailbox_names, dict): self.get_mailboxes() names = self.connection.mailbox_names.keys() for name in names: self.db.define_table("%s" % name, Field("uid", "string", writable=False), Field("answered", "boolean"), Field("created", "datetime", writable=False), Field("content", "list:string", writable=False), Field("to", "string", writable=False), Field("cc", "string", writable=False), Field("bcc", "string", writable=False), Field("size", "integer", writable=False), Field("deleted", "boolean"), Field("draft", "boolean"), Field("flagged", "boolean"), Field("sender", "string", writable=False), Field("recent", "boolean", writable=False), Field("seen", "boolean"), Field("subject", "string", writable=False), Field("mime", "string", writable=False), Field("email", "string", writable=False, readable=False), Field("attachments", list, writable=False, readable=False), Field("encoding") ) # Set a special _mailbox attribute for storing # native mailbox names self.db[name].mailbox = \ self.connection.mailbox_names[name] # decode quoted printable self.db[name].to.represent = self.db[name].cc.represent = \ self.db[name].bcc.represent = self.db[name].sender.represent = \ self.db[name].subject.represent = self.header_represent # Set the db instance mailbox collections self.db.mailboxes = self.connection.mailbox_names return self.db.mailboxes def create_table(self, *args, **kwargs): # not implemented # but required by DAL pass def _select(self, query, fields, attributes): if use_common_filters(query): query = self.common_filter(query, [self.get_query_mailbox(query),]) return str(query) def select(self, query, fields, attributes): """ Search and Fetch records and return web2py rows """ # move this statement elsewhere (upper-level) if use_common_filters(query): query = self.common_filter(query, [self.get_query_mailbox(query),]) import email # get records from imap server with search + fetch # convert results to a dictionary tablename = None fetch_results = list() if isinstance(query, Query): tablename = self.get_table(query) mailbox = self.connection.mailbox_names.get(tablename, 
None) if mailbox is None: raise ValueError("Mailbox name not found: %s" % mailbox) else: # select with readonly result, selected = self.connection.select(mailbox, True) if result != "OK": raise Exception("IMAP error: %s" % selected) self.mailbox_size = int(selected[0]) search_query = "(%s)" % str(query).strip() search_result = self.connection.uid("search", None, search_query) # Normal IMAP response OK is assumed (change this) if search_result[0] == "OK": # For "light" remote server responses just get the first # ten records (change for non-experimental implementation) # However, light responses are not guaranteed with this # approach, just fewer messages. limitby = attributes.get('limitby', None) messages_set = search_result[1][0].split() # descending order messages_set.reverse() if limitby is not None: # TODO: orderby, asc/desc, limitby from complete message set messages_set = messages_set[int(limitby[0]):int(limitby[1])] # keep the requests small for header/flags if any([(field.name in ["content", "size", "attachments", "email"]) for field in fields]): imap_fields = "(RFC822 FLAGS)" else: imap_fields = "(RFC822.HEADER FLAGS)" if len(messages_set) > 0: # create fetch results object list # fetch each remote message and store it in memmory # (change to multi-fetch command syntax for faster # transactions) for uid in messages_set: # fetch the RFC822 message body typ, data = self.connection.uid("fetch", uid, imap_fields) if typ == "OK": fr = {"message": int(data[0][0].split()[0]), "uid": long(uid), "email": email.message_from_string(data[0][1]), "raw_message": data[0][1]} fr["multipart"] = fr["email"].is_multipart() # fetch flags for the message fr["flags"] = self.driver.ParseFlags(data[1]) fetch_results.append(fr) else: # error retrieving the message body raise Exception("IMAP error retrieving the body: %s" % data) else: raise Exception("IMAP search error: %s" % search_result[1]) elif isinstance(query, (Expression, basestring)): raise NotImplementedError() else: raise TypeError("Unexpected query type") imapqry_dict = {} imapfields_dict = {} if len(fields) == 1 and isinstance(fields[0], SQLALL): allfields = True elif len(fields) == 0: allfields = True else: allfields = False if allfields: colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()] else: colnames = ["%s.%s" % (tablename, field.name) for field in fields] for k in colnames: imapfields_dict[k] = k imapqry_list = list() imapqry_array = list() for fr in fetch_results: attachments = [] content = [] size = 0 n = int(fr["message"]) item_dict = dict() message = fr["email"] uid = fr["uid"] charset = self.get_charset(message) flags = fr["flags"] raw_message = fr["raw_message"] # Return messages data mapping static fields # and fetched results. Mapping should be made # outside the select function (with auxiliary # instance methods) # pending: search flags states trough the email message # instances for correct output # preserve subject encoding (ASCII/quoted printable) if "%s.id" % tablename in colnames: item_dict["%s.id" % tablename] = n if "%s.created" % tablename in colnames: item_dict["%s.created" % tablename] = self.convert_date(message["Date"]) if "%s.uid" % tablename in colnames: item_dict["%s.uid" % tablename] = uid if "%s.sender" % tablename in colnames: # If there is no encoding found in the message header # force utf-8 replacing characters (change this to # module's defaults). 
Applies to .sender, .to, .cc and .bcc fields item_dict["%s.sender" % tablename] = message["From"] if "%s.to" % tablename in colnames: item_dict["%s.to" % tablename] = message["To"] if "%s.cc" % tablename in colnames: if "Cc" in message.keys(): item_dict["%s.cc" % tablename] = message["Cc"] else: item_dict["%s.cc" % tablename] = "" if "%s.bcc" % tablename in colnames: if "Bcc" in message.keys(): item_dict["%s.bcc" % tablename] = message["Bcc"] else: item_dict["%s.bcc" % tablename] = "" if "%s.deleted" % tablename in colnames: item_dict["%s.deleted" % tablename] = "\\Deleted" in flags if "%s.draft" % tablename in colnames: item_dict["%s.draft" % tablename] = "\\Draft" in flags if "%s.flagged" % tablename in colnames: item_dict["%s.flagged" % tablename] = "\\Flagged" in flags if "%s.recent" % tablename in colnames: item_dict["%s.recent" % tablename] = "\\Recent" in flags if "%s.seen" % tablename in colnames: item_dict["%s.seen" % tablename] = "\\Seen" in flags if "%s.subject" % tablename in colnames: item_dict["%s.subject" % tablename] = message["Subject"] if "%s.answered" % tablename in colnames: item_dict["%s.answered" % tablename] = "\\Answered" in flags if "%s.mime" % tablename in colnames: item_dict["%s.mime" % tablename] = message.get_content_type() if "%s.encoding" % tablename in colnames: item_dict["%s.encoding" % tablename] = charset # Here goes the whole RFC822 body as an email instance # for controller side custom processing # The message is stored as a raw string # >> email.message_from_string(raw string) # returns a Message object for enhanced object processing if "%s.email" % tablename in colnames: # WARNING: no encoding performed (raw message) item_dict["%s.email" % tablename] = raw_message # Size measure as suggested in a Velocity Reviews post # by Tim Williams: "how to get size of email attachment" # Note: len() and server RFC822.SIZE reports doesn't match # To retrieve the server size for representation would add a new # fetch transaction to the process for part in message.walk(): maintype = part.get_content_maintype() if ("%s.attachments" % tablename in colnames) or \ ("%s.content" % tablename in colnames): if "%s.attachments" % tablename in colnames: if not ("text" in maintype): payload = part.get_payload(decode=True) if payload: attachment = { "payload": payload, "filename": part.get_filename(), "encoding": part.get_content_charset(), "mime": part.get_content_type(), "disposition": part["Content-Disposition"]} attachments.append(attachment) if "%s.content" % tablename in colnames: payload = part.get_payload(decode=True) part_charset = self.get_charset(part) if "text" in maintype: if payload: content.append(self.encode_text(payload, part_charset)) if "%s.size" % tablename in colnames: if part is not None: size += len(str(part)) item_dict["%s.content" % tablename] = bar_encode(content) item_dict["%s.attachments" % tablename] = attachments item_dict["%s.size" % tablename] = size imapqry_list.append(item_dict) # extra object mapping for the sake of rows object # creation (sends an array or lists) for item_dict in imapqry_list: imapqry_array_item = list() for fieldname in colnames: imapqry_array_item.append(item_dict[fieldname]) imapqry_array.append(imapqry_array_item) # parse result and return a rows object colnames = colnames processor = attributes.get('processor',self.parse) return processor(imapqry_array, fields, colnames) def _update(self, tablename, query, fields, commit=False): # TODO: the adapter should implement an .expand method commands = list() if 
use_common_filters(query): query = self.common_filter(query, [tablename,]) mark = [] unmark = [] if query: for item in fields: field = item[0] name = field.name value = item[1] if self.is_flag(name): flag = self.search_fields[name] if (value is not None) and (flag != "\\Recent"): if value: mark.append(flag) else: unmark.append(flag) result, data = self.connection.select( self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] # build commands for marked flags for number in store_list: result = None if len(mark) > 0: commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) if len(unmark) > 0: commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) return commands def update(self, tablename, query, fields): rowcount = 0 commands = self._update(tablename, query, fields) for command in commands: result, data = self.connection.store(*command) if result == "OK": rowcount += 1 else: raise Exception("IMAP storing error: %s" % data) return rowcount def _count(self, query, distinct=None): raise NotImplementedError() def count(self,query,distinct=None): counter = 0 tablename = self.get_query_mailbox(query) if query and tablename is not None: if use_common_filters(query): query = self.common_filter(query, [tablename,]) result, data = self.connection.select(self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] counter = len(store_list) return counter def delete(self, tablename, query): counter = 0 if query: if use_common_filters(query): query = self.common_filter(query, [tablename,]) result, data = self.connection.select(self.connection.mailbox_names[tablename]) string_query = "(%s)" % query result, data = self.connection.search(None, string_query) store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] for number in store_list: result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") if result == "OK": counter += 1 else: raise Exception("IMAP store error: %s" % data) if counter > 0: result, data = self.connection.expunge() return counter def BELONGS(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": values = [str(val) for val in second if str(val).isdigit()] result = "%s" % ",".join(values).strip() elif name == "UID": values = [str(val) for val in second if str(val).isdigit()] result = "UID %s" % ",".join(values).strip() else: raise Exception("Operation not supported") # result = "(%s %s)" % (self.expand(first), self.expand(second)) return result def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive result = None name = self.search_fields[first.name] if name in ("FROM", "TO", "SUBJECT", "TEXT"): result = "%s \"%s\"" % (name, self.expand(second)) else: if first.name in ("cc", "bcc"): result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) elif first.name == "mime": result = "HEADER Content-Type \"%s\"" % self.expand(second) else: raise Exception("Operation not supported") return result def GT(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": last_message = self.get_last_message(first.tablename) result = "%d:%d" % (int(self.expand(second)) + 1, last_message) elif name == "UID": # GT and LT may not return # 
expected sets depending on # the uid format implemented try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] LOGGER.debug("Error requesting uid bounds: %s", str(e)) return "" try: lower_limit = int(self.expand(second)) + 1 except (ValueError, TypeError): e = sys.exc_info()[1] raise Exception("Operation not supported (non integer UID)") result = "UID %s:%s" % (lower_limit, threshold) elif name == "DATE": result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) elif name == "SIZE": result = "LARGER %s" % self.expand(second) else: raise Exception("Operation not supported") return result def GE(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": last_message = self.get_last_message(first.tablename) result = "%s:%s" % (self.expand(second), last_message) elif name == "UID": # GT and LT may not return # expected sets depending on # the uid format implemented try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] LOGGER.debug("Error requesting uid bounds: %s", str(e)) return "" lower_limit = self.expand(second) result = "UID %s:%s" % (lower_limit, threshold) elif name == "DATE": result = "SINCE %s" % self.convert_date(second) else: raise Exception("Operation not supported") return result def LT(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": result = "%s:%s" % (1, int(self.expand(second)) - 1) elif name == "UID": try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] LOGGER.debug("Error requesting uid bounds: %s", str(e)) return "" try: upper_limit = int(self.expand(second)) - 1 except (ValueError, TypeError): e = sys.exc_info()[1] raise Exception("Operation not supported (non integer UID)") result = "UID %s:%s" % (pedestal, upper_limit) elif name == "DATE": result = "BEFORE %s" % self.convert_date(second) elif name == "SIZE": result = "SMALLER %s" % self.expand(second) else: raise Exception("Operation not supported") return result def LE(self, first, second): result = None name = self.search_fields[first.name] if name == "MESSAGE": result = "%s:%s" % (1, self.expand(second)) elif name == "UID": try: pedestal, threshold = self.get_uid_bounds(first.tablename) except TypeError: e = sys.exc_info()[1] LOGGER.debug("Error requesting uid bounds: %s", str(e)) return "" upper_limit = int(self.expand(second)) result = "UID %s:%s" % (pedestal, upper_limit) elif name == "DATE": result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) else: raise Exception("Operation not supported") return result def NE(self, first, second=None): if (second is None) and isinstance(first, Field): # All records special table query if first.type == "id": return self.GE(first, 1) result = self.NOT(self.EQ(first, second)) result = result.replace("NOT NOT", "").strip() return result def EQ(self,first,second): name = self.search_fields[first.name] result = None if name is not None: if name == "MESSAGE": # query by message sequence number result = "%s" % self.expand(second) elif name == "UID": result = "UID %s" % self.expand(second) elif name == "DATE": result = "ON %s" % self.convert_date(second) elif name in self.flags: if second: result = "%s" % (name.upper()[1:]) else: result = "NOT %s" % (name.upper()[1:]) else: raise Exception("Operation not supported") else: raise Exception("Operation not supported") return result def AND(self, first, second): result = 
"%s %s" % (self.expand(first), self.expand(second)) return result def OR(self, first, second): result = "OR %s %s" % (self.expand(first), self.expand(second)) return "%s" % result.replace("OR OR", "OR") def NOT(self, first): result = "NOT %s" % self.expand(first) return result ######################################################################## # end of adapters ######################################################################## ADAPTERS = { 'sqlite': SQLiteAdapter, 'spatialite': SpatiaLiteAdapter, 'sqlite:memory': SQLiteAdapter, 'spatialite:memory': SpatiaLiteAdapter, 'mysql': MySQLAdapter, 'postgres': PostgreSQLAdapter, 'postgres:psycopg2': PostgreSQLAdapter, 'postgres:pg8000': PostgreSQLAdapter, 'postgres2:psycopg2': NewPostgreSQLAdapter, 'postgres2:pg8000': NewPostgreSQLAdapter, 'oracle': OracleAdapter, 'mssql': MSSQLAdapter, 'mssql2': MSSQL2Adapter, 'mssql3': MSSQL3Adapter, 'vertica': VerticaAdapter, 'sybase': SybaseAdapter, 'db2': DB2Adapter, 'teradata': TeradataAdapter, 'informix': InformixAdapter, 'informix-se': InformixSEAdapter, 'firebird': FireBirdAdapter, 'firebird_embedded': FireBirdAdapter, 'ingres': IngresAdapter, 'ingresu': IngresUnicodeAdapter, 'sapdb': SAPDBAdapter, 'cubrid': CubridAdapter, 'jdbc:sqlite': JDBCSQLiteAdapter, 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 'jdbc:postgres': JDBCPostgreSQLAdapter, 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 'google:datastore': GoogleDatastoreAdapter, 'google:sql': GoogleSQLAdapter, 'couchdb': CouchDBAdapter, 'mongodb': MongoDBAdapter, 'imap': IMAPAdapter } def sqlhtml_validators(field): """ Field type validation, using web2py's validators mechanism. makes sure the content of a field is in line with the declared fieldtype """ db = field.db if not have_validators: return [] field_type, field_length = field.type, field.length if isinstance(field_type, SQLCustomType): if hasattr(field_type, 'validator'): return field_type.validator else: field_type = field_type.type elif not isinstance(field_type,str): return [] requires=[] def ff(r,id): row=r(id) if not row: return id elif hasattr(r, '_format') and isinstance(r._format,str): return r._format % row elif hasattr(r, '_format') and callable(r._format): return r._format(row) else: return id if field_type in (('string', 'text', 'password')): requires.append(validators.IS_LENGTH(field_length)) elif field_type == 'json': requires.append(validators.IS_EMPTY_OR(validators.IS_JSON())) elif field_type == 'double' or field_type == 'float': requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) elif field_type in ('integer','bigint'): requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100)) elif field_type.startswith('decimal'): requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) elif field_type == 'date': requires.append(validators.IS_DATE()) elif field_type == 'time': requires.append(validators.IS_TIME()) elif field_type == 'datetime': requires.append(validators.IS_DATETIME()) elif db and field_type.startswith('reference') and \ field_type.find('.') < 0 and \ field_type[10:] in db.tables: referenced = db[field_type[10:]] def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id) field.represent = field.represent or repr_ref if hasattr(referenced, '_format') and referenced._format: requires = validators.IS_IN_DB(db,referenced._id, referenced._format) if field.unique: requires._and = validators.IS_NOT_IN_DB(db,field) if field.tablename == field_type[10:]: return validators.IS_EMPTY_OR(requires) return requires elif db and 
field_type.startswith('list:reference') and \ field_type.find('.') < 0 and \ field_type[15:] in db.tables: referenced = db[field_type[15:]] def list_ref_repr(ids, row=None, r=referenced, f=ff): if not ids: return None refs = None db, id = r._db, r._id if isinstance(db._adapter, GoogleDatastoreAdapter): def count(values): return db(id.belongs(values)).select(id) rx = range(0, len(ids), 30) refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx]) else: refs = db(id.belongs(ids)).select(id) return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '') field.represent = field.represent or list_ref_repr if hasattr(referenced, '_format') and referenced._format: requires = validators.IS_IN_DB(db,referenced._id, referenced._format,multiple=True) else: requires = validators.IS_IN_DB(db,referenced._id, multiple=True) if field.unique: requires._and = validators.IS_NOT_IN_DB(db,field) return requires elif field_type.startswith('list:'): def repr_list(values,row=None): return', '.join(str(v) for v in (values or [])) field.represent = field.represent or repr_list if field.unique: requires.insert(0,validators.IS_NOT_IN_DB(db,field)) sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] if field.notnull and not field_type[:2] in sff: requires.insert(0, validators.IS_NOT_EMPTY()) elif not field.notnull and field_type[:2] in sff and requires: requires[-1] = validators.IS_EMPTY_OR(requires[-1]) return requires def bar_escape(item): return str(item).replace('|', '||') def bar_encode(items): return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip()) def bar_decode_integer(value): if not hasattr(value,'split') and hasattr(value,'read'): value = value.read() return [long(x) for x in value.split('|') if x.strip()] def bar_decode_string(value): return [x.replace('||', '|') for x in REGEX_UNPACK.split(value[1:-1]) if x.strip()] class Row(object): """ a dictionary that lets you do d['a'] as well as d.a this is only used to store a Row """ def __init__(self,*args,**kwargs): self.__dict__.update(*args,**kwargs) def __getitem__(self, key): key=str(key) m = REGEX_TABLE_DOT_FIELD.match(key) if key in self.get('_extra',{}): return self._extra[key] elif m: try: return ogetattr(self, m.group(1))[m.group(2)] except (KeyError,AttributeError,TypeError): key = m.group(2) return ogetattr(self, key) def __setitem__(self, key, value): setattr(self, str(key), value) __delitem__ = delattr __copy__ = lambda self: Row(self) __call__ = __getitem__ def get(self,key,default=None): return self.__dict__.get(key,default) def __contains__(self,key): return key in self.__dict__ has_key = __contains__ def __nonzero__(self): return len(self.__dict__)>0 def update(self, *args, **kwargs): self.__dict__.update(*args, **kwargs) def keys(self): return self.__dict__.keys() def items(self): return self.__dict__.items() def values(self): return self.__dict__.values() def __iter__(self): return self.__dict__.__iter__() def iteritems(self): return self.__dict__.iteritems() def __str__(self): ### this could be made smarter return '<Row %s>' % self.as_dict() def __repr__(self): return '<Row %s>' % self.as_dict() def __int__(self): return object.__getattribute__(self,'id') def __long__(self): return long(object.__getattribute__(self,'id')) def __eq__(self,other): try: return self.as_dict() == other.as_dict() except AttributeError: return False def __ne__(self,other): return not (self == other) def __copy__(self): return Row(dict(self)) def as_dict(self, datetime_to_str=False, custom_types=None): SERIALIZABLE_TYPES = [str, unicode, int, 
long, float, bool, list, dict] if isinstance(custom_types,(list,tuple,set)): SERIALIZABLE_TYPES += custom_types elif custom_types: SERIALIZABLE_TYPES.append(custom_types) d = dict(self) for k in copy.copy(d.keys()): v=d[k] if d[k] is None: continue elif isinstance(v,Row): d[k]=v.as_dict() elif isinstance(v,Reference): d[k]=long(v) elif isinstance(v,decimal.Decimal): d[k]=float(v) elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): if datetime_to_str: d[k] = v.isoformat().replace('T',' ')[:19] elif not isinstance(v,tuple(SERIALIZABLE_TYPES)): del d[k] return d def as_xml(self, row_name="row", colnames=None, indent=' '): def f(row,field,indent=' '): if isinstance(row,Row): spc = indent+' \n' items = [f(row[x],x,indent+' ') for x in row] return '%s<%s>\n%s\n%s</%s>' % ( indent, field, spc.join(item for item in items if item), indent, field) elif not callable(row): if REGEX_ALPHANUMERIC.match(field): return '%s<%s>%s</%s>' % (indent,field,row,field) else: return '%s<extra name="%s">%s</extra>' % \ (indent,field,row) else: return None return f(self, row_name, indent=indent) def as_json(self, mode="object", default=None, colnames=None, serialize=True, **kwargs): """ serializes the table to a JSON list of objects kwargs are passed to .as_dict method only "object" mode supported for single row serialize = False used by Rows.as_json TODO: return array mode with query column order """ def inner_loop(record, col): (t, f) = col.split('.') res = None if not REGEX_TABLE_DOT_FIELD.match(col): key = col res = record._extra[col] else: key = f if isinstance(record.get(t, None), Row): res = record[t][f] else: res = record[f] if mode == 'object': return (key, res) else: return res multi = any([isinstance(v, self.__class__) for v in self.values()]) mode = mode.lower() if not mode in ['object', 'array']: raise SyntaxError('Invalid JSON serialization mode: %s' % mode) if mode=='object' and colnames: item = dict([inner_loop(self, col) for col in colnames]) elif colnames: item = [inner_loop(self, col) for col in colnames] else: if not mode == 'object': raise SyntaxError('Invalid JSON serialization mode: %s' % mode) if multi: item = dict() [item.update(**v.as_dict(**kwargs)) for v in self.values()] else: item = self.as_dict(**kwargs) if serialize: if have_serializers: return serializers.json(item, default=default or serializers.custom_json) elif simplejson: return simplejson.dumps(item) else: raise RuntimeError("missing simplejson") else: return item ################################################################################ # Everything below should be independent of the specifics of the database # and should work for RDBMs and some NoSQL databases ################################################################################ class SQLCallableList(list): def __call__(self): return copy.copy(self) def smart_query(fields,text): if not isinstance(fields,(list,tuple)): fields = [fields] new_fields = [] for field in fields: if isinstance(field,Field): new_fields.append(field) elif isinstance(field,Table): for ofield in field: new_fields.append(ofield) else: raise RuntimeError("fields must be a list of fields") fields = new_fields field_map = {} for field in fields: n = field.name.lower() if not n in field_map: field_map[n] = field n = str(field).lower() if not n in field_map: field_map[n] = field constants = {} i = 0 while True: m = REGEX_CONST_STRING.search(text) if not m: break text = text[:m.start()]+('#%i' % i)+text[m.end():] constants[str(i)] = m.group()[1:-1] i+=1 text = re.sub('\s+',' 
',text).lower() for a,b in [('&','and'), ('|','or'), ('~','not'), ('==','='), ('<','<'), ('>','>'), ('<=','<='), ('>=','>='), ('<>','!='), ('=<','<='), ('=>','>='), ('=','='), (' less or equal than ','<='), (' greater or equal than ','>='), (' equal or less than ','<='), (' equal or greater than ','>='), (' less or equal ','<='), (' greater or equal ','>='), (' equal or less ','<='), (' equal or greater ','>='), (' not equal to ','!='), (' not equal ','!='), (' equal to ','='), (' equal ','='), (' equals ','='), (' less than ','<'), (' greater than ','>'), (' starts with ','startswith'), (' ends with ','endswith'), (' not in ' , 'notbelongs'), (' in ' , 'belongs'), (' is ','=')]: if a[0]==' ': text = text.replace(' is'+a,' %s ' % b) text = text.replace(a,' %s ' % b) text = re.sub('\s+',' ',text).lower() text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text) query = field = neg = op = logic = None for item in text.split(): if field is None: if item == 'not': neg = True elif not neg and not logic and item in ('and','or'): logic = item elif item in field_map: field = field_map[item] else: raise RuntimeError("Invalid syntax") elif not field is None and op is None: op = item elif not op is None: if item.startswith('#'): if not item[1:] in constants: raise RuntimeError("Invalid syntax") value = constants[item[1:]] else: value = item if field.type in ('text', 'string', 'json'): if op == '=': op = 'like' if op == '=': new_query = field==value elif op == '<': new_query = field<value elif op == '>': new_query = field>value elif op == '<=': new_query = field<=value elif op == '>=': new_query = field>=value elif op == '!=': new_query = field!=value elif op == 'belongs': new_query = field.belongs(value.split(',')) elif op == 'notbelongs': new_query = ~field.belongs(value.split(',')) elif field.type in ('text', 'string', 'json'): if op == 'contains': new_query = field.contains(value) elif op == 'like': new_query = field.like(value) elif op == 'startswith': new_query = field.startswith(value) elif op == 'endswith': new_query = field.endswith(value) else: raise RuntimeError("Invalid operation") elif field._db._adapter.dbengine=='google:datastore' and \ field.type in ('list:integer', 'list:string', 'list:reference'): if op == 'contains': new_query = field.contains(value) else: raise RuntimeError("Invalid operation") else: raise RuntimeError("Invalid operation") if neg: new_query = ~new_query if query is None: query = new_query elif logic == 'and': query &= new_query elif logic == 'or': query |= new_query field = op = neg = logic = None return query class DAL(object): """ an instance of this class represents a database connection Example:: db = DAL('sqlite://test.db') or db = DAL({"uri": ..., "items": ...}) # experimental db.define_table('tablename', Field('fieldname1'), Field('fieldname2')) """ def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs): if not hasattr(THREAD_LOCAL,'db_instances'): THREAD_LOCAL.db_instances = {} if not hasattr(THREAD_LOCAL,'db_instances_zombie'): THREAD_LOCAL.db_instances_zombie = {} if uri == '<zombie>': db_uid = kwargs['db_uid'] # a zombie must have a db_uid! 
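# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). The DAL docstring
# above and the smart_query() helper describe how a connection URI, table
# definitions and free-text queries fit together. The helper below is a
# hedged, minimal example: the table and field names ('person', 'name',
# 'info') are hypothetical and the function is never called at import time.
# ----------------------------------------------------------------------------
def _example_dal_and_smart_query():
    "Minimal sketch: connect, define a table, insert, and use smart_query."
    db = DAL('sqlite://example.db')
    db.define_table('person', Field('name'), Field('info'))
    db.person.insert(name='Alice', info='demo row')
    # DAL.smart_query() (defined further down) turns a textual query such as
    # 'person.name equals "Alice"' into a Set using the operator table above;
    # for string fields the '=' operator is translated into a LIKE match.
    rows = db.smart_query([db.person], 'person.name equals "Alice"').select()
    # Row objects allow both attribute-style and dictionary-style access.
    return [(r.name, r['info']) for r in rows]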
if db_uid in THREAD_LOCAL.db_instances: db_group = THREAD_LOCAL.db_instances[db_uid] db = db_group[-1] elif db_uid in THREAD_LOCAL.db_instances_zombie: db = THREAD_LOCAL.db_instances_zombie[db_uid] else: db = super(DAL, cls).__new__(cls) THREAD_LOCAL.db_instances_zombie[db_uid] = db else: db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest()) if db_uid in THREAD_LOCAL.db_instances_zombie: db = THREAD_LOCAL.db_instances_zombie[db_uid] del THREAD_LOCAL.db_instances_zombie[db_uid] else: db = super(DAL, cls).__new__(cls) db_group = THREAD_LOCAL.db_instances.get(db_uid,[]) db_group.append(db) THREAD_LOCAL.db_instances[db_uid] = db_group db._db_uid = db_uid return db @staticmethod def set_folder(folder): """ # ## this allows gluon to set a folder for this thread # ## <<<<<<<<< Should go away as new DAL replaces old sql.py """ BaseAdapter.set_folder(folder) @staticmethod def get_instances(): """ Returns a dictionary with uri as key with timings and defined tables {'sqlite://storage.sqlite': { 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 'dbtables': { 'defined': ['auth_cas', 'auth_event', 'auth_group', 'auth_membership', 'auth_permission', 'auth_user'], 'lazy': '[]' } } } """ dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() infos = {} for db_uid, db_group in dbs: for db in db_group: if not db._uri: continue k = hide_password(db._uri) infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings], dbtables = {'defined': sorted(list(set(db.tables) - set(db._LAZY_TABLES.keys()))), 'lazy': sorted(db._LAZY_TABLES.keys())} ) return infos @staticmethod def distributed_transaction_begin(*instances): if not instances: return thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] instances = enumerate(instances) for (i, db) in instances: if not db._adapter.support_distributed_transaction(): raise SyntaxError( 'distributed transaction not suported by %s' % db._dbname) for (i, db) in instances: db._adapter.distributed_transaction_begin(keys[i]) @staticmethod def distributed_transaction_commit(*instances): if not instances: return instances = enumerate(instances) thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] for (i, db) in instances: if not db._adapter.support_distributed_transaction(): raise SyntaxError( 'distributed transaction not suported by %s' % db._dbanme) try: for (i, db) in instances: db._adapter.prepare(keys[i]) except: for (i, db) in instances: db._adapter.rollback_prepared(keys[i]) raise RuntimeError('failure to commit distributed transaction') else: for (i, db) in instances: db._adapter.commit_prepared(keys[i]) return def __init__(self, uri=DEFAULT_URI, pool_size=0, folder=None, db_codec='UTF-8', check_reserved=None, migrate=True, fake_migrate=False, migrate_enabled=True, fake_migrate_all=False, decode_credentials=False, driver_args=None, adapter_args=None, attempts=5, auto_import=False, bigint_id=False,debug=False,lazy_tables=False, db_uid=None, do_connect=True, after_connection=None): """ Creates a new Database Abstraction Layer instance. Keyword arguments: :uri: string that contains information for connecting to a database. (default: 'sqlite://dummy.db') experimental: you can specify a dictionary as uri parameter i.e. 
with db = DAL({"uri": "sqlite://storage.sqlite", "items": {...}, ...}) for an example of dict input you can check the output of the scaffolding db model with db.as_dict() Note that for compatibility with Python older than version 2.6.5 you should cast your dict input keys to str due to a syntax limitation on kwarg names. for proper DAL dictionary input you can use one of: obj = serializers.cast_keys(dict, [encoding="utf-8"]) or else (for parsing json input) obj = serializers.loads_json(data, unicode_keys=False) :pool_size: How many open connections to make to the database object. :folder: where .table files will be created. automatically set within web2py use an explicit path when using DAL outside web2py :db_codec: string encoding of the database (default: 'UTF-8') :check_reserved: list of adapters to check tablenames and column names against sql/nosql reserved keywords. (Default None) * 'common' List of sql keywords that are common to all database types such as "SELECT, INSERT". (recommended) * 'all' Checks against all known SQL keywords. (not recommended) <adaptername> Checks against the specific adapters list of keywords (recommended) * '<adaptername>_nonreserved' Checks against the specific adapters list of nonreserved keywords. (if available) :migrate (defaults to True) sets default migrate behavior for all tables :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables :migrate_enabled (defaults to True). If set to False disables ALL migrations :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables :attempts (defaults to 5). Number of times to attempt connecting :auto_import (defaults to False). If set, import automatically table definitions from the databases folder :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields :lazy_tables (defaults to False): delay table definition until table access :after_connection (defaults to None): a callable that will be execute after the connection """ items = None if isinstance(uri, dict): if "items" in uri: items = uri.pop("items") try: newuri = uri.pop("uri") except KeyError: newuri = DEFAULT_URI locals().update(uri) uri = newuri if uri == '<zombie>' and db_uid is not None: return if not decode_credentials: credential_decoder = lambda cred: cred else: credential_decoder = lambda cred: urllib.unquote(cred) self._folder = folder if folder: self.set_folder(folder) self._uri = uri self._pool_size = pool_size self._db_codec = db_codec self._lastsql = '' self._timings = [] self._pending_references = {} self._request_tenant = 'request_tenant' self._common_fields = [] self._referee_name = '%(table)s' self._bigint_id = bigint_id self._debug = debug self._migrated = [] self._LAZY_TABLES = {} self._lazy_tables = lazy_tables self._tables = SQLCallableList() self._driver_args = driver_args self._adapter_args = adapter_args self._check_reserved = check_reserved self._decode_credentials = decode_credentials self._attempts = attempts self._do_connect = do_connect if not str(attempts).isdigit() or attempts < 0: attempts = 5 if uri: uris = isinstance(uri,(list,tuple)) and uri or [uri] error = '' connected = False for k in range(attempts): for uri in uris: try: if is_jdbc and not uri.startswith('jdbc:'): uri = 'jdbc:'+uri self._dbname = REGEX_DBNAME.match(uri).group() if not self._dbname in ADAPTERS: raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) # notice that driver args or {} else driver_args # defaults to {} global, not correct kwargs = 
dict(db=self,uri=uri, pool_size=pool_size, folder=folder, db_codec=db_codec, credential_decoder=credential_decoder, driver_args=driver_args or {}, adapter_args=adapter_args or {}, do_connect=do_connect, after_connection=after_connection) self._adapter = ADAPTERS[self._dbname](**kwargs) types = ADAPTERS[self._dbname].types # copy so multiple DAL() possible self._adapter.types = copy.copy(types) if bigint_id: if 'big-id' in types and 'reference' in types: self._adapter.types['id'] = types['big-id'] self._adapter.types['reference'] = types['big-reference'] connected = True break except SyntaxError: raise except Exception: tb = traceback.format_exc() sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb)) if connected: break else: time.sleep(1) if not connected: raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) else: self._adapter = BaseAdapter(db=self,pool_size=0, uri='None',folder=folder, db_codec=db_codec, after_connection=after_connection) migrate = fake_migrate = False adapter = self._adapter self._uri_hash = hashlib_md5(adapter.uri).hexdigest() self.check_reserved = check_reserved if self.check_reserved: from reserved_sql_keywords import ADAPTERS as RSK self.RSK = RSK self._migrate = migrate self._fake_migrate = fake_migrate self._migrate_enabled = migrate_enabled self._fake_migrate_all = fake_migrate_all if auto_import or items: self.import_table_definitions(adapter.folder, items=items) @property def tables(self): return self._tables def import_table_definitions(self, path, migrate=False, fake_migrate=False, items=None): pattern = pjoin(path,self._uri_hash+'_*.table') if items: for tablename, table in items.iteritems(): # TODO: read all field/table options fields = [] # remove unsupported/illegal Table arguments [table.pop(name) for name in ("name", "fields") if name in table] if "items" in table: for fieldname, field in table.pop("items").iteritems(): # remove unsupported/illegal Field arguments [field.pop(key) for key in ("requires", "name", "compute", "colname") if key in field] fields.append(Field(str(fieldname), **field)) self.define_table(str(tablename), *fields, **table) else: for filename in glob.glob(pattern): tfile = self._adapter.file_open(filename, 'r') try: sql_fields = pickle.load(tfile) name = filename[len(pattern)-7:-6] mf = [(value['sortable'], Field(key, type=value['type'], length=value.get('length',None), notnull=value.get('notnull',False), unique=value.get('unique',False))) \ for key, value in sql_fields.iteritems()] mf.sort(lambda a,b: cmp(a[0],b[0])) self.define_table(name,*[item[1] for item in mf], **dict(migrate=migrate, fake_migrate=fake_migrate)) finally: self._adapter.file_close(tfile) def check_reserved_keyword(self, name): """ Validates ``name`` against SQL keywords Uses self.check_reserve which is a list of operators to use. 
self.check_reserved ['common', 'postgres', 'mysql'] self.check_reserved ['all'] """ for backend in self.check_reserved: if name.upper() in self.RSK[backend]: raise SyntaxError( 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper())) def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True): """ EXAMPLE: db.define_table('person',Field('name'),Field('info')) db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info')) @request.restful() def index(): def GET(*args,**vars): patterns = [ "/friends[person]", "/{person.name}/:field", "/{person.name}/pets[pet.ownedby]", "/{person.name}/pets[pet.ownedby]/{pet.name}", "/{person.name}/pets[pet.ownedby]/{pet.name}/:field", ("/dogs[pet]", db.pet.info=='dog'), ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'), ] parser = db.parse_as_rest(patterns,args,vars) if parser.status == 200: return dict(content=parser.response) else: raise HTTP(parser.status,parser.error) def POST(table_name,**vars): if table_name == 'person': return db.person.validate_and_insert(**vars) elif table_name == 'pet': return db.pet.validate_and_insert(**vars) else: raise HTTP(400) return locals() """ db = self re1 = REGEX_SEARCH_PATTERN re2 = REGEX_SQUARE_BRACKETS def auto_table(table,base='',depth=0): patterns = [] for field in db[table].fields: if base: tag = '%s/%s' % (base,field.replace('_','-')) else: tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) f = db[table][field] if not f.readable: continue if f.type=='id' or 'slug' in field or f.type.startswith('reference'): tag += '/{%s.%s}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('boolean'): tag += '/{%s.%s}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type in ('float','double','integer','bigint'): tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('list:'): tag += '/{%s.%s.contains}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type in ('date','datetime'): tag+= '/{%s.%s.year}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.month}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.day}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') if f.type in ('datetime','time'): tag+= '/{%s.%s.hour}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.minute}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') tag+='/{%s.%s.second}' % (table,field) patterns.append(tag) patterns.append(tag+'/:field') if depth>0: for f in db[table]._referenced_by: tag+='/%s[%s.%s]' % (table,f.tablename,f.name) patterns.append(tag) patterns += auto_table(table,base=tag,depth=depth-1) return patterns if patterns == 'auto': patterns=[] for table in db.tables: if not table.startswith('auth_'): patterns.append('/%s[%s]' % (table,table)) patterns += auto_table(table,base='',depth=1) else: i = 0 while i<len(patterns): pattern = patterns[i] if not isinstance(pattern,str): pattern = pattern[0] tokens = pattern.split('/') if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1], '/'.join(tokens[:-1])) patterns = patterns[:i]+new_patterns+patterns[i+1:] i += len(new_patterns) else: i += 1 if '/'.join(args) == 'patterns': return Row({'status':200,'pattern':'list', 
'error':None,'response':patterns}) for pattern in patterns: basequery, exposedfields = None, [] if isinstance(pattern,tuple): if len(pattern)==2: pattern, basequery = pattern elif len(pattern)>2: pattern, basequery, exposedfields = pattern[0:3] otable=table=None if not isinstance(queries,dict): dbset=db(queries) if basequery is not None: dbset = dbset(basequery) i=0 tags = pattern[1:].split('/') if len(tags)!=len(args): continue for tag in tags: if re1.match(tag): # print 're1:'+tag tokens = tag[1:-1].split('.') table, field = tokens[0], tokens[1] if not otable or table == otable: if len(tokens)==2 or tokens[2]=='eq': query = db[table][field]==args[i] elif tokens[2]=='ne': query = db[table][field]!=args[i] elif tokens[2]=='lt': query = db[table][field]<args[i] elif tokens[2]=='gt': query = db[table][field]>args[i] elif tokens[2]=='ge': query = db[table][field]>=args[i] elif tokens[2]=='le': query = db[table][field]<=args[i] elif tokens[2]=='year': query = db[table][field].year()==args[i] elif tokens[2]=='month': query = db[table][field].month()==args[i] elif tokens[2]=='day': query = db[table][field].day()==args[i] elif tokens[2]=='hour': query = db[table][field].hour()==args[i] elif tokens[2]=='minute': query = db[table][field].minutes()==args[i] elif tokens[2]=='second': query = db[table][field].seconds()==args[i] elif tokens[2]=='startswith': query = db[table][field].startswith(args[i]) elif tokens[2]=='contains': query = db[table][field].contains(args[i]) else: raise RuntimeError("invalid pattern: %s" % pattern) if len(tokens)==4 and tokens[3]=='not': query = ~query elif len(tokens)>=4: raise RuntimeError("invalid pattern: %s" % pattern) if not otable and isinstance(queries,dict): dbset = db(queries[table]) if basequery is not None: dbset = dbset(basequery) dbset=dbset(query) else: raise RuntimeError("missing relation in pattern: %s" % pattern) elif re2.match(tag) and args[i]==tag[:tag.find('[')]: ref = tag[tag.find('[')+1:-1] if '.' 
in ref and otable: table,field = ref.split('.') selfld = '_id' if db[table][field].type.startswith('reference '): refs = [ x.name for x in db[otable] if x.type == db[table][field].type ] else: refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ] if refs: selfld = refs[0] if nested_select: try: dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld]))) except ValueError: return Row({'status':400,'pattern':pattern, 'error':'invalid path','response':None}) else: items = [item.id for item in dbset.select(db[otable][selfld])] dbset=db(db[table][field].belongs(items)) else: table = ref if not otable and isinstance(queries,dict): dbset = db(queries[table]) dbset=dbset(db[table]) elif tag==':field' and table: # print 're3:'+tag field = args[i] if not field in db[table]: break # hand-built patterns should respect .readable=False as well if not db[table][field].readable: return Row({'status':418,'pattern':pattern, 'error':'I\'m a teapot','response':None}) try: distinct = vars.get('distinct', False) == 'True' offset = long(vars.get('offset',None) or 0) limits = (offset,long(vars.get('limit',None) or 1000)+offset) except ValueError: return Row({'status':400,'error':'invalid limits','response':None}) items = dbset.select(db[table][field], distinct=distinct, limitby=limits) if items: return Row({'status':200,'response':items, 'pattern':pattern}) else: return Row({'status':404,'pattern':pattern, 'error':'no record found','response':None}) elif tag != args[i]: break otable = table i += 1 if i==len(tags) and table: ofields = vars.get('order',db[table]._id.name).split('|') try: orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields] except (KeyError, AttributeError): return Row({'status':400,'error':'invalid orderby','response':None}) if exposedfields: fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable] else: fields = [field for field in db[table] if field.readable] count = dbset.count() try: offset = long(vars.get('offset',None) or 0) limits = (offset,long(vars.get('limit',None) or 1000)+offset) except ValueError: return Row({'status':400,'error':'invalid limits','response':None}) if count > limits[1]-limits[0]: return Row({'status':400,'error':'too many records','response':None}) try: response = dbset.select(limitby=limits,orderby=orderby,*fields) except ValueError: return Row({'status':400,'pattern':pattern, 'error':'invalid path','response':None}) return Row({'status':200,'response':response, 'pattern':pattern,'count':count}) return Row({'status':400,'error':'no matching pattern','response':None}) def define_table( self, tablename, *fields, **args ): if not isinstance(tablename,str): raise SyntaxError("missing table name") elif hasattr(self,tablename) or tablename in self.tables: if not args.get('redefine',False): raise SyntaxError('table already defined: %s' % tablename) elif tablename.startswith('_') or hasattr(self,tablename) or \ REGEX_PYTHON_KEYWORDS.match(tablename): raise SyntaxError('invalid table name: %s' % tablename) elif self.check_reserved: self.check_reserved_keyword(tablename) else: invalid_args = set(args)-TABLE_ARGS if invalid_args: raise SyntaxError('invalid table "%s" attributes: %s' \ % (tablename,invalid_args)) if self._lazy_tables and not tablename in self._LAZY_TABLES: self._LAZY_TABLES[tablename] = (tablename,fields,args) table = None else: table = self.lazy_define_table(tablename,*fields,**args) if not tablename in self.tables: self.tables.append(tablename) 
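# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). define_table() above
# honours the 'redefine' keyword and, when the DAL was created with
# lazy_tables=True, only records the definition in _LAZY_TABLES until the
# table is first accessed. The hedged example below (the table name 'note'
# is hypothetical) is never executed at import time.
# ----------------------------------------------------------------------------
def _example_lazy_and_redefine():
    "Minimal sketch of lazy table definition and redefinition."
    db = DAL('sqlite://example.db', lazy_tables=True)
    # with lazy_tables=True the Table object is only built on first access
    db.define_table('note', Field('body', 'text'))
    table = db.note  # first attribute access triggers lazy_define_table()
    # defining the same name again requires redefine=True, otherwise
    # define_table() raises SyntaxError('table already defined: ...')
    db.define_table('note', Field('body', 'text'), Field('title'),
                    redefine=True)
    return table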
return table def lazy_define_table( self, tablename, *fields, **args ): args_get = args.get common_fields = self._common_fields if common_fields: fields = list(fields) + list(common_fields) table_class = args_get('table_class',Table) table = table_class(self, tablename, *fields, **args) table._actual = True self[tablename] = table # must follow above line to handle self references table._create_references() for field in table: if field.requires == DEFAULT: field.requires = sqlhtml_validators(field) migrate = self._migrate_enabled and args_get('migrate',self._migrate) if migrate and not self._uri in (None,'None') \ or self._adapter.dbengine=='google:datastore': fake_migrate = self._fake_migrate_all or \ args_get('fake_migrate',self._fake_migrate) polymodel = args_get('polymodel',None) try: GLOBAL_LOCKER.acquire() self._lastsql = self._adapter.create_table( table,migrate=migrate, fake_migrate=fake_migrate, polymodel=polymodel) finally: GLOBAL_LOCKER.release() else: table._dbt = None on_define = args_get('on_define',None) if on_define: on_define(table) return table def as_dict(self, flat=False, sanitize=True, field_options=True): dbname = db_uid = uri = None if not sanitize: uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid) db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname, db_uid=db_uid, **dict([(k, getattr(self, "_" + k)) for k in 'pool_size','folder','db_codec', 'check_reserved','migrate','fake_migrate', 'migrate_enabled','fake_migrate_all', 'decode_credentials','driver_args', 'adapter_args', 'attempts', 'bigint_id','debug','lazy_tables', 'do_connect'])) for table in self: tablename = str(table) db_as_dict["tables"].append(tablename) db_as_dict["items"][tablename] = table.as_dict(flat=flat, sanitize=sanitize, field_options=field_options) return db_as_dict def as_xml(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No xml serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.xml(d) def as_json(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No json serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.json(d) def as_yaml(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No YAML serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.yaml(d) def __contains__(self, tablename): try: return tablename in self.tables except AttributeError: # The instance has no .tables attribute yet return False has_key = __contains__ def get(self,key,default=None): return self.__dict__.get(key,default) def __iter__(self): for tablename in self.tables: yield self[tablename] def __getitem__(self, key): return self.__getattr__(str(key)) def __getattr__(self, key): if ogetattr(self,'_lazy_tables') and \ key in ogetattr(self,'_LAZY_TABLES'): tablename, fields, args = self._LAZY_TABLES.pop(key) return self.lazy_define_table(tablename,*fields,**args) return ogetattr(self, key) def __setitem__(self, key, value): osetattr(self, str(key), value) def __setattr__(self, key, value): if key[:1]!='_' and key in self: raise SyntaxError( 'Object %s exists and cannot be redefined' % key) osetattr(self,key,value) __delitem__ = object.__delattr__ def __repr__(self): if hasattr(self,'_uri'): return '<DAL uri="%s">' % hide_password(str(self._uri)) else: return '<DAL db_uid="%s">' % self._db_uid def 
smart_query(self,fields,text): return Set(self, smart_query(fields,text)) def __call__(self, query=None, ignore_common_filters=None): if isinstance(query,Table): query = self._adapter.id_query(query) elif isinstance(query,Field): query = query!=None elif isinstance(query, dict): icf = query.get("ignore_common_filters") if icf: ignore_common_filters = icf return Set(self, query, ignore_common_filters=ignore_common_filters) def commit(self): self._adapter.commit() def rollback(self): self._adapter.rollback() def close(self): self._adapter.close() if self._db_uid in THREAD_LOCAL.db_instances: db_group = THREAD_LOCAL.db_instances[self._db_uid] db_group.remove(self) if not db_group: del THREAD_LOCAL.db_instances[self._db_uid] def executesql(self, query, placeholders=None, as_dict=False, fields=None, colnames=None): """ placeholders is optional and will always be None. If using raw SQL with placeholders, placeholders may be a sequence of values to be substituted in or, (if supported by the DB driver), a dictionary with keys matching named placeholders in your SQL. Added 2009-12-05 "as_dict" optional argument. Will always be None when using DAL. If using raw SQL can be set to True and the results cursor returned by the DB driver will be converted to a sequence of dictionaries keyed with the db field names. Tested with SQLite but should work with any database since the cursor.description used to get field names is part of the Python dbi 2.0 specs. Results returned with as_dict=True are the same as those returned when applying .to_list() to a DAL query. [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] Added 2012-08-24 "fields" and "colnames" optional arguments. If either is provided, the results cursor returned by the DB driver will be converted to a DAL Rows object using the db._adapter.parse() method. The "fields" argument is a list of DAL Field objects that match the fields returned from the DB. The Field objects should be part of one or more Table objects defined on the DAL object. The "fields" list can include one or more DAL Table objects in addition to or instead of including Field objects, or it can be just a single table (not in a list). In that case, the Field objects will be extracted from the table(s). Instead of specifying the "fields" argument, the "colnames" argument can be specified as a list of field names in tablename.fieldname format. Again, these should represent tables and fields defined on the DAL object. It is also possible to specify both "fields" and the associated "colnames". In that case, "fields" can also include DAL Expression objects in addition to Field objects. For Field objects in "fields", the associated "colnames" must still be in tablename.fieldname format. For Expression objects in "fields", the associated "colnames" can be any arbitrary labels. Note, the DAL Table objects referred to by "fields" or "colnames" can be dummy tables and do not have to represent any real tables in the database. Also, note that the "fields" and "colnames" must be in the same order as the fields in the results cursor returned from the DB. """ adapter = self._adapter if placeholders: adapter.execute(query, placeholders) else: adapter.execute(query) if as_dict: if not hasattr(adapter.cursor,'description'): raise RuntimeError("database does not support executesql(...,as_dict=True)") # Non-DAL legacy db query, converts cursor results to dict. # sequence of 7-item sequences. each sequence tells about a column. 
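# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). The executesql()
# docstring above documents three result shapes: raw driver rows, a list of
# dictionaries (as_dict=True) and a parsed Rows object (fields=/colnames=).
# Hedged example below: the table 'person' and the SQL text are hypothetical
# and the function is never called at import time.
# ----------------------------------------------------------------------------
def _example_executesql():
    "Minimal sketch of db.executesql() and its optional arguments."
    db = DAL('sqlite://example.db')
    db.define_table('person', Field('name'))
    db.person.insert(name='Alice')
    # 1) raw rows exactly as returned by the driver cursor
    raw = db.executesql('SELECT name FROM person;')
    # 2) a list of dictionaries keyed by column name
    dicts = db.executesql('SELECT name FROM person;', as_dict=True)
    # 3) a DAL Rows object parsed against known Field objects
    rows = db.executesql('SELECT person.name FROM person;',
                         fields=[db.person.name])
    return raw, dicts, rows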
# first item is always the field name according to Python Database API specs columns = adapter.cursor.description # reduce the column info down to just the field names fields = [f[0] for f in columns] # will hold our finished resultset in a list data = adapter._fetchall() # convert the list for each row into a dictionary so it's # easier to work with. row['field_name'] rather than row[0] return [dict(zip(fields,row)) for row in data] try: data = adapter._fetchall() except: return None if fields or colnames: fields = [] if fields is None else fields if not isinstance(fields, list): fields = [fields] extracted_fields = [] for field in fields: if isinstance(field, Table): extracted_fields.extend([f for f in field]) else: extracted_fields.append(field) if not colnames: colnames = ['%s.%s' % (f.tablename, f.name) for f in extracted_fields] data = adapter.parse( data, fields=extracted_fields, colnames=colnames) return data def _remove_references_to(self, thistable): for table in self: table._referenced_by = [field for field in table._referenced_by if not field.table==thistable] def export_to_csv_file(self, ofile, *args, **kwargs): step = long(kwargs.get('max_fetch_rows,',500)) write_colnames = kwargs['write_colnames'] = \ kwargs.get("write_colnames", True) for table in self.tables: ofile.write('TABLE %s\r\n' % table) query = self._adapter.id_query(self[table]) nrows = self(query).count() kwargs['write_colnames'] = write_colnames for k in range(0,nrows,step): self(query).select(limitby=(k,k+step)).export_to_csv_file( ofile, *args, **kwargs) kwargs['write_colnames'] = False ofile.write('\r\n\r\n') ofile.write('END') def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', unique='uuid', *args, **kwargs): #if id_map is None: id_map={} id_offset = {} # only used if id_map is None for line in ifile: line = line.strip() if not line: continue elif line == 'END': return elif not line.startswith('TABLE ') or not line[6:] in self.tables: raise SyntaxError('invalid file format') else: tablename = line[6:] self[tablename].import_from_csv_file( ifile, id_map, null, unique, id_offset, *args, **kwargs) def DAL_unpickler(db_uid): return DAL('<zombie>',db_uid=db_uid) def DAL_pickler(db): return DAL_unpickler, (db._db_uid,) copyreg.pickle(DAL, DAL_pickler, DAL_unpickler) class SQLALL(object): """ Helper class providing a comma-separated string having all the field names (prefixed by table name and '.') normally only called from within gluon.sql """ def __init__(self, table): self._table = table def __str__(self): return ', '.join([str(field) for field in self._table]) # class Reference(int): class Reference(long): def __allocate(self): if not self._record: self._record = self._table[long(self)] if not self._record: raise RuntimeError( "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self))) def __getattr__(self, key): if key == 'id': return long(self) self.__allocate() return self._record.get(key, None) def get(self, key, default=None): return self.__getattr__(key, default) def __setattr__(self, key, value): if key.startswith('_'): long.__setattr__(self, key, value) return self.__allocate() self._record[key] = value def __getitem__(self, key): if key == 'id': return long(self) self.__allocate() return self._record.get(key, None) def __setitem__(self,key,value): self.__allocate() self._record[key] = value def Reference_unpickler(data): return marshal.loads(data) def Reference_pickler(data): try: marshal_dump = marshal.dumps(long(data)) except AttributeError: 
marshal_dump = 'i%s' % struct.pack('<i', long(data)) return (Reference_unpickler, (marshal_dump,)) copyreg.pickle(Reference, Reference_pickler, Reference_unpickler) class MethodAdder(object): def __init__(self,table): self.table = table def __call__(self): return self.register() def __getattr__(self,method_name): return self.register(method_name) def register(self,method_name=None): def _decorated(f): instance = self.table import types method = types.MethodType(f, instance, instance.__class__) name = method_name or f.func_name setattr(instance, name, method) return f return _decorated class Table(object): """ an instance of this class represents a database table Example:: db = DAL(...) db.define_table('users', Field('name')) db.users.insert(name='me') # print db.users._insert(...) to see SQL db.users.drop() """ def __init__( self, db, tablename, *fields, **args ): """ Initializes the table and performs checking on the provided fields. Each table will have automatically an 'id'. If a field is of type Table, the fields (excluding 'id') from that table will be used instead. :raises SyntaxError: when a supplied field is of incorrect type. """ self._actual = False # set to True by define_table() self._tablename = tablename self._ot = args.get('actual_name') self._sequence_name = args.get('sequence_name') or \ db and db._adapter.sequence_name(tablename) self._trigger_name = args.get('trigger_name') or \ db and db._adapter.trigger_name(tablename) self._common_filter = args.get('common_filter') self._format = args.get('format') self._singular = args.get( 'singular',tablename.replace('_',' ').capitalize()) self._plural = args.get( 'plural',pluralize(self._singular.lower()).capitalize()) # horrible but for backard compatibility of appamdin: if 'primarykey' in args and args['primarykey'] is not None: self._primarykey = args.get('primarykey') self._before_insert = [] self._before_update = [Set.delete_uploaded_files] self._before_delete = [Set.delete_uploaded_files] self._after_insert = [] self._after_update = [] self._after_delete = [] self.add_method = MethodAdder(self) fieldnames,newfields=set(),[] if hasattr(self,'_primarykey'): if not isinstance(self._primarykey,list): raise SyntaxError( "primarykey must be a list of fields from table '%s'" \ % tablename) if len(self._primarykey)==1: self._id = [f for f in fields if isinstance(f,Field) \ and f.name==self._primarykey[0]][0] elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: field = Field('id', 'id') newfields.append(field) fieldnames.add('id') self._id = field virtual_fields = [] for field in fields: if isinstance(field, (FieldMethod, FieldVirtual)): virtual_fields.append(field) elif isinstance(field, Field) and not field.name in fieldnames: if field.db is not None: field = copy.copy(field) newfields.append(field) fieldnames.add(field.name) if field.type=='id': self._id = field elif isinstance(field, Table): table = field for field in table: if not field.name in fieldnames and not field.type=='id': t2 = not table._actual and self._tablename field = field.clone(point_self_references_to=t2) newfields.append(field) fieldnames.add(field.name) elif not isinstance(field, (Field, Table)): raise SyntaxError( 'define_table argument is not a Field or Table: %s' % field) fields = newfields self._db = db tablename = tablename self._fields = SQLCallableList() self.virtualfields = [] fields = list(fields) if db and db._adapter.uploads_in_blob==True: uploadfields = [f.name for f in fields if f.type=='blob'] for field in fields: fn = 
field.uploadfield if isinstance(field, Field) and field.type == 'upload'\ and fn is True: fn = field.uploadfield = '%s_blob' % field.name if isinstance(fn,str) and not fn in uploadfields: fields.append(Field(fn,'blob',default='', writable=False,readable=False)) lower_fieldnames = set() reserved = dir(Table) + ['fields'] for field in fields: field_name = field.name if db and db.check_reserved: db.check_reserved_keyword(field_name) elif field_name in reserved: raise SyntaxError("field name %s not allowed" % field_name) if field_name.lower() in lower_fieldnames: raise SyntaxError("duplicate field %s in table %s" \ % (field_name, tablename)) else: lower_fieldnames.add(field_name.lower()) self.fields.append(field_name) self[field_name] = field if field.type == 'id': self['id'] = field field.tablename = field._tablename = tablename field.table = field._table = self field.db = field._db = db if db and not field.type in ('text', 'blob', 'json') and \ db._adapter.maxcharlength < field.length: field.length = db._adapter.maxcharlength self.ALL = SQLALL(self) if hasattr(self,'_primarykey'): for k in self._primarykey: if k not in self.fields: raise SyntaxError( "primarykey must be a list of fields from table '%s " % tablename) else: self[k].notnull = True for field in virtual_fields: self[field.name] = field @property def fields(self): return self._fields def update(self,*args,**kwargs): raise RuntimeError("Syntax Not Supported") def _enable_record_versioning(self, archive_db=None, archive_name = '%(tablename)s_archive', current_record = 'current_record', is_active = 'is_active'): archive_db = archive_db or self._db archive_name = archive_name % dict(tablename=self._tablename) if archive_name in archive_db.tables(): return # do not try define the archive if already exists fieldnames = self.fields() field_type = self if archive_db is self._db else 'bigint' archive_db.define_table( archive_name, Field(current_record,field_type), *[field.clone(unique=False) for field in self]) self._before_update.append( lambda qset,fs,db=archive_db,an=archive_name,cn=current_record: archive_record(qset,fs,db[an],cn)) if is_active and is_active in fieldnames: self._before_delete.append( lambda qset: qset.update(is_active=False)) newquery = lambda query, t=self: t.is_active == True query = self._common_filter if query: newquery = query & newquery self._common_filter = newquery def _validate(self,**vars): errors = Row() for key,value in vars.iteritems(): value,error = self[key].validate(value) if error: errors[key] = error return errors def _create_references(self): db = self._db pr = db._pending_references self._referenced_by = [] for field in self: fieldname = field.name field_type = field.type if isinstance(field_type,str) and field_type[:10] == 'reference ': ref = field_type[10:].strip() if not ref.split(): raise SyntaxError('Table: reference to nothing: %s' %ref) refs = ref.split('.') rtablename = refs[0] if not rtablename in db: pr[rtablename] = pr.get(rtablename,[]) + [field] continue rtable = db[rtablename] if len(refs)==2: rfieldname = refs[1] if not hasattr(rtable,'_primarykey'): raise SyntaxError( 'keyed tables can only reference other keyed tables (for now)') if rfieldname not in rtable.fields: raise SyntaxError( "invalid field '%s' for referenced table '%s' in table '%s'" \ % (rfieldname, rtablename, self._tablename)) rtable._referenced_by.append(field) for referee in pr.get(self._tablename,[]): self._referenced_by.append(referee) def _filter_fields(self, record, id=False): return dict([(k, v) for (k, v) in 
record.iteritems() if k in self.fields and (self[k].type!='id' or id)]) def _build_query(self,key): """ for keyed table only """ query = None for k,v in key.iteritems(): if k in self._primarykey: if query: query = query & (self[k] == v) else: query = (self[k] == v) else: raise SyntaxError( 'Field %s is not part of the primary key of %s' % \ (k,self._tablename)) return query def __getitem__(self, key): if not key: return None elif isinstance(key, dict): """ for keyed table """ query = self._build_query(key) rows = self._db(query).select() if rows: return rows[0] return None elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): return self._db(self._id == key).select(limitby=(0,1)).first() elif key: return ogetattr(self, str(key)) def __call__(self, key=DEFAULT, **kwargs): for_update = kwargs.get('_for_update',False) if '_for_update' in kwargs: del kwargs['_for_update'] orderby = kwargs.get('_orderby',None) if '_orderby' in kwargs: del kwargs['_orderby'] if not key is DEFAULT: if isinstance(key, Query): record = self._db(key).select( limitby=(0,1),for_update=for_update, orderby=orderby).first() elif not str(key).isdigit(): record = None else: record = self._db(self._id == key).select( limitby=(0,1),for_update=for_update, orderby=orderby).first() if record: for k,v in kwargs.iteritems(): if record[k]!=v: return None return record elif kwargs: query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()]) return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby).first() else: return None def __setitem__(self, key, value): if isinstance(key, dict) and isinstance(value, dict): """ option for keyed table """ if set(key.keys()) == set(self._primarykey): value = self._filter_fields(value) kv = {} kv.update(value) kv.update(key) if not self.insert(**kv): query = self._build_query(key) self._db(query).update(**self._filter_fields(value)) else: raise SyntaxError( 'key must have all fields from primary key: %s'%\ (self._primarykey)) elif str(key).isdigit(): if key == 0: self.insert(**self._filter_fields(value)) elif self._db(self._id == key)\ .update(**self._filter_fields(value)) is None: raise SyntaxError('No such record: %s' % key) else: if isinstance(key, dict): raise SyntaxError( 'value must be a dictionary: %s' % value) osetattr(self, str(key), value) __getattr__ = __getitem__ def __setattr__(self, key, value): if key[:1]!='_' and key in self: raise SyntaxError('Object exists and cannot be redefined: %s' % key) osetattr(self,key,value) def __delitem__(self, key): if isinstance(key, dict): query = self._build_query(key) if not self._db(query).delete(): raise SyntaxError('No such record: %s' % key) elif not str(key).isdigit() or \ not self._db(self._id == key).delete(): raise SyntaxError('No such record: %s' % key) def __contains__(self,key): return hasattr(self,key) has_key = __contains__ def items(self): return self.__dict__.items() def __iter__(self): for fieldname in self.fields: yield self[fieldname] def iteritems(self): return self.__dict__.iteritems() def __repr__(self): return '<Table %s (%s)>' % (self._tablename,','.join(self.fields())) def __str__(self): if self._ot is not None: if 'Oracle' in str(type(self._db._adapter)): # <<< patch return '%s %s' % (self._ot, self._tablename) # <<< patch return '%s AS %s' % (self._ot, self._tablename) return self._tablename def _drop(self, mode = ''): return self._db._adapter._drop(self, mode) def drop(self, mode = ''): return self._db._adapter.drop(self,mode) def 
_listify(self,fields,update=False): new_fields = {} # format: new_fields[name] = (field,value) # store all fields passed as input in new_fields for name in fields: if not name in self.fields: if name != 'id': raise SyntaxError( 'Field %s does not belong to the table' % name) else: field = self[name] value = fields[name] if field.filter_in: value = field.filter_in(value) new_fields[name] = (field,value) # check all fields that should be in the table but are not passed to_compute = [] for ofield in self: name = ofield.name if not name in new_fields: # if field is supposed to be computed, compute it! if ofield.compute: # save those to compute for later to_compute.append((name,ofield)) # if field is required, check its default value elif not update and not ofield.default is None: value = ofield.default fields[name] = value new_fields[name] = (ofield,value) # if this is an update, user the update field instead elif update and not ofield.update is None: value = ofield.update fields[name] = value new_fields[name] = (ofield,value) # if the field is still not there but it should, error elif not update and ofield.required: raise RuntimeError( 'Table: missing required field: %s' % name) # now deal with fields that are supposed to be computed if to_compute: row = Row(fields) for name,ofield in to_compute: # try compute it try: new_fields[name] = (ofield,ofield.compute(row)) except (KeyError, AttributeError): # error sinlently unless field is required! if ofield.required: raise SyntaxError('unable to comput field: %s' % name) return new_fields.values() def _attempt_upload(self, fields): for field in self: if field.type=='upload' and field.name in fields: value = fields[field.name] if value and not isinstance(value,str): if hasattr(value,'file') and hasattr(value,'filename'): new_name = field.store(value.file,filename=value.filename) elif hasattr(value,'read') and hasattr(value,'name'): new_name = field.store(value,filename=value.name) else: raise RuntimeError("Unable to handle upload") fields[field.name] = new_name def _defaults(self, fields): "If there are no fields/values specified, return table defaults" if not fields: fields = {} for field in self: if field.type != "id": fields[field.name] = field.default return fields def _insert(self, **fields): fields = self._defaults(fields) return self._db._adapter._insert(self, self._listify(fields)) def insert(self, **fields): fields = self._defaults(fields) self._attempt_upload(fields) if any(f(fields) for f in self._before_insert): return 0 ret = self._db._adapter.insert(self, self._listify(fields)) if ret and self._after_insert: fields = Row(fields) [f(fields,ret) for f in self._after_insert] return ret def validate_and_insert(self,**fields): response = Row() response.errors = Row() new_fields = copy.copy(fields) for key,value in fields.iteritems(): value,error = self[key].validate(value) if error: response.errors[key] = "%s" % error else: new_fields[key] = value if not response.errors: response.id = self.insert(**new_fields) else: response.id = None return response def update_or_insert(self, _key=DEFAULT, **values): if _key is DEFAULT: record = self(**values) elif isinstance(_key,dict): record = self(**_key) else: record = self(_key) if record: record.update_record(**values) newid = None else: newid = self.insert(**values) return newid def bulk_insert(self, items): """ here items is a list of dictionaries """ items = [self._listify(item) for item in items] if any(f(item) for item in items for f in self._before_insert):return 0 ret = 
self._db._adapter.bulk_insert(self,items) ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] return ret def _truncate(self, mode = None): return self._db._adapter._truncate(self, mode) def truncate(self, mode = None): return self._db._adapter.truncate(self, mode) def import_from_csv_file( self, csvfile, id_map=None, null='<NULL>', unique='uuid', id_offset=None, # id_offset used only when id_map is None *args, **kwargs ): """ Import records from csv file. Column headers must have same names as table fields. Field 'id' is ignored. If column names read 'table.file' the 'table.' prefix is ignored. 'unique' argument is a field which must be unique (typically a uuid field) 'restore' argument is default False; if set True will remove old values in table first. 'id_map' ff set to None will not map ids. The import will keep the id numbers in the restored table. This assumes that there is an field of type id that is integer and in incrementing order. Will keep the id numbers in restored table. """ delimiter = kwargs.get('delimiter', ',') quotechar = kwargs.get('quotechar', '"') quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) restore = kwargs.get('restore', False) if restore: self._db[self].truncate() reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) colnames = None if isinstance(id_map, dict): if not self._tablename in id_map: id_map[self._tablename] = {} id_map_self = id_map[self._tablename] def fix(field, value, id_map, id_offset): list_reference_s='list:reference' if value == null: value = None elif field.type=='blob': value = base64.b64decode(value) elif field.type=='double' or field.type=='float': if not value.strip(): value = None else: value = float(value) elif field.type in ('integer','bigint'): if not value.strip(): value = None else: value = long(value) elif field.type.startswith('list:string'): value = bar_decode_string(value) elif field.type.startswith(list_reference_s): ref_table = field.type[len(list_reference_s):].strip() if id_map is not None: value = [id_map[ref_table][long(v)] \ for v in bar_decode_string(value)] else: value = [v for v in bar_decode_string(value)] elif field.type.startswith('list:'): value = bar_decode_integer(value) elif id_map and field.type.startswith('reference'): try: value = id_map[field.type[9:].strip()][long(value)] except KeyError: pass elif id_offset and field.type.startswith('reference'): try: value = id_offset[field.type[9:].strip()]+long(value) except KeyError: pass return (field.name, value) def is_id(colname): if colname in self: return self[colname].type == 'id' else: return False first = True unique_idx = None for line in reader: if not line: break if not colnames: colnames = [x.split('.',1)[-1] for x in line][:len(line)] cols, cid = [], None for i,colname in enumerate(colnames): if is_id(colname): cid = i else: cols.append(i) if colname == unique: unique_idx = i else: items = [fix(self[colnames[i]], line[i], id_map, id_offset) \ for i in cols if colnames[i] in self.fields] if not id_map and cid is not None and id_offset is not None and not unique_idx: csv_id = long(line[cid]) curr_id = self.insert(**dict(items)) if first: first = False # First curr_id is bigger than csv_id, # then we are not restoring but # extending db table with csv db table if curr_id>csv_id: id_offset[self._tablename] = curr_id-csv_id else: id_offset[self._tablename] = 0 # create new id until we get the same as old_id+offset while curr_id<csv_id+id_offset[self._tablename]: 
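# --- illustrative sketch (added; not part of the original source) ---
# Typical table-level CSV round trip, assuming a hypothetical 'person' table
# and file name; passing id_map={} asks the importer to remap reference ids
# rather than preserve the original id numbers.
#   db(db.person).select().export_to_csv_file(open('person.csv', 'wb'))
#   # ... later, on a database with the same table defined:
#   id_map = {}
#   db.person.import_from_csv_file(open('person.csv', 'rb'), id_map=id_map)
#   db.commit()
# --- end sketch ---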
self._db(self._db[self][colnames[cid]] == curr_id).delete() curr_id = self.insert(**dict(items)) # Validation. Check for duplicate of 'unique' &, # if present, update instead of insert. elif not unique_idx: new_id = self.insert(**dict(items)) else: unique_value = line[unique_idx] query = self._db[self][unique] == unique_value record = self._db(query).select().first() if record: record.update_record(**dict(items)) new_id = record[self._id.name] else: new_id = self.insert(**dict(items)) if id_map and cid is not None: id_map_self[long(line[cid])] = new_id def as_dict(self, flat=False, sanitize=True, field_options=True): tablename = str(self) table_as_dict = dict(name=tablename, items={}, fields=[], sequence_name=self._sequence_name, trigger_name=self._trigger_name, common_filter=self._common_filter, format=self._format, singular=self._singular, plural=self._plural) for field in self: if (field.readable or field.writable) or (not sanitize): table_as_dict["fields"].append(field.name) table_as_dict["items"][field.name] = \ field.as_dict(flat=flat, sanitize=sanitize, options=field_options) return table_as_dict def as_xml(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No xml serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.xml(d) def as_json(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No json serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.json(d) def as_yaml(self, sanitize=True, field_options=True): if not have_serializers: raise ImportError("No YAML serializers available") d = self.as_dict(flat=True, sanitize=sanitize, field_options=field_options) return serializers.yaml(d) def with_alias(self, alias): return self._db._adapter.alias(self,alias) def on(self, query): return Expression(self._db,self._db._adapter.ON,self,query) def archive_record(qset,fs,archive_table,current_record): tablenames = qset.db._adapter.tables(qset.query) if len(tablenames)!=1: raise RuntimeError("cannot update join") table = qset.db[tablenames[0]] for row in qset.select(): fields = archive_table._filter_fields(row) fields[current_record] = row.id archive_table.insert(**fields) return False class Expression(object): def __init__( self, db, op, first=None, second=None, type=None, **optional_args ): self.db = db self.op = op self.first = first self.second = second self._table = getattr(first,'_table',None) ### self._tablename = first._tablename ## CHECK if not type and first and hasattr(first,'type'): self.type = first.type else: self.type = type self.optional_args = optional_args def sum(self): db = self.db return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type) def max(self): db = self.db return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type) def min(self): db = self.db return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type) def len(self): db = self.db return Expression(db, db._adapter.LENGTH, self, None, 'integer') def avg(self): db = self.db return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type) def abs(self): db = self.db return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type) def lower(self): db = self.db return Expression(db, db._adapter.LOWER, self, None, self.type) def upper(self): db = self.db return Expression(db, db._adapter.UPPER, self, None, self.type) def replace(self,a,b): db = self.db return Expression(db, 
db._adapter.REPLACE, self, (a,b), self.type) def year(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer') def month(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer') def day(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer') def hour(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer') def minutes(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer') def coalesce(self,*others): db = self.db return Expression(db, db._adapter.COALESCE, self, others, self.type) def coalesce_zero(self): db = self.db return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type) def seconds(self): db = self.db return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer') def epoch(self): db = self.db return Expression(db, db._adapter.EPOCH, self, None, 'integer') def __getslice__(self, start, stop): db = self.db if start < 0: pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) else: pos0 = start + 1 if stop < 0: length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) elif stop == sys.maxint: length = self.len() else: length = '(%s - %s)' % (stop + 1, pos0) return Expression(db,db._adapter.SUBSTRING, self, (pos0, length), self.type) def __getitem__(self, i): return self[i:i + 1] def __str__(self): return self.db._adapter.expand(self,self.type) def __or__(self, other): # for use in sortby db = self.db return Expression(db,db._adapter.COMMA,self,other,self.type) def __invert__(self): db = self.db if hasattr(self,'_op') and self.op == db._adapter.INVERT: return self.first return Expression(db,db._adapter.INVERT,self,type=self.type) def __add__(self, other): db = self.db return Expression(db,db._adapter.ADD,self,other,self.type) def __sub__(self, other): db = self.db if self.type in ('integer','bigint'): result_type = 'integer' elif self.type in ['date','time','datetime','double','float']: result_type = 'double' elif self.type.startswith('decimal('): result_type = self.type else: raise SyntaxError("subtraction operation not supported for type") return Expression(db,db._adapter.SUB,self,other,result_type) def __mul__(self, other): db = self.db return Expression(db,db._adapter.MUL,self,other,self.type) def __div__(self, other): db = self.db return Expression(db,db._adapter.DIV,self,other,self.type) def __mod__(self, other): db = self.db return Expression(db,db._adapter.MOD,self,other,self.type) def __eq__(self, value): db = self.db return Query(db, db._adapter.EQ, self, value) def __ne__(self, value): db = self.db return Query(db, db._adapter.NE, self, value) def __lt__(self, value): db = self.db return Query(db, db._adapter.LT, self, value) def __le__(self, value): db = self.db return Query(db, db._adapter.LE, self, value) def __gt__(self, value): db = self.db return Query(db, db._adapter.GT, self, value) def __ge__(self, value): db = self.db return Query(db, db._adapter.GE, self, value) def like(self, value, case_sensitive=False): db = self.db op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE return Query(db, op, self, value) def regexp(self, value): db = self.db return Query(db, db._adapter.REGEXP, self, value) def belongs(self, *value): """ Accepts the following inputs: field.belongs(1,2) field.belongs((1,2)) field.belongs(query) Does NOT accept: field.belongs(1) """ db = self.db if len(value) == 1: value = value[0] if isinstance(value,Query): value = db(value)._select(value.first._table._id) return 
Query(db, db._adapter.BELONGS, self, value) def startswith(self, value): db = self.db if not self.type in ('string', 'text', 'json'): raise SyntaxError("startswith used with incompatible field type") return Query(db, db._adapter.STARTSWITH, self, value) def endswith(self, value): db = self.db if not self.type in ('string', 'text', 'json'): raise SyntaxError("endswith used with incompatible field type") return Query(db, db._adapter.ENDSWITH, self, value) def contains(self, value, all=False, case_sensitive=False): """ The case_sensitive parameters is only useful for PostgreSQL For other RDMBs it is ignored and contains is always case in-sensitive For MongoDB and GAE contains is always case sensitive """ db = self.db if isinstance(value,(list, tuple)): subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive) for v in value if str(v).strip()] if not subqueries: return self.contains('') else: return reduce(all and AND or OR,subqueries) if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'): raise SyntaxError("contains used with incompatible field type") return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive) def with_alias(self, alias): db = self.db return Expression(db, db._adapter.AS, self, alias, self.type) # GIS expressions def st_asgeojson(self, precision=15, options=0, version=1): return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self, dict(precision=precision, options=options, version=version), 'string') def st_astext(self): db = self.db return Expression(db, db._adapter.ST_ASTEXT, self, type='string') def st_x(self): db = self.db return Expression(db, db._adapter.ST_X, self, type='string') def st_y(self): db = self.db return Expression(db, db._adapter.ST_Y, self, type='string') def st_distance(self, other): db = self.db return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double') def st_simplify(self, value): db = self.db return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type) # GIS queries def st_contains(self, value): db = self.db return Query(db, db._adapter.ST_CONTAINS, self, value) def st_equals(self, value): db = self.db return Query(db, db._adapter.ST_EQUALS, self, value) def st_intersects(self, value): db = self.db return Query(db, db._adapter.ST_INTERSECTS, self, value) def st_overlaps(self, value): db = self.db return Query(db, db._adapter.ST_OVERLAPS, self, value) def st_touches(self, value): db = self.db return Query(db, db._adapter.ST_TOUCHES, self, value) def st_within(self, value): db = self.db return Query(db, db._adapter.ST_WITHIN, self, value) # for use in both Query and sortby class SQLCustomType(object): """ allows defining of custom SQL types Example:: decimal = SQLCustomType( type ='double', native ='integer', encoder =(lambda x: int(float(x) * 100)), decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) ) db.define_table( 'example', Field('value', type=decimal) ) :param type: the web2py type (default = 'string') :param native: the backend type :param encoder: how to encode the value to store it in the backend :param decoder: how to decode the value retrieved from the backend :param validator: what validators to use ( default = None, will use the default validator for type) """ def __init__( self, type='string', native=None, encoder=None, decoder=None, validator=None, _class=None, ): self.type = type self.native = native self.encoder = encoder or (lambda x: x) self.decoder = decoder or (lambda x: x) self.validator = validator self._class = _class or 
type def startswith(self, text=None): try: return self.type.startswith(self, text) except TypeError: return False def __getslice__(self, a=0, b=100): return None def __getitem__(self, i): return None def __str__(self): return self._class class FieldVirtual(object): def __init__(self, name, f=None, ftype='string',label=None,table_name=None): # for backward compatibility (self.name, self.f) = (name, f) if f else ('unkown', name) self.type = ftype self.label = label or self.name.capitalize().replace('_',' ') self.represent = IDENTITY self.formatter = IDENTITY self.comment = None self.readable = True self.writable = False self.requires = None self.widget = None self.tablename = table_name self.filter_out = None class FieldMethod(object): def __init__(self, name, f=None, handler=None): # for backward compatibility (self.name, self.f) = (name, f) if f else ('unkown', name) self.handler = handler def list_represent(x,r=None): return ', '.join(str(y) for y in x or []) class Field(Expression): Virtual = FieldVirtual Method = FieldMethod Lazy = FieldMethod # for backward compatibility """ an instance of this class represents a database field example:: a = Field(name, 'string', length=32, default=None, required=False, requires=IS_NOT_EMPTY(), ondelete='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, uploadfield=True, # True means store on disk, # 'a_field_name' means store in this field in db # False means file content will be discarded. writable=True, readable=True, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False # upload to separate directories by uuid_keys # first 2 character and tablename.fieldname # False - old behavior # True - put uploaded file in # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] # directory) uploadfs=None # a pyfilesystem where to store upload to be used as argument of DAL.define_table allowed field types: string, boolean, integer, double, text, blob, date, time, datetime, upload, password strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql) fields should have a default or they will be required in SQLFORMs the requires argument is used to validate the field input in SQLFORMs """ def __init__( self, fieldname, type='string', length=None, default=DEFAULT, required=False, requires=DEFAULT, ondelete='CASCADE', notnull=False, unique=False, uploadfield=True, widget=None, label=None, comment=None, writable=True, readable=True, update=None, authorize=None, autodelete=False, represent=None, uploadfolder=None, uploadseparate=False, uploadfs=None, compute=None, custom_store=None, custom_retrieve=None, custom_retrieve_file_properties=None, custom_delete=None, filter_in = None, filter_out = None, custom_qualifier = None, map_none = None, ): self._db = self.db = None # both for backward compatibility self.op = None self.first = None self.second = None self.name = fieldname = cleanup(fieldname) if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \ fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname): raise SyntaxError('Field: invalid field name: %s' % fieldname) self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512) self.default = default if default!=DEFAULT else (update or None) self.required = required # is this field required self.ondelete = ondelete.upper() # this is for reference fields only self.notnull = notnull 
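# --- illustrative sketch (added; not part of the original source) ---
# How the constructor arguments documented above are commonly combined when
# defining a table; the table/field names are hypothetical and IS_NOT_EMPTY
# would come from gluon.validators (auto-imported inside web2py models).
#   db.define_table('document',
#       Field('title', 'string', length=128, required=True,
#             requires=IS_NOT_EMPTY()),
#       Field('body', 'text', default=''),
#       Field('attachment', 'upload', uploadseparate=True, autodelete=True))
# --- end sketch ---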
self.unique = unique self.uploadfield = uploadfield self.uploadfolder = uploadfolder self.uploadseparate = uploadseparate self.uploadfs = uploadfs self.widget = widget self.comment = comment self.writable = writable self.readable = readable self.update = update self.authorize = authorize self.autodelete = autodelete self.represent = list_represent if \ represent==None and type in ('list:integer','list:string') else represent self.compute = compute self.isattachment = True self.custom_store = custom_store self.custom_retrieve = custom_retrieve self.custom_retrieve_file_properties = custom_retrieve_file_properties self.custom_delete = custom_delete self.filter_in = filter_in self.filter_out = filter_out self.custom_qualifier = custom_qualifier self.label = label if label!=None else fieldname.replace('_',' ').title() self.requires = requires if requires!=None else [] self.map_none = map_none def set_attributes(self,*args,**attributes): self.__dict__.update(*args,**attributes) def clone(self,point_self_references_to=False,**args): field = copy.copy(self) if point_self_references_to and \ field.type == 'reference %s'+field._tablename: field.type = 'reference %s' % point_self_references_to field.__dict__.update(args) return field def store(self, file, filename=None, path=None): if self.custom_store: return self.custom_store(file,filename,path) if isinstance(file, cgi.FieldStorage): filename = filename or file.filename file = file.file elif not filename: filename = file.name filename = os.path.basename(filename.replace('/', os.sep)\ .replace('\\', os.sep)) m = REGEX_STORE_PATTERN.search(filename) extension = m and m.group('e') or 'txt' uuid_key = web2py_uuid().replace('-', '')[-16:] encoded_filename = base64.b16encode(filename).lower() newfilename = '%s.%s.%s.%s' % \ (self._tablename, self.name, uuid_key, encoded_filename) newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' 
+ extension self_uploadfield = self.uploadfield if isinstance(self_uploadfield,Field): blob_uploadfield_name = self_uploadfield.uploadfield keys={self_uploadfield.name: newfilename, blob_uploadfield_name: file.read()} self_uploadfield.table.insert(**keys) elif self_uploadfield == True: if path: pass elif self.uploadfolder: path = self.uploadfolder elif self.db._adapter.folder: path = pjoin(self.db._adapter.folder, '..', 'uploads') else: raise RuntimeError( "you must specify a Field(...,uploadfolder=...)") if self.uploadseparate: if self.uploadfs: raise RuntimeError("not supported") path = pjoin(path,"%s.%s" %(self._tablename, self.name), uuid_key[:2]) if not exists(path): os.makedirs(path) pathfilename = pjoin(path, newfilename) if self.uploadfs: dest_file = self.uploadfs.open(newfilename, 'wb') else: dest_file = open(pathfilename, 'wb') try: shutil.copyfileobj(file, dest_file) except IOError: raise IOError( 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename) dest_file.close() return newfilename def retrieve(self, name, path=None, nameonly=False): """ if nameonly==True return (filename, fullfilename) instead of (filename, stream) """ self_uploadfield = self.uploadfield if self.custom_retrieve: return self.custom_retrieve(name, path) import http if self.authorize or isinstance(self_uploadfield, str): row = self.db(self == name).select().first() if not row: raise http.HTTP(404) if self.authorize and not self.authorize(row): raise http.HTTP(403) m = REGEX_UPLOAD_PATTERN.match(name) if not m or not self.isattachment: raise TypeError('Can\'t retrieve %s' % name) file_properties = self.retrieve_file_properties(name,path) filename = file_properties['filename'] if isinstance(self_uploadfield, str): # ## if file is in DB stream = StringIO.StringIO(row[self_uploadfield] or '') elif isinstance(self_uploadfield,Field): blob_uploadfield_name = self_uploadfield.uploadfield query = self_uploadfield == name data = self_uploadfield.table(query)[blob_uploadfield_name] stream = StringIO.StringIO(data) elif self.uploadfs: # ## if file is on pyfilesystem stream = self.uploadfs.open(name, 'rb') else: # ## if file is on regular filesystem # this is intentially a sting with filename and not a stream # this propagates and allows stream_file_or_304_or_206 to be called fullname = pjoin(file_properties['path'],name) if nameonly: return (filename, fullname) stream = open(fullname,'rb') return (filename, stream) def retrieve_file_properties(self, name, path=None): self_uploadfield = self.uploadfield if self.custom_retrieve_file_properties: return self.custom_retrieve_file_properties(name, path) try: m = REGEX_UPLOAD_PATTERN.match(name) if not m or not self.isattachment: raise TypeError('Can\'t retrieve %s file properties' % name) filename = base64.b16decode(m.group('name'), True) filename = REGEX_CLEANUP_FN.sub('_', filename) except (TypeError, AttributeError): filename = name if isinstance(self_uploadfield, str): # ## if file is in DB return dict(path=None,filename=filename) elif isinstance(self_uploadfield,Field): return dict(path=None,filename=filename) else: # ## if file is on filesystem if path: pass elif self.uploadfolder: path = self.uploadfolder else: path = pjoin(self.db._adapter.folder, '..', 'uploads') if self.uploadseparate: t = m.group('table') f = m.group('field') u = m.group('uuidkey') path = pjoin(path,"%s.%s" % (t,f),u[:2]) return dict(path=path,filename=filename) def formatter(self, value): requires = self.requires if value is None or not 
requires: return value or self.map_none if not isinstance(requires, (list, tuple)): requires = [requires] elif isinstance(requires, tuple): requires = list(requires) else: requires = copy.copy(requires) requires.reverse() for item in requires: if hasattr(item, 'formatter'): value = item.formatter(value) return value def validate(self, value): if not self.requires or self.requires == DEFAULT: return ((value if value!=self.map_none else None), None) requires = self.requires if not isinstance(requires, (list, tuple)): requires = [requires] for validator in requires: (value, error) = validator(value) if error: return (value, error) return ((value if value!=self.map_none else None), None) def count(self, distinct=None): return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer') def as_dict(self, flat=False, sanitize=True, options=True): attrs = ('type', 'length', 'default', 'required', 'ondelete', 'notnull', 'unique', 'uploadfield', 'widget', 'label', 'comment', 'writable', 'readable', 'update', 'authorize', 'autodelete', 'represent', 'uploadfolder', 'uploadseparate', 'uploadfs', 'compute', 'custom_store', 'custom_retrieve', 'custom_retrieve_file_properties', 'custom_delete', 'filter_in', 'filter_out', 'custom_qualifier', 'map_none', 'name') SERIALIZABLE_TYPES = (int, long, basestring, dict, list, float, tuple, bool, type(None)) def flatten(obj): if flat: if isinstance(obj, flatten.__class__): return str(type(obj)) elif isinstance(obj, type): try: return str(obj).split("'")[1] except IndexError: return str(obj) elif not isinstance(obj, SERIALIZABLE_TYPES): return str(obj) elif isinstance(obj, dict): newobj = dict() for k, v in obj.items(): newobj[k] = flatten(v) return newobj elif isinstance(obj, (list, tuple, set)): return [flatten(v) for v in obj] else: return obj elif isinstance(obj, (dict, set)): return obj.copy() else: return obj def filter_requires(t, r, options=True): if sanitize and any([keyword in str(t).upper() for keyword in ("CRYPT", "IS_STRONG")]): return None if not isinstance(r, dict): if options and hasattr(r, "options"): if callable(r.options): r.options() newr = r.__dict__.copy() else: newr = r.copy() # remove options if not required if not options and newr.has_key("labels"): [newr.update({key:None}) for key in ("labels", "theset") if (key in newr)] for k, v in newr.items(): if k == "other": if isinstance(v, dict): otype, other = v.popitem() else: otype = flatten(type(v)) other = v newr[k] = {otype: filter_requires(otype, other, options=options)} else: newr[k] = flatten(v) return newr if isinstance(self.requires, (tuple, list, set)): requires = dict([(flatten(type(r)), filter_requires(type(r), r, options=options)) for r in self.requires]) else: requires = {flatten(type(self.requires)): filter_requires(type(self.requires), self.requires, options=options)} d = dict(colname="%s.%s" % (self.tablename, self.name), requires=requires) d.update([(attr, flatten(getattr(self, attr))) for attr in attrs]) return d def as_xml(self, sanitize=True, options=True): if have_serializers: xml = serializers.xml else: raise ImportError("No xml serializers available") d = self.as_dict(flat=True, sanitize=sanitize, options=options) return xml(d) def as_json(self, sanitize=True, options=True): if have_serializers: json = serializers.json else: raise ImportError("No json serializers available") d = self.as_dict(flat=True, sanitize=sanitize, options=options) return json(d) def as_yaml(self, sanitize=True, options=True): if have_serializers: d = self.as_dict(flat=True, 
sanitize=sanitize, options=options) return serializers.yaml(d) else: raise ImportError("No YAML serializers available") def __nonzero__(self): return True def __str__(self): try: return '%s.%s' % (self.tablename, self.name) except: return '<no table>.%s' % self.name class Query(object): """ a query object necessary to define a set. it can be stored or can be passed to DAL.__call__() to obtain a Set Example:: query = db.users.name=='Max' set = db(query) records = set.select() """ def __init__( self, db, op, first=None, second=None, ignore_common_filters = False, **optional_args ): self.db = self._db = db self.op = op self.first = first self.second = second self.ignore_common_filters = ignore_common_filters self.optional_args = optional_args def __repr__(self): return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self) def __str__(self): return self.db._adapter.expand(self) def __and__(self, other): return Query(self.db,self.db._adapter.AND,self,other) __rand__ = __and__ def __or__(self, other): return Query(self.db,self.db._adapter.OR,self,other) __ror__ = __or__ def __invert__(self): if self.op==self.db._adapter.NOT: return self.first return Query(self.db,self.db._adapter.NOT,self) def __eq__(self, other): return repr(self) == repr(other) def __ne__(self, other): return not (self == other) def case(self,t=1,f=0): return self.db._adapter.CASE(self,t,f) def as_dict(self, flat=False, sanitize=True): """Experimental stuff This allows to return a plain dictionary with the basic query representation. Can be used with json/xml services for client-side db I/O Example: >>> q = db.auth_user.id != 0 >>> q.as_dict(flat=True) {"op": "NE", "first":{"tablename": "auth_user", "fieldname": "id"}, "second":0} """ SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float, basestring, type(None), bool) def loop(d): newd = dict() for k, v in d.items(): if k in ("first", "second"): if isinstance(v, self.__class__): newd[k] = loop(v.__dict__) elif isinstance(v, Field): newd[k] = {"tablename": v._tablename, "fieldname": v.name} elif isinstance(v, Expression): newd[k] = loop(v.__dict__) elif isinstance(v, SERIALIZABLE_TYPES): newd[k] = v elif isinstance(v, (datetime.date, datetime.time, datetime.datetime)): newd[k] = unicode(v) elif k == "op": if callable(v): newd[k] = v.__name__ elif isinstance(v, basestring): newd[k] = v else: pass # not callable or string elif isinstance(v, SERIALIZABLE_TYPES): if isinstance(v, dict): newd[k] = loop(v) else: newd[k] = v return newd if flat: return loop(self.__dict__) else: return self.__dict__ def as_xml(self, sanitize=True): if have_serializers: xml = serializers.xml else: raise ImportError("No xml serializers available") d = self.as_dict(flat=True, sanitize=sanitize) return xml(d) def as_json(self, sanitize=True): if have_serializers: json = serializers.json else: raise ImportError("No json serializers available") d = self.as_dict(flat=True, sanitize=sanitize) return json(d) def xorify(orderby): if not orderby: return None orderby2 = orderby[0] for item in orderby[1:]: orderby2 = orderby2 | item return orderby2 def use_common_filters(query): return (query and hasattr(query,'ignore_common_filters') and \ not query.ignore_common_filters) class Set(object): """ a Set represents a set of records in the database, the records are identified by the query=Query(...) object. 
normally the Set is generated by DAL.__call__(Query(...)) given a set, for example set = db(db.users.name=='Max') you can: set.update(db.users.name='Massimo') set.delete() # all elements in the set set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10)) and take subsets: subset = set(db.users.id<5) """ def __init__(self, db, query, ignore_common_filters = None): self.db = db self._db = db # for backward compatibility self.dquery = None # if query is a dict, parse it if isinstance(query, dict): query = self.parse(query) if not ignore_common_filters is None and \ use_common_filters(query) == ignore_common_filters: query = copy.copy(query) query.ignore_common_filters = ignore_common_filters self.query = query def __repr__(self): return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query) def __call__(self, query, ignore_common_filters=False): if query is None: return self elif isinstance(query,Table): query = self.db._adapter.id_query(query) elif isinstance(query,str): query = Expression(self.db,query) elif isinstance(query,Field): query = query!=None if self.query: return Set(self.db, self.query & query, ignore_common_filters=ignore_common_filters) else: return Set(self.db, query, ignore_common_filters=ignore_common_filters) def _count(self,distinct=None): return self.db._adapter._count(self.query,distinct) def _select(self, *fields, **attributes): adapter = self.db._adapter tablenames = adapter.tables(self.query, attributes.get('join',None), attributes.get('left',None), attributes.get('orderby',None), attributes.get('groupby',None)) fields = adapter.expand_all(fields, tablenames) return adapter._select(self.query,fields,attributes) def _delete(self): db = self.db tablename = db._adapter.get_table(self.query) return db._adapter._delete(tablename,self.query) def _update(self, **update_fields): db = self.db tablename = db._adapter.get_table(self.query) fields = db[tablename]._listify(update_fields,update=True) return db._adapter._update(tablename,self.query,fields) def as_dict(self, flat=False, sanitize=True): if flat: uid = dbname = uri = None codec = self.db._db_codec if not sanitize: uri, dbname, uid = (self.db._dbname, str(self.db), self.db._db_uid) d = {"query": self.query.as_dict(flat=flat)} d["db"] = {"uid": uid, "codec": codec, "name": dbname, "uri": uri} return d else: return self.__dict__ def as_xml(self, sanitize=True): if have_serializers: xml = serializers.xml else: raise ImportError("No xml serializers available") d = self.as_dict(flat=True, sanitize=sanitize) return xml(d) def as_json(self, sanitize=True): if have_serializers: json = serializers.json else: raise ImportError("No json serializers available") d = self.as_dict(flat=True, sanitize=sanitize) return json(d) def parse(self, dquery): "Experimental: Turn a dictionary into a Query object" self.dquery = dquery return self.build(self.dquery) def build(self, d): "Experimental: see .parse()" op, first, second = (d["op"], d["first"], d.get("second", None)) left = right = built = None if op in ("AND", "OR"): if not (type(first), type(second)) == (dict, dict): raise SyntaxError("Invalid AND/OR query") if op == "AND": built = self.build(first) & self.build(second) else: built = self.build(first) | self.build(second) elif op == "NOT": if first is None: raise SyntaxError("Invalid NOT query") built = ~self.build(first) else: # normal operation (GT, EQ, LT, ...) 
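# --- illustrative sketch (added; not part of the original source) ---
# The dict handled in build() is what Query.as_dict(flat=True) produces; it
# can be passed back through db(...) and is rebuilt by Set.parse()/build().
# The 'person' table is hypothetical.
#   q = db.person.age > 18
#   d = q.as_dict(flat=True)
#   # roughly: {'op': 'GT',
#   #           'first': {'tablename': 'person', 'fieldname': 'age'},
#   #           'second': 18}
#   rows = db(d).select(db.person.ALL)
# --- end sketch ---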
for k, v in {"left": first, "right": second}.items(): if isinstance(v, dict) and v.get("op"): v = self.build(v) if isinstance(v, dict) and ("tablename" in v): v = self.db[v["tablename"]][v["fieldname"]] if k == "left": left = v else: right = v if hasattr(self.db._adapter, op): opm = getattr(self.db._adapter, op) if op == "EQ": built = left == right elif op == "NE": built = left != right elif op == "GT": built = left > right elif op == "GE": built = left >= right elif op == "LT": built = left < right elif op == "LE": built = left <= right elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"): built = Expression(self.db, opm) elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY", "COALESCE_ZERO", "RAW", "INVERT"): built = Expression(self.db, opm, left) elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING", "REGEXP", "LIKE", "ILIKE", "STARTSWITH", "ENDSWITH", "ADD", "SUB", "MUL", "DIV", "MOD", "AS", "ON", "COMMA", "NOT_NULL", "COALESCE", "CONTAINS", "BELONGS"): built = Expression(self.db, opm, left, right) # expression as string elif not (left or right): built = Expression(self.db, op) else: raise SyntaxError("Operator not supported: %s" % op) return built def isempty(self): return not self.select(limitby=(0,1), orderby_on_limitby=False) def count(self,distinct=None, cache=None): db = self.db if cache: cache_model, time_expire = cache sql = self._count(distinct=distinct) key = db._uri + '/' + sql if len(key)>200: key = hashlib_md5(key).hexdigest() return cache_model( key, (lambda self=self,distinct=distinct: \ db._adapter.count(self.query,distinct)), time_expire) return db._adapter.count(self.query,distinct) def select(self, *fields, **attributes): adapter = self.db._adapter tablenames = adapter.tables(self.query, attributes.get('join',None), attributes.get('left',None), attributes.get('orderby',None), attributes.get('groupby',None)) fields = adapter.expand_all(fields, tablenames) return adapter.select(self.query,fields,attributes) def nested_select(self,*fields,**attributes): return Expression(self.db,self._select(*fields,**attributes)) def delete(self): db = self.db tablename = db._adapter.get_table(self.query) table = db[tablename] if any(f(self) for f in table._before_delete): return 0 ret = db._adapter.delete(tablename,self.query) ret and [f(self) for f in table._after_delete] return ret def update(self, **update_fields): db = self.db tablename = db._adapter.get_table(self.query) table = db[tablename] table._attempt_upload(update_fields) if any(f(self,update_fields) for f in table._before_update): return 0 fields = table._listify(update_fields,update=True) if not fields: raise SyntaxError("No fields to update") ret = db._adapter.update(tablename,self.query,fields) ret and [f(self,update_fields) for f in table._after_update] return ret def update_naive(self, **update_fields): """ same as update but does not call table._before_update and _after_update """ tablename = self.db._adapter.get_table(self.query) table = self.db[tablename] fields = table._listify(update_fields,update=True) if not fields: raise SyntaxError("No fields to update") ret = self.db._adapter.update(tablename,self.query,fields) return ret def validate_and_update(self, **update_fields): tablename = self.db._adapter.get_table(self.query) response = Row() response.errors = Row() new_fields = copy.copy(update_fields) for key,value in update_fields.iteritems(): value,error = self.db[tablename][key].validate(value) if error: response.errors[key] = error else: new_fields[key] = value table = self.db[tablename] if 
response.errors: response.updated = None else: if not any(f(self,new_fields) for f in table._before_update): fields = table._listify(new_fields,update=True) if not fields: raise SyntaxError("No fields to update") ret = self.db._adapter.update(tablename,self.query,fields) ret and [f(self,new_fields) for f in table._after_update] else: ret = 0 response.updated = ret return response def delete_uploaded_files(self, upload_fields=None): table = self.db[self.db._adapter.tables(self.query)[0]] # ## mind uploadfield==True means file is not in DB if upload_fields: fields = upload_fields.keys() else: fields = table.fields fields = [f for f in fields if table[f].type == 'upload' and table[f].uploadfield == True and table[f].autodelete] if not fields: return False for record in self.select(*[table[f] for f in fields]): for fieldname in fields: field = table[fieldname] oldname = record.get(fieldname, None) if not oldname: continue if upload_fields and oldname == upload_fields[fieldname]: continue if field.custom_delete: field.custom_delete(oldname) else: uploadfolder = field.uploadfolder if not uploadfolder: uploadfolder = pjoin( self.db._adapter.folder, '..', 'uploads') if field.uploadseparate: items = oldname.split('.') uploadfolder = pjoin( uploadfolder, "%s.%s" % (items[0], items[1]), items[2][:2]) oldpath = pjoin(uploadfolder, oldname) if exists(oldpath): os.unlink(oldpath) return False class RecordUpdater(object): def __init__(self, colset, table, id): self.colset, self.db, self.tablename, self.id = \ colset, table._db, table._tablename, id def __call__(self, **fields): colset, db, tablename, id = self.colset, self.db, self.tablename, self.id table = db[tablename] newfields = fields or dict(colset) for fieldname in newfields.keys(): if not fieldname in table.fields or table[fieldname].type=='id': del newfields[fieldname] table._db(table._id==id,ignore_common_filters=True).update(**newfields) colset.update(newfields) return colset class RecordDeleter(object): def __init__(self, table, id): self.db, self.tablename, self.id = table._db, table._tablename, id def __call__(self): return self.db(self.db[self.tablename]._id==self.id).delete() class LazySet(object): def __init__(self, field, id): self.db, self.tablename, self.fieldname, self.id = \ field.db, field._tablename, field.name, id def _getset(self): query = self.db[self.tablename][self.fieldname]==self.id return Set(self.db,query) def __repr__(self): return repr(self._getset()) def __call__(self, query, ignore_common_filters=False): return self._getset()(query, ignore_common_filters) def _count(self,distinct=None): return self._getset()._count(distinct) def _select(self, *fields, **attributes): return self._getset()._select(*fields,**attributes) def _delete(self): return self._getset()._delete() def _update(self, **update_fields): return self._getset()._update(**update_fields) def isempty(self): return self._getset().isempty() def count(self,distinct=None, cache=None): return self._getset().count(distinct,cache) def select(self, *fields, **attributes): return self._getset().select(*fields,**attributes) def nested_select(self,*fields,**attributes): return self._getset().nested_select(*fields,**attributes) def delete(self): return self._getset().delete() def update(self, **update_fields): return self._getset().update(**update_fields) def update_naive(self, **update_fields): return self._getset().update_naive(**update_fields) def validate_and_update(self, **update_fields): return self._getset().validate_and_update(**update_fields) def 
delete_uploaded_files(self, upload_fields=None): return self._getset().delete_uploaded_files(upload_fields) class VirtualCommand(object): def __init__(self,method,row): self.method=method self.row=row def __call__(self,*args,**kwargs): return self.method(self.row,*args,**kwargs) def lazy_virtualfield(f): f.__lazy__ = True return f class Rows(object): """ A wrapper for the return value of a select. It basically represents a table. It has an iterator and each row is represented as a dictionary. """ # ## TODO: this class still needs some work to care for ID/OID def __init__( self, db=None, records=[], colnames=[], compact=True, rawrows=None ): self.db = db self.records = records self.colnames = colnames self.compact = compact self.response = rawrows def __repr__(self): return '<Rows (%s)>' % len(self.records) def setvirtualfields(self,**keyed_virtualfields): """ db.define_table('x',Field('number','integer')) if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] from gluon.dal import lazy_virtualfield class MyVirtualFields(object): # normal virtual field (backward compatible, discouraged) def normal_shift(self): return self.x.number+1 # lazy virtual field (because of @staticmethod) @lazy_virtualfield def lazy_shift(instance,row,delta=4): return row.x.number+delta db.x.virtualfields.append(MyVirtualFields()) for row in db(db.x).select(): print row.number, row.normal_shift, row.lazy_shift(delta=7) """ if not keyed_virtualfields: return self for row in self.records: for (tablename,virtualfields) in keyed_virtualfields.iteritems(): attributes = dir(virtualfields) if not tablename in row: box = row[tablename] = Row() else: box = row[tablename] updated = False for attribute in attributes: if attribute[0] != '_': method = getattr(virtualfields,attribute) if hasattr(method,'__lazy__'): box[attribute]=VirtualCommand(method,row) elif type(method)==types.MethodType: if not updated: virtualfields.__dict__.update(row) updated = True box[attribute]=method() return self def __and__(self,other): if self.colnames!=other.colnames: raise Exception('Cannot & incompatible Rows objects') records = self.records+other.records return Rows(self.db,records,self.colnames) def __or__(self,other): if self.colnames!=other.colnames: raise Exception('Cannot | incompatible Rows objects') records = self.records records += [record for record in other.records \ if not record in records] return Rows(self.db,records,self.colnames) def __nonzero__(self): if len(self.records): return 1 return 0 def __len__(self): return len(self.records) def __getslice__(self, a, b): return Rows(self.db,self.records[a:b],self.colnames) def __getitem__(self, i): row = self.records[i] keys = row.keys() if self.compact and len(keys) == 1 and keys[0] != '_extra': return row[row.keys()[0]] return row def __iter__(self): """ iterator over records """ for i in xrange(len(self)): yield self[i] def __str__(self): """ serializes the table into a csv file """ s = StringIO.StringIO() self.export_to_csv_file(s) return s.getvalue() def first(self): if not self.records: return None return self[0] def last(self): if not self.records: return None return self[-1] def find(self,f,limitby=None): """ returns a new Rows object, a subset of the original object, filtered by the function f """ if not self: return Rows(self.db, [], self.colnames) records = [] if limitby: a,b = limitby else: a,b = 0,len(self) k = 0 for row in self: if f(row): if a<=k: records.append(row) k += 1 if k==b: break return Rows(self.db, records, self.colnames) def exclude(self, f): """ 
removes elements from the calling Rows object, filtered by the function f, and returns a new Rows object containing the removed elements """ if not self.records: return Rows(self.db, [], self.colnames) removed = [] i=0 while i<len(self): row = self[i] if f(row): removed.append(self.records[i]) del self.records[i] else: i += 1 return Rows(self.db, removed, self.colnames) def sort(self, f, reverse=False): """ returns a list of sorted elements (not sorted in place) """ rows = Rows(self.db,[],self.colnames,compact=False) rows.records = sorted(self,key=f,reverse=reverse) return rows def group_by_value(self, field): """ regroups the rows, by one of the fields """ if not self.records: return {} key = str(field) grouped_row_group = dict() for row in self: value = row[key] if not value in grouped_row_group: grouped_row_group[value] = [row] else: grouped_row_group[value].append(row) return grouped_row_group def as_list(self, compact=True, storage_to_dict=True, datetime_to_str=True, custom_types=None): """ returns the data as a list or dictionary. :param storage_to_dict: when True returns a dict, otherwise a list(default True) :param datetime_to_str: convert datetime fields as strings (default True) """ (oc, self.compact) = (self.compact, compact) if storage_to_dict: items = [item.as_dict(datetime_to_str, custom_types) for item in self] else: items = [item for item in self] self.compact = compact return items def as_dict(self, key='id', compact=True, storage_to_dict=True, datetime_to_str=True, custom_types=None): """ returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) :param key: the name of the field to be used as dict key, normally the id :param compact: ? (default True) :param storage_to_dict: when True returns a dict, otherwise a list(default True) :param datetime_to_str: convert datetime fields as strings (default True) """ # test for multiple rows multi = False f = self.first() if f: multi = any([isinstance(v, f.__class__) for v in f.values()]) if (not "." in key) and multi: # No key provided, default to int indices def new_key(): i = 0 while True: yield i i += 1 key_generator = new_key() key = lambda r: key_generator.next() rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) if isinstance(key,str) and key.count('.')==1: (table, field) = key.split('.') return dict([(r[table][field],r) for r in rows]) elif isinstance(key,str): return dict([(r[key],r) for r in rows]) else: return dict([(key(r),r) for r in rows]) def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs): """ export data to csv, the first line contains the column names :param ofile: where the csv must be exported to :param null: how null values must be represented (default '<NULL>') :param delimiter: delimiter to separate values (default ',') :param quotechar: character to use to quote string values (default '"') :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) :param represent: use the fields .represent value (default False) :param colnames: list of column names to use (default self.colnames) This will only work when exporting rows objects!!!! 
DO NOT use this with db.export_to_csv() """ delimiter = kwargs.get('delimiter', ',') quotechar = kwargs.get('quotechar', '"') quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) represent = kwargs.get('represent', False) writer = csv.writer(ofile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) colnames = kwargs.get('colnames', self.colnames) write_colnames = kwargs.get('write_colnames',True) # a proper csv starting with the column names if write_colnames: writer.writerow(colnames) def none_exception(value): """ returns a cleaned up value that can be used for csv export: - unicode text is encoded as such - None values are replaced with the given representation (default <NULL>) """ if value is None: return null elif isinstance(value, unicode): return value.encode('utf8') elif isinstance(value,Reference): return long(value) elif hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') elif isinstance(value, (list,tuple)): # for type='list:..' return bar_encode(value) return value for record in self: row = [] for col in colnames: if not REGEX_TABLE_DOT_FIELD.match(col): row.append(record._extra[col]) else: (t, f) = col.split('.') field = self.db[t][f] if isinstance(record.get(t, None), (Row,dict)): value = record[t][f] else: value = record[f] if field.type=='blob' and not value is None: value = base64.b64encode(value) elif represent and field.represent: value = field.represent(value) row.append(none_exception(value)) writer.writerow(row) def xml(self,strict=False,row_name='row',rows_name='rows'): """ serializes the table using sqlhtml.SQLTABLE (if present) """ if strict: ncols = len(self.colnames) return '<%s>\n%s\n</%s>' % (rows_name, '\n'.join(row.as_xml(row_name=row_name, colnames=self.colnames) for row in self), rows_name) import sqlhtml return sqlhtml.SQLTABLE(self).xml() def as_xml(self,row_name='row',rows_name='rows'): return self.xml(strict=True, row_name=row_name, rows_name=rows_name) def as_json(self, mode='object', default=None): """ serializes the table to a JSON list of objects """ items = [record.as_json(mode=mode, default=default, serialize=False, colnames=self.colnames) for record in self] if have_serializers: return serializers.json(items, default=default or serializers.custom_json) elif simplejson: return simplejson.dumps(items) else: raise RuntimeError("missing simplejson") # for consistent naming yet backwards compatible as_csv = __str__ json = as_json ################################################################################ # dummy function used to define some doctests ################################################################################ def test_all(): """ >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) >>> tmp = db.define_table('users',\ Field('stringf', 'string', length=32, required=True),\ Field('booleanf', 'boolean', default=False),\ Field('passwordf', 'password', notnull=True),\ Field('uploadf', 'upload'),\ Field('blobf', 'blob'),\ Field('integerf', 'integer', unique=True),\ Field('doublef', 'double', unique=True,notnull=True),\ Field('jsonf', 'json'),\ Field('datef', 'date', default=datetime.date.today()),\ Field('timef', 'time'),\ Field('datetimef', 'datetime'),\ migrate='test_user.table') Insert a field >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ uploadf=None, integerf=5, doublef=3.14,\ jsonf={"j": True},\ datef=datetime.date(2001, 1, 1),\ timef=datetime.time(12, 30, 15),\ datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) 1 Drop 
the table >>> db.users.drop() Examples of insert, select, update, delete >>> tmp = db.define_table('person',\ Field('name'),\ Field('birth','date'),\ migrate='test_person.table') >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') commented len(db().select(db.person.ALL)) commented 2 >>> me = db(db.person.id==person_id).select()[0] # test select >>> me.name 'Massimo' >>> db.person[2].name 'Massimo' >>> db.person(2).name 'Massimo' >>> db.person(name='Massimo').name 'Massimo' >>> db.person(db.person.name=='Massimo').name 'Massimo' >>> row = db.person[2] >>> row.name == row['name'] == row['person.name'] == row('person.name') True >>> db(db.person.name=='Massimo').update(name='massimo') # test update 1 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 1 Update a single record >>> me.update_record(name=\"Max\") <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}> >>> me.name 'Max' Examples of complex search conditions >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 1 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 1 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 1 >>> me = db(db.person.id==person_id).select(db.person.name)[0] >>> me.name 'Max' Examples of search conditions using extract from date/datetime/time >>> len(db(db.person.birth.month()==12).select()) 1 >>> len(db(db.person.birth.year()>1900).select()) 1 Example of usage of NULL >>> len(db(db.person.birth==None).select()) ### test NULL 0 >>> len(db(db.person.birth!=None).select()) ### test NULL 1 Examples of search conditions using lower, upper, and like >>> len(db(db.person.name.upper()=='MAX').select()) 1 >>> len(db(db.person.name.like('%ax')).select()) 1 >>> len(db(db.person.name.upper().like('%AX')).select()) 1 >>> len(db(~db.person.name.upper().like('%AX')).select()) 0 orderby, groupby and limitby >>> people = db().select(db.person.name, orderby=db.person.name) >>> order = db.person.name|~db.person.birth >>> people = db().select(db.person.name, orderby=order) >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) Example of one 2 many relation >>> tmp = db.define_table('dog',\ Field('name'),\ Field('birth','date'),\ Field('owner',db.person),\ migrate='test_dog.table') >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 1 A simple JOIN >>> len(db(db.dog.owner==db.person.id).select()) 1 >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 1 Drop tables >>> db.dog.drop() >>> db.person.drop() Example of many 2 many relation and Set >>> tmp = db.define_table('author', Field('name'),\ migrate='test_author.table') >>> tmp = db.define_table('paper', Field('title'),\ migrate='test_paper.table') >>> tmp = db.define_table('authorship',\ Field('author_id', db.author),\ Field('paper_id', db.paper),\ migrate='test_authorship.table') >>> aid = db.author.insert(name='Massimo') >>> pid = db.paper.insert(title='QCD') >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) Define a Set >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) >>> rows = authored_papers.select(db.author.name, db.paper.title) >>> for row in rows: print row.author.name, row.paper.title Massimo QCD Example of search condition using belongs 
>>> set = (1, 2, 3) >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) >>> print rows[0].title QCD Example of search condition using nested select >>> nested_select = db()._select(db.authorship.paper_id) >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) >>> print rows[0].title QCD Example of expressions >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) >>> db(mynumber).delete() 0 >>> for i in range(10): tmp = mynumber.insert(x=i) >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum()) 45 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 5 Output in csv >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() author.name,paper.title\r Massimo,QCD Delete all leftover tables >>> DAL.distributed_transaction_commit(db) >>> db.mynumber.drop() >>> db.authorship.drop() >>> db.author.drop() >>> db.paper.drop() """ ################################################################################ # deprecated since the new DAL; here only for backward compatibility ################################################################################ SQLField = Field SQLTable = Table SQLXorable = Expression SQLQuery = Query SQLSet = Set SQLRows = Rows SQLStorage = Row SQLDB = DAL GQLDB = DAL DAL.Field = Field # was necessary in gluon/globals.py session.connect DAL.Table = Table # was necessary in gluon/globals.py session.connect ################################################################################ # Geodal utils ################################################################################ def geoPoint(x,y): return "POINT (%f %f)" % (x,y) def geoLine(*line): return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line) def geoPolygon(*line): return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line) ################################################################################ # run tests ################################################################################ if __name__ == '__main__': import doctest doctest.testmod()
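# --- Illustrative sketch (added; not part of the original web2py source) ---
# A minimal, hedged example of how the Set and Rows helpers defined above
# (find, sort, as_dict, export_to_csv_file, update, delete) are typically
# combined. It assumes an in-memory SQLite DAL; the table and field names
# ('person', 'name', 'birth') are hypothetical. DAL and Field are the classes
# defined earlier in this module.
def _example_rows_usage():
    import datetime
    import StringIO
    db = DAL('sqlite:memory')
    db.define_table('person', Field('name'), Field('birth', 'date'))
    db.person.insert(name='Alice', birth=datetime.date(1980, 1, 1))
    db.person.insert(name='Bob', birth=datetime.date(1992, 2, 2))

    people = db(db.person.id > 0).select()              # a Rows object
    older = people.find(lambda r: r.birth.year < 1985)  # filtered copy
    by_name = people.sort(lambda r: r.name)             # sorted copy
    mapping = people.as_dict(key='id')                  # dict keyed by id

    out = StringIO.StringIO()
    people.export_to_csv_file(out)   # first line contains the column names

    db(db.person.name == 'Bob').update(name='Robert')   # Set.update
    db(db.person.name == 'Alice').delete()              # Set.delete
    return older, by_name, mapping, out.getvalue()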
#!/usr/bin/env python # -*- coding: utf-8 -*- import __builtin__ import os import re import sys import threading import traceback from gluon import current NATIVE_IMPORTER = __builtin__.__import__ INVALID_MODULES = set(('', 'gluon', 'applications', 'custom_import')) # backward compatibility API def custom_import_install(): if __builtin__.__import__ == NATIVE_IMPORTER: INVALID_MODULES.update(sys.modules.keys()) __builtin__.__import__ = custom_importer def track_changes(track=True): assert track in (True, False), "must be True or False" current.request._custom_import_track_changes = track def is_tracking_changes(): return current.request._custom_import_track_changes class CustomImportException(ImportError): pass def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1): """ The web2py custom importer. Like the standard Python importer but it tries to transform import statements as something like "import applications.app_name.modules.x". If the import failed, fall back on naive_importer """ globals = globals or {} locals = locals or {} fromlist = fromlist or [] try: if current.request._custom_import_track_changes: base_importer = TRACK_IMPORTER else: base_importer = NATIVE_IMPORTER except: # there is no current.request (should never happen) base_importer = NATIVE_IMPORTER # if not relative and not from applications: if hasattr(current, 'request') \ and level <= 0 \ and not name.split('.')[0] in INVALID_MODULES \ and isinstance(globals, dict): import_tb = None try: try: oname = name if not name.startswith('.') else '.'+name return NATIVE_IMPORTER(oname, globals, locals, fromlist, level) except ImportError: items = current.request.folder.split(os.path.sep) if not items[-1]: items = items[:-1] modules_prefix = '.'.join(items[-2:]) + '.modules' if not fromlist: # import like "import x" or "import x.y" result = None for itemname in name.split("."): new_mod = base_importer( modules_prefix, globals, locals, [itemname], level) try: result = result or new_mod.__dict__[itemname] except KeyError, e: raise ImportError, 'Cannot import module %s' % str(e) modules_prefix += "." + itemname return result else: # import like "from x import a, b, ..." pname = modules_prefix + "." + name return base_importer(pname, globals, locals, fromlist, level) except ImportError, e1: import_tb = sys.exc_info()[2] try: return NATIVE_IMPORTER(name, globals, locals, fromlist, level) except ImportError, e3: raise ImportError, e1, import_tb # there an import error in the module except Exception, e2: raise e2 # there is an error in the module finally: if import_tb: import_tb = None return NATIVE_IMPORTER(name, globals, locals, fromlist, level) class TrackImporter(object): """ An importer tracking the date of the module files and reloading them when they have changed. """ THREAD_LOCAL = threading.local() PACKAGE_PATH_SUFFIX = os.path.sep + "__init__.py" def __init__(self): self._import_dates = {} # Import dates of the files of the modules def __call__(self, name, globals=None, locals=None, fromlist=None, level=-1): """ The import method itself. 
""" globals = globals or {} locals = locals or {} fromlist = fromlist or [] if not hasattr(self.THREAD_LOCAL, '_modules_loaded'): self.THREAD_LOCAL._modules_loaded = set() try: # Check the date and reload if needed: self._update_dates(name, globals, locals, fromlist, level) # Try to load the module and update the dates if it works: result = NATIVE_IMPORTER(name, globals, locals, fromlist, level) # Module maybe loaded for the 1st time so we need to set the date self._update_dates(name, globals, locals, fromlist, level) return result except Exception, e: raise # Don't hide something that went wrong def _update_dates(self, name, globals, locals, fromlist, level): """ Update all the dates associated to the statement import. A single import statement may import many modules. """ self._reload_check(name, globals, locals, level) for fromlist_name in fromlist or []: pname = "%s.%s" % (name, fromlist_name) self._reload_check(pname, globals, locals, level) def _reload_check(self, name, globals, locals, level): """ Update the date associated to the module and reload the module if the file has changed. """ module = sys.modules.get(name) file = self._get_module_file(module) if file: date = self._import_dates.get(file) new_date = None reload_mod = False mod_to_pack = False # Module turning into a package? (special case) try: new_date = os.path.getmtime(file) except: self._import_dates.pop(file, None) # Clean up # Handle module changing in package and #package changing in module: if file.endswith(".py"): # Get path without file ext: file = os.path.splitext(file)[0] reload_mod = os.path.isdir(file) \ and os.path.isfile(file + self.PACKAGE_PATH_SUFFIX) mod_to_pack = reload_mod else: # Package turning into module? file += ".py" reload_mod = os.path.isfile(file) if reload_mod: new_date = os.path.getmtime(file) # Refresh file date if reload_mod or not date or new_date > date: self._import_dates[file] = new_date if reload_mod or (date and new_date > date): if module not in self.THREAD_LOCAL._modules_loaded: if mod_to_pack: # Module turning into a package: mod_name = module.__name__ del sys.modules[mod_name] # Delete the module # Reload the module: NATIVE_IMPORTER(mod_name, globals, locals, [], level) else: reload(module) self.THREAD_LOCAL._modules_loaded.add(module) def _get_module_file(self, module): """ Get the absolute path file associated to the module or None. """ file = getattr(module, "__file__", None) if file: # Make path absolute if not: file = os.path.splitext(file)[0] + ".py" # Change .pyc for .py if file.endswith(self.PACKAGE_PATH_SUFFIX): file = os.path.dirname(file) # Track dir for packages return file TRACK_IMPORTER = TrackImporter()
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Basic caching classes and methods ================================= - Cache - The generic caching object interfacing with the others - CacheInRam - providing caching in ram - CacheOnDisk - provides caches on disk Memcache is also available via a different module (see gluon.contrib.memcache) When web2py is running on Google App Engine, caching will be provided by the GAE memcache (see gluon.contrib.gae_memcache) """ import traceback import time import portalocker import shelve import thread import os import logging import re import hashlib import datetime try: import settings have_settings = True except ImportError: have_settings = False logger = logging.getLogger("web2py.cache") __all__ = ['Cache', 'lazy_cache'] DEFAULT_TIME_EXPIRE = 300 class CacheAbstract(object): """ Abstract class for cache implementations. Main function is now to provide referenced api documentation. Use CacheInRam or CacheOnDisk instead which are derived from this class. Attentions, Michele says: There are signatures inside gdbm files that are used directly by the python gdbm adapter that often are lagging behind in the detection code in python part. On every occasion that a gdbm store is probed by the python adapter, the probe fails, because gdbm file version is newer. Using gdbm directly from C would work, because there is backward compatibility, but not from python! The .shelve file is discarded and a new one created (with new signature) and it works until it is probed again... The possible consequences are memory leaks and broken sessions. """ cache_stats_name = 'web2py_cache_statistics' def __init__(self, request=None): """ Paremeters ---------- request: the global request object """ raise NotImplementedError def __call__(self, key, f, time_expire=DEFAULT_TIME_EXPIRE): """ Tries retrieve the value corresponding to `key` from the cache of the object exists and if it did not expire, else it called the function `f` and stores the output in the cache corresponding to `key`. In the case the output of the function is returned. :param key: the key of the object to be store or retrieved :param f: the function, whose output is to be cached :param time_expire: expiration of the cache in microseconds - `time_expire` is used to compare the current time with the time when the requested object was last saved in cache. It does not affect future requests. - Setting `time_expire` to 0 or negative value forces the cache to refresh. If the function `f` is `None` the cache is cleared. """ raise NotImplementedError def clear(self, regex=None): """ Clears the cache of all keys that match the provided regular expression. If no regular expression is provided, it clears all entries in cache. Parameters ---------- regex: if provided, only keys matching the regex will be cleared. Otherwise all keys are cleared. 
""" raise NotImplementedError def increment(self, key, value=1): """ Increments the cached value for the given key by the amount in value Parameters ---------- key: key for the cached object to be incremeneted value: amount of the increment (defaults to 1, can be negative) """ raise NotImplementedError def _clear(self, storage, regex): """ Auxiliary function called by `clear` to search and clear cache entries """ r = re.compile(regex) for (key, value) in storage.items(): if r.match(str(key)): del storage[key] class CacheInRam(CacheAbstract): """ Ram based caching This is implemented as global (per process, shared by all threads) dictionary. A mutex-lock mechanism avoid conflicts. """ locker = thread.allocate_lock() meta_storage = {} def __init__(self, request=None): self.initialized = False self.request = request self.storage = {} def initialize(self): if self.initialized: return else: self.initialized = True self.locker.acquire() request = self.request if request: app = request.application else: app = '' if not app in self.meta_storage: self.storage = self.meta_storage[app] = { CacheAbstract.cache_stats_name: {'hit_total': 0, 'misses': 0}} else: self.storage = self.meta_storage[app] self.locker.release() def clear(self, regex=None): self.initialize() self.locker.acquire() storage = self.storage if regex is None: storage.clear() else: self._clear(storage, regex) if not CacheAbstract.cache_stats_name in storage.keys(): storage[CacheAbstract.cache_stats_name] = { 'hit_total': 0, 'misses': 0} self.locker.release() def __call__(self, key, f, time_expire=DEFAULT_TIME_EXPIRE, destroyer=None): """ Attention! cache.ram does not copy the cached object. It just stores a reference to it. Turns out the deepcopying the object has some problems: 1) would break backward compatibility 2) would be limiting because people may want to cache live objects 3) would work unless we deepcopy no storage and retrival which would make things slow. Anyway. You can deepcopy explicitly in the function generating the value to be cached. """ self.initialize() dt = time_expire now = time.time() self.locker.acquire() item = self.storage.get(key, None) if item and f is None: del self.storage[key] if destroyer: destroyer(item[1]) self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1 self.locker.release() if f is None: return None if item and (dt is None or item[0] > now - dt): return item[1] elif item and (item[0] < now - dt) and destroyer: destroyer(item[1]) value = f() self.locker.acquire() self.storage[key] = (now, value) self.storage[CacheAbstract.cache_stats_name]['misses'] += 1 self.locker.release() return value def increment(self, key, value=1): self.initialize() self.locker.acquire() try: if key in self.storage: value = self.storage[key][1] + value self.storage[key] = (time.time(), value) except BaseException, e: self.locker.release() raise e self.locker.release() return value class CacheOnDisk(CacheAbstract): """ Disk based cache This is implemented as a shelve object and it is shared by multiple web2py processes (and threads) as long as they share the same filesystem. The file is locked when accessed. Disk cache provides persistance when web2py is started/stopped but it slower than `CacheInRam` Values stored in disk cache must be pickable. 
""" def _close_shelve_and_unlock(self): try: if self.storage: self.storage.close() finally: if self.locker and self.locked: portalocker.unlock(self.locker) self.locker.close() self.locked = False def _open_shelve_and_lock(self): """Open and return a shelf object, obtaining an exclusive lock on self.locker first. Replaces the close method of the returned shelf instance with one that releases the lock upon closing.""" storage = None locker = None locked = False try: locker = locker = open(self.locker_name, 'a') portalocker.lock(locker, portalocker.LOCK_EX) locked = True try: storage = shelve.open(self.shelve_name) except: logger.error('corrupted cache file %s, will try rebuild it' % (self.shelve_name)) storage = None if not storage and os.path.exists(self.shelve_name): os.unlink(self.shelve_name) storage = shelve.open(self.shelve_name) if not CacheAbstract.cache_stats_name in storage.keys(): storage[CacheAbstract.cache_stats_name] = { 'hit_total': 0, 'misses': 0} storage.sync() except Exception, e: if storage: storage.close() storage = None if locked: portalocker.unlock(locker) locker.close() locked = False raise RuntimeError( 'unable to create/re-create cache file %s' % self.shelve_name) self.locker = locker self.locked = locked self.storage = storage return storage def __init__(self, request=None, folder=None): self.initialized = False self.request = request self.folder = folder self.storage = {} def initialize(self): if self.initialized: return else: self.initialized = True folder = self.folder request = self.request # Lets test if the cache folder exists, if not # we are going to create it folder = folder or os.path.join(request.folder, 'cache') if not os.path.exists(folder): os.mkdir(folder) ### we need this because of a possible bug in shelve that may ### or may not lock self.locker_name = os.path.join(folder, 'cache.lock') self.shelve_name = os.path.join(folder, 'cache.shelve') def clear(self, regex=None): self.initialize() storage = self._open_shelve_and_lock() try: if regex is None: storage.clear() else: self._clear(storage, regex) storage.sync() finally: self._close_shelve_and_unlock() def __call__(self, key, f, time_expire=DEFAULT_TIME_EXPIRE): self.initialize() dt = time_expire storage = self._open_shelve_and_lock() try: item = storage.get(key, None) storage[CacheAbstract.cache_stats_name]['hit_total'] += 1 if item and f is None: del storage[key] storage.sync() now = time.time() if f is None: value = None elif item and (dt is None or item[0] > now - dt): value = item[1] else: value = f() storage[key] = (now, value) storage[CacheAbstract.cache_stats_name]['misses'] += 1 storage.sync() finally: self._close_shelve_and_unlock() return value def increment(self, key, value=1): self.initialize() storage = self._open_shelve_and_lock() try: if key in storage: value = storage[key][1] + value storage[key] = (time.time(), value) storage.sync() finally: self._close_shelve_and_unlock() return value class CacheAction(object): def __init__(self, func, key, time_expire, cache, cache_model): self.__name__ = func.__name__ self.__doc__ = func.__doc__ self.func = func self.key = key self.time_expire = time_expire self.cache = cache self.cache_model = cache_model def __call__(self, *a, **b): if not self.key: key2 = self.__name__ + ':' + repr(a) + ':' + repr(b) else: key2 = self.key.replace('%(name)s', self.__name__)\ .replace('%(args)s', str(a)).replace('%(vars)s', str(b)) cache_model = self.cache_model if not cache_model or isinstance(cache_model, str): cache_model = getattr(self.cache, cache_model 
or 'ram') return cache_model(key2, lambda a=a, b=b: self.func(*a, **b), self.time_expire) class Cache(object): """ Sets up generic caching, creating an instance of both CacheInRam and CacheOnDisk. In case of GAE will make use of gluon.contrib.gae_memcache. - self.ram is an instance of CacheInRam - self.disk is an instance of CacheOnDisk """ autokey = ':%(name)s:%(args)s:%(vars)s' def __init__(self, request): """ Parameters ---------- request: the global request object """ # GAE will have a special caching if have_settings and settings.global_settings.web2py_runtime_gae: from contrib.gae_memcache import MemcacheClient self.ram = self.disk = MemcacheClient(request) else: # Otherwise use ram (and try also disk) self.ram = CacheInRam(request) try: self.disk = CacheOnDisk(request) except IOError: logger.warning('no cache.disk (IOError)') except AttributeError: # normally not expected anymore, as GAE has already # been accounted for logger.warning('no cache.disk (AttributeError)') def action(self, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None, prefix=None, session=False, vars=True, lang=True, user_agent=False, public=True, valid_statuses=None, quick=None): """ Experimental! Currently only HTTP 1.1 compliant reference : http://code.google.com/p/doctype-mirror/wiki/ArticleHttpCaching time_expire: same as @cache cache_model: same as @cache prefix: add a prefix to the calculated key session: adds response.session_id to the key vars: adds request.env.query_string lang: adds T.accepted_language user_agent: if True, adds is_mobile and is_tablet to the key. Pass a dict to use all the needed values (uses str(.items())) (e.g. user_agent=request.user_agent()) used only if session is not True public: if False forces the Cache-Control to be 'private' valid_statuses: by default only status codes starting with 1,2,3 will be cached. pass an explicit list of statuses on which turn the cache on quick: Session,Vars,Lang,User-agent,Public: fast overrides with initial strings, e.g. 
'SVLP' or 'VLP', or 'VLP' """ from gluon import current from gluon.http import HTTP def wrap(func): def wrapped_f(): if current.request.env.request_method != 'GET': return func() if time_expire: cache_control = 'max-age=%(time_expire)s, s-maxage=%(time_expire)s' % dict(time_expire=time_expire) if quick: session_ = True if 'S' in quick else False vars_ = True if 'V' in quick else False lang_ = True if 'L' in quick else False user_agent_ = True if 'U' in quick else False public_ = True if 'P' in quick else False else: session_, vars_, lang_, user_agent_, public_ = session, vars, lang, user_agent, public if not session_ and public_: cache_control += ', public' expires = (current.request.utcnow + datetime.timedelta(seconds=time_expire)).strftime('%a, %d %b %Y %H:%M:%S GMT') vary = None else: cache_control += ', private' expires = 'Fri, 01 Jan 1990 00:00:00 GMT' if cache_model: #figure out the correct cache key cache_key = [current.request.env.path_info, current.response.view] if session_: cache_key.append(current.response.session_id) elif user_agent_: if user_agent_ is True: cache_key.append("%(is_mobile)s_%(is_tablet)s" % current.request.user_agent()) else: cache_key.append(str(user_agent_.items())) if vars_: cache_key.append(current.request.env.query_string) if lang_: cache_key.append(current.T.accepted_language) cache_key = hashlib.md5('__'.join(cache_key)).hexdigest() if prefix: cache_key = prefix + cache_key try: #action returns something rtn = cache_model(cache_key, lambda : func(), time_expire=time_expire) http, status = None, current.response.status except HTTP, e: #action raises HTTP (can still be valid) rtn = cache_model(cache_key, lambda : e.body, time_expire=time_expire) http, status = HTTP(e.status, rtn, **e.headers), e.status else: #action raised a generic exception http = None else: #no server-cache side involved try: #action returns something rtn = func() http, status = None, current.response.status except HTTP, e: #action raises HTTP (can still be valid) status = e.status http = HTTP(e.status, e.body, **e.headers) else: #action raised a generic exception http = None send_headers = False if http and isinstance(valid_statuses, list): if status in valid_statuses: send_headers = True elif valid_statuses is None: if str(status)[0] in '123': send_headers = True if send_headers: headers = { 'Pragma' : None, 'Expires' : expires, 'Cache-Control' : cache_control } current.response.headers.update(headers) if cache_model and not send_headers: #we cached already the value, but the status is not valid #so we need to delete the cached value cache_model(cache_key, None) if http: if send_headers: http.headers.update(current.response.headers) raise http return rtn wrapped_f.__name__ = func.__name__ wrapped_f.__doc__ = func.__doc__ return wrapped_f return wrap def __call__(self, key=None, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None): """ Decorator function that can be used to cache any function/method. Example:: @cache('key', 5000, cache.ram) def f(): return time.ctime() When the function f is called, web2py tries to retrieve the value corresponding to `key` from the cache of the object exists and if it did not expire, else it calles the function `f` and stores the output in the cache corresponding to `key`. In the case the output of the function is returned. :param key: the key of the object to be store or retrieved :param time_expire: expiration of the cache in microseconds :param cache_model: "ram", "disk", or other (like "memcache" if defined). It defaults to "ram". 
Notes ----- `time_expire` is used to compare the curret time with the time when the requested object was last saved in cache. It does not affect future requests. Setting `time_expire` to 0 or negative value forces the cache to refresh. If the function `f` is an action, we suggest using @cache.client instead """ def tmp(func, cache=self, cache_model=cache_model): return CacheAction(func, key, time_expire, self, cache_model) return tmp @staticmethod def with_prefix(cache_model, prefix): """ allow replacing cache.ram with cache.with_prefix(cache.ram,'prefix') it will add prefix to all the cache keys used. """ return lambda key, f, time_expire=DEFAULT_TIME_EXPIRE, prefix=prefix:\ cache_model(prefix + key, f, time_expire) def lazy_cache(key=None, time_expire=None, cache_model='ram'): """ can be used to cache any function including in modules, as long as the cached function is only called within a web2py request if a key is not provided, one is generated from the function name the time_expire defaults to None (no cache expiration) if cache_model is "ram" then the model is current.cache.ram, etc. """ def decorator(f, key=key, time_expire=time_expire, cache_model=cache_model): key = key or repr(f) def g(*c, **d): from gluon import current return current.cache(key, time_expire, cache_model)(f)(*c, **d) g.__name__ = f.__name__ return g return decorator
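# --- Illustrative sketch (added; not part of the original web2py source) ---
# A hedged example of the caching API defined above, assuming it runs where a
# Cache instance named `cache` already exists (as in a web2py controller);
# the keys and the function names are hypothetical.
def _example_cache_usage(cache):
    import time

    # cache.ram(key, f, time_expire): call f() at most once per 60 seconds
    now = cache.ram('current_time', lambda: time.ctime(), time_expire=60)

    # force a refresh with time_expire=0, or clear the key by passing f=None
    fresh = cache.ram('current_time', lambda: time.ctime(), time_expire=0)
    cache.ram('current_time', None)

    # the Cache instance is also a decorator (see Cache.__call__ above)
    @cache('expensive_key', time_expire=300, cache_model=cache.ram)
    def expensive():
        return sum(i * i for i in xrange(10 ** 6))

    return now, fresh, expensive()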
#!/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Contains: - wsgibase: the gluon wsgi application """ if False: import import_all # DO NOT REMOVE PART OF FREEZE PROCESS import gc import cgi import cStringIO import Cookie import os import re import copy import sys import time import datetime import signal import socket import tempfile import random import string import urllib2 try: import simplejson as sj #external installed library except: try: import json as sj #standard installed library except: import contrib.simplejson as sj #pure python library from thread import allocate_lock from fileutils import abspath, write_file, parse_version, copystream from settings import global_settings from admin import add_path_first, create_missing_folders, create_missing_app_folders from globals import current # Remarks: # calling script has inserted path to script directory into sys.path # applications_parent (path to applications/, site-packages/ etc) # defaults to that directory set sys.path to # ("", gluon_parent/site-packages, gluon_parent, ...) # # this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the path to this file which may be Library.zip # gluon_parent is the directory containing gluon, web2py.py, logging.conf # and the handlers. # applications_parent (web2py_path) is the directory containing applications/ # and routes.py # The two are identical unless web2py_path is changed via the web2py.py -f folder option # main.web2py_path is the same as applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders() # set up logging for subsequent imports import logging import logging.config # This needed to prevent exception on Python 2.5: # NameError: name 'gluon' is not defined # See http://bugs.python.org/issue1436 import gluon.messageboxhandler logging.gluon = gluon exists = os.path.exists pjoin = os.path.join logpath = abspath("logging.conf") if exists(logpath): logging.config.fileConfig(abspath("logging.conf")) else: logging.basicConfig() logger = logging.getLogger("web2py") from restricted import RestrictedError from http import HTTP, redirect from globals import Request, Response, Session from compileapp import build_environment, run_models_in, \ run_controller_in, run_view_in from contenttype import contenttype from dal import BaseAdapter from settings import global_settings from validators import CRYPT from cache import CacheInRam from html import URL, xmlescape from utils import is_valid_ip_address, getipaddrinfo from rewrite import load, url_in, THREAD_LOCAL as rwthread, \ try_rewrite_on_error, fixup_missing_path_info import newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0 # gc timer # Security Checks: validate URL and session_id here, # accept_language is validated in languages # pattern used to validate client address regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError("Cannot determine web2py version") try: import rocket except: if not 
global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): """ guess the client address from the environment variables first tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume '127.0.0.1' or '::1' (running locally) """ g = regex_client.search(env.get('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0] if g else None if client in (None, '', 'unknown'): g = regex_client.search(env.get('remote_addr', '')) if g: client = g.group() elif env.http_host.startswith('['): # IPv6 client = '::1' else: client = '127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise HTTP(400, "Bad Request (request.client=%s)" % client) return client def copystream_progress(request, chunk_size=10 ** 5): """ copies request.env.wsgi_input into request.body and stores progress upload status in cache_ram X-Progress-ID:length and X-Progress-ID:uploaded """ env = request.env if not env.content_length: return cStringIO.StringIO() source = env.wsgi_input try: size = int(env.content_length) except ValueError: raise HTTP(400, "Invalid Content-Length header") dest = tempfile.TemporaryFile() if not 'X-Progress-ID' in request.vars: copystream(source, dest, size, chunk_size) return dest cache_key = 'X-Progress-ID:' + request.vars['X-Progress-ID'] cache_ram = CacheInRam(request) # same as cache.ram because meta_storage cache_ram(cache_key + ':length', lambda: size, 0) cache_ram(cache_key + ':uploaded', lambda: 0, 0) while size > 0: if size < chunk_size: data = source.read(size) cache_ram.increment(cache_key + ':uploaded', size) else: data = source.read(chunk_size) cache_ram.increment(cache_key + ':uploaded', chunk_size) length = len(data) if length > size: (data, length) = (data[:size], size) size -= length if length == 0: break dest.write(data) if length < chunk_size: break dest.seek(0) cache_ram(cache_key + ':length', None) cache_ram(cache_key + ':uploaded', None) return dest def serve_controller(request, response, session): """ this function is used to generate a dynamic page. It first runs all models, then runs the function in the controller, and then tries to render the output using a view/template. this function must run from the [application] folder. 
A typical example would be the call to the url /[application]/[controller]/[function] that would result in a call to [function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html """ # ################################################## # build environment for controller and view # ################################################## environment = build_environment(request, response, session) # set default view, controller can override it response.view = '%s/%s.%s' % (request.controller, request.function, request.extension) # also, make sure the flash is passed through # ################################################## # process models, controller and view (if required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page) run_view_in(response._view_environment) page = response.body.getvalue() # logic to garbage collect after exec, not always, once every 100 requests global requests requests = ('requests' in globals()) and (requests + 1) % 100 or 0 if not requests: gc.collect() # end garbage collection logic # ################################################## # set default headers it not set # ################################################## default_headers = [ ('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) def start_response_aux(status, headers, exc_info, response=None): """ in controller you can use:: - request.wsgi.environ - request.wsgi.start_response to call third party WSGI applications """ response.status = str(status).split(' ', 1)[0] response.headers = dict(headers) return lambda *args, **kargs: response.write(escape=False, *args, **kargs) def middleware_aux(request, response, *middleware_apps): """ In you controller use:: @request.wsgi.middleware(middleware1, middleware2, ...) to decorate actions with WSGI middleware. actions must return strings. uses a simulated environment so it may have weird behavior in some cases """ def middleware(f): def app(environ, start_response): data = f() start_response(response.status, response.headers.items()) if isinstance(data, list): return data return [data] for item in middleware_apps: app = item(app) def caller(app): wsgi = request.wsgi return app(wsgi.environ, wsgi.start_response) return lambda caller=caller, app=app: caller(app) return middleware def environ_aux(environ, request): new_environ = copy.copy(environ) new_environ['wsgi.input'] = request.body new_environ['wsgi.version'] = 1 return new_environ ISLE25 = sys.version_info[1] <= 5 def parse_get_post_vars(request, environ): # always parse variables in URL for GET, POST, PUT, DELETE, etc. 
in get_vars env = request.env dget = cgi.parse_qsl(env.query_string or '', keep_blank_values=1) for (key, value) in dget: if key in request.get_vars: if isinstance(request.get_vars[key], list): request.get_vars[key] += [value] else: request.get_vars[key] = [request.get_vars[key]] + [value] else: request.get_vars[key] = value request.vars[key] = request.get_vars[key] try: request.body = body = copystream_progress(request) except IOError: raise HTTP(400, "Bad Request - HTTP body is incomplete") #if content-type is application/json, we must read the body is_json = env.get('http_content_type', '')[:16] == 'application/json' if is_json: try: json_vars = sj.load(body) body.seek(0) except: # incoherent request bodies can still be parsed "ad-hoc" json_vars = {} pass # update vars and get_vars with what was posted as json request.get_vars.update(json_vars) request.vars.update(json_vars) # parse POST variables on POST, PUT, BOTH only in post_vars if (body and env.request_method in ('POST', 'PUT', 'BOTH')): dpost = cgi.FieldStorage(fp=body, environ=environ, keep_blank_values=1) # The same detection used by FieldStorage to detect multipart POSTs is_multipart = dpost.type[:10] == 'multipart/' body.seek(0) def listify(a): return (not isinstance(a, list) and [a]) or a try: keys = sorted(dpost) except TypeError: keys = [] for key in keys: if key is None: continue # not sure why cgi.FieldStorage returns None key dpk = dpost[key] # if en element is not a file replace it with its value else leave it alone if isinstance(dpk, list): value = [] for _dpk in dpk: if not _dpk.filename: value.append(_dpk.value) else: value.append(_dpk) elif not dpk.filename: value = dpk.value else: value = dpk pvalue = listify(value) if key in request.vars: gvalue = listify(request.vars[key]) if ISLE25: value = pvalue + gvalue elif is_multipart: pvalue = pvalue[len(gvalue):] else: pvalue = pvalue[:-len(gvalue)] request.vars[key] = value if len(pvalue): request.post_vars[key] = (len(pvalue) > 1 and pvalue) or pvalue[0] if is_json: # update post_vars with what was posted as json request.post_vars.update(json_vars) def wsgibase(environ, responder): """ this is the gluon wsgi application. the first function called when a page is requested (static or dynamic). it can be called by paste.httpserver or by apache mod_wsgi. - fills request with info - the environment variables, replacing '.' with '_' - adds web2py path and version info - compensates for fcgi missing path_info and query_string - validates the path in url The url path must be either: 1. for static pages: - /<application>/static/<file> 2. for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] - (sub may go several levels deep, currently 3 levels are supported: sub1/sub2/sub3) The naming conventions are: - application, controller, function and extension may only contain [a-zA-Z0-9_] - file and sub may also contain '-', '=', '.' 
and '/' """ current.__dict__.clear() request = Request() response = Response() session = Session() env = request.env env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version env.update(global_settings) static_file = False try: try: try: # ################################################## # handle fcgi missing path_info and query_string # select rewrite parameters # rewrite incoming URL # parse rewritten header variables # parse rewritten URL # serve file if static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ) response.status = env.web2py_status_code or response.status if static_file: if environ.get('QUERY_STRING', '').startswith( 'attachment'): response.headers['Content-Disposition'] \ = 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) # ################################################## # fill in request items # ################################################## app = request.application # must go after url_in! if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts = global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() request.update( client = client, folder = abspath('applications', app) + os.sep, ajax = x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local = env.remote_addr in local_hosts, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \ or env.https == 'on') request.compute_uuid() # requires client request.url = environ['PATH_INFO'] # ################################################## # access the requested application # ################################################## if not exists(request.folder): if app == rwthread.routes.default_application \ and app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif not request.is_local and \ exists(pjoin(request.folder, 'DISABLED')): raise HTTP(503, "<html><body><h1>Temporarily down for maintenance</h1></body></html>") # ################################################## # build missing folders # ################################################## create_missing_app_folders(request) # ################################################## # get the GET and POST data # ################################################## parse_get_post_vars(request, environ) # ################################################## # expose wsgi hooks for convenience # ################################################## request.wsgi.environ = environ_aux(environ, request) request.wsgi.start_response 
= \ lambda status='200', headers=[], \ exec_info=None, response=response: \ start_response_aux(status, headers, exec_info, response) request.wsgi.middleware = \ lambda *a: middleware_aux(request, response, *a) # ################################################## # load cookies # ################################################## if env.http_cookie: try: request.cookies.load(env.http_cookie) except Cookie.CookieError, e: pass # invalid cookies # ################################################## # try load session or create new session file # ################################################## if not env.web2py_disable_session: session.connect(request, response) # ################################################## # run controller # ################################################## if global_settings.debugging and app != "admin": import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP, http_response: if static_file: return http_response.to(responder, env=env) if request.body: request.body.close() # ################################################## # on success, try store session in database # ################################################## session._try_store_in_db(request, response) # ################################################## # on success, commit database # ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) # elif response._custom_commit: # response._custom_commit() elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if session not in db try store session on filesystem # this must be done after trying to commit database! 
# ################################################## session._try_store_in_cookie_or_file(request, response) if request.cid: if response.flash: http_response.headers['web2py-component-flash'] = \ urllib2.quote(xmlescape(response.flash)\ .replace('\n','')) if response.js: http_response.headers['web2py-component-command'] = \ urllib2.quote(response.js.replace('\n','')) # ################################################## # store cookies in headers # ################################################## rcookies = response.cookies if session._forget and response.session_id_name in rcookies: del rcookies[response.session_id_name] elif session._secure: rcookies[response.session_id_name]['secure'] = True http_response.cookies2headers(rcookies) ticket = None except RestrictedError, e: if request.body: request.body.close() # ################################################## # on application error, rollback database # ################################################## ticket = e.log(request) or 'unknown' if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') http_response = \ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if request.body: request.body.close() # ################################################## # on application error, rollback database # ################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '', '', locals()) ticket = e.log(request) or 'unrecoverable' http_response = \ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if response and hasattr(response, 'session_file') \ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request, environ, ticket) if not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port): """ used by main() to save the password in the parameters_port.py file. """ password_file = abspath('parameters_%i.py' % port) if password == '<random>': # make up a new password chars = string.letters + string.digits password = ''.join([random.choice(chars) for i in range(8)]) cpassword = CRYPT()(password)[0] print '******************* IMPORTANT!!! ************************' print 'your admin password is "%s"' % password print '*********************************************************' elif password == '<recycle>': # reuse the current password if any if exists(password_file): return else: password = '' elif password.startswith('<pam_user:'): # use the pam password for specified user cpassword = password[1:-1] else: # use provided password cpassword = CRYPT()(password)[0] fp = open(password_file, 'w') if password: fp.write('password="%s"\n' % cpassword) else: fp.write('password=None\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profilerfilename='profiler.log'): """ generates a wsgi application that does logging and profiling and calls wsgibase .. 
function:: gluon.main.appfactory( [wsgiapp=wsgibase [, logfilename='httpserver.log' [, profilerfilename='profiler.log']]]) """ if profilerfilename and exists(profilerfilename): os.unlink(profilerfilename) locker = allocate_lock() def app_with_logging(environ, responder): """ a wsgi app that does logging and profiling and calls wsgibase """ status_headers = [] def responder2(s, h): """ wsgi responder app """ status_headers.append(s) status_headers.append(h) return responder(s, h) time_in = time.time() ret = [0] if not profilerfilename: ret[0] = wsgiapp(environ, responder2) else: import cProfile import pstats logger.warn('profiler is on. this makes web2py slower and serial') locker.acquire() cProfile.runctx('ret[0] = wsgiapp(environ, responder2)', globals(), locals(), profilerfilename + '.tmp') stat = pstats.Stats(profilerfilename + '.tmp') stat.stream = cStringIO.StringIO() stat.strip_dirs().sort_stats("time").print_stats(80) profile_out = stat.stream.getvalue() profile_file = open(profilerfilename, 'a') profile_file.write('%s\n%s\n%s\n%s\n\n' % ('=' * 60, environ['PATH_INFO'], '=' * 60, profile_out)) profile_file.close() locker.release() try: line = '%s, %s, %s, %s, %s, %s, %f\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass return ret[0] return app_with_logging class HttpServer(object): """ the web2py web server (Rocket) """ def __init__( self, ip='127.0.0.1', port=8000, password='', pid_filename='httpserver.pid', log_filename='httpserver.log', profiler_filename=None, ssl_certificate=None, ssl_private_key=None, ssl_ca_certificate=None, min_threads=None, max_threads=None, server_name=None, request_queue_size=5, timeout=10, socket_timeout=1, shutdown_timeout=None, # Rocket does not use a shutdown timeout path=None, interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string ): """ starts the web server. """ if interfaces: # if interfaces is specified, it must be tested for rocket parameter correctness # not necessarily completely tested (e.g. content of tuples or ip-format) import types if isinstance(interfaces, types.ListType): for i in interfaces: if not isinstance(i, types.TupleType): raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/" else: raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/" if path: # if a path is specified change the global variables so that web2py # runs from there instead of cwd or os.environ['web2py_path'] global web2py_path path = os.path.normpath(path) web2py_path = path global_settings.applications_parent = path os.chdir(path) [add_path_first(p) for p in (path, abspath('site-packages'), "")] if exists("logging.conf"): logging.config.fileConfig("logging.conf") save_password(password, port) self.pid_filename = pid_filename if not server_name: server_name = socket.gethostname() logger.info('starting web server...') rocket.SERVER_NAME = server_name rocket.SOCKET_TIMEOUT = socket_timeout sock_list = [ip, port] if not ssl_certificate or not ssl_private_key: logger.info('SSL is off') elif not rocket.ssl: logger.warning('Python "ssl" module unavailable. 
SSL is OFF') elif not exists(ssl_certificate): logger.warning('unable to open SSL certificate. SSL is OFF') elif not exists(ssl_private_key): logger.warning('unable to open SSL private key. SSL is OFF') else: sock_list.extend([ssl_private_key, ssl_certificate]) if ssl_ca_certificate: sock_list.append(ssl_ca_certificate) logger.info('SSL is ON') app_info = {'wsgi_app': appfactory(wsgibase, log_filename, profiler_filename)} self.server = rocket.Rocket(interfaces or tuple(sock_list), method='wsgi', app_info=app_info, min_threads=min_threads, max_threads=max_threads, queue_size=int(request_queue_size), timeout=int(timeout), handle_signals=False, ) def start(self): """ start the web server """ try: signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop()) signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop()) except: pass write_file(self.pid_filename, str(os.getpid())) self.server.start() def stop(self, stoplogging=False): """ stop cron and the web server """ newcron.stopcron() self.server.stop(stoplogging) try: os.unlink(self.pid_filename) except: pass
Python
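The HttpServer class above wraps the Rocket web server. Below is a minimal sketch, not part of the original source, of driving it directly; it assumes a standard web2py checkout on sys.path so that gluon.main is importable.

# Usage sketch (assumption: run from a web2py checkout).
from gluon.main import HttpServer

server = HttpServer(ip='127.0.0.1',
                    port=8000,
                    password='<random>',          # save_password() prints the generated admin password
                    pid_filename='httpserver.pid',
                    log_filename='httpserver.log')
try:
    server.start()     # blocks; SIGTERM/SIGINT are wired to server.stop()
except KeyboardInterrupt:
    server.stop()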
# this file exists for backward compatibility
__all__ = ['DAL', 'Field', 'DRIVERS']

from dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, \
    DRIVERS, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, \
    SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType
Python
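The sql module above exists only so that old-style imports keep resolving to the classes now defined in dal.py. A quick sketch of the equivalence, again assuming a web2py checkout on sys.path:

# Sketch: the legacy names re-exported by sql.py are the dal.py classes.
from gluon.sql import SQLDB, SQLField    # old names, kept for backward compatibility
from gluon.dal import DAL, Field         # current names

db = SQLDB('sqlite:memory')              # in-memory SQLite, same as DAL('sqlite:memory')
db.define_table('thing', SQLField('name'))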
#!/usr/bin/env python # -*- coding: utf-8 -*- """ :: # from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942 # Title: Cross-site scripting (XSS) defense # Submitter: Josh Goldfoot (other recipes) # Last Updated: 2006/08/05 # Version no: 1.0 """ from htmllib import HTMLParser from cgi import escape from urlparse import urlparse from formatter import AbstractFormatter from htmlentitydefs import entitydefs from xml.sax.saxutils import quoteattr __all__ = ['sanitize'] def xssescape(text): """Gets rid of < and > and & and, for good measure, :""" return escape(text, quote=True).replace(':', '&#58;') class XssCleaner(HTMLParser): def __init__( self, permitted_tags=[ 'a', 'b', 'blockquote', 'br/', 'i', 'li', 'ol', 'ul', 'p', 'cite', 'code', 'pre', 'img/', ], allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt' ], 'blockquote': ['type']}, fmt=AbstractFormatter, strip_disallowed=False ): HTMLParser.__init__(self, fmt) self.result = '' self.open_tags = [] self.permitted_tags = [i for i in permitted_tags if i[-1] != '/'] self.requires_no_close = [i[:-1] for i in permitted_tags if i[-1] == '/'] self.permitted_tags += self.requires_no_close self.allowed_attributes = allowed_attributes # The only schemes allowed in URLs (for href and src attributes). # Adding "javascript" or "vbscript" to this list would not be smart. self.allowed_schemes = ['http', 'https', 'ftp', 'mailto'] #to strip or escape disallowed tags? self.strip_disallowed = strip_disallowed self.in_disallowed = False def handle_data(self, data): if data and not self.in_disallowed: self.result += xssescape(data) def handle_charref(self, ref): if self.in_disallowed: return elif len(ref) < 7 and ref.isdigit(): self.result += '&#%s;' % ref else: self.result += xssescape('&#%s' % ref) def handle_entityref(self, ref): if self.in_disallowed: return elif ref in entitydefs: self.result += '&%s;' % ref else: self.result += xssescape('&%s' % ref) def handle_comment(self, comment): if self.in_disallowed: return elif comment: self.result += xssescape('<!--%s-->' % comment) def handle_starttag( self, tag, method, attrs, ): if tag not in self.permitted_tags: if self.strip_disallowed: self.in_disallowed = True else: self.result += xssescape('<%s>' % tag) else: bt = '<' + tag if tag in self.allowed_attributes: attrs = dict(attrs) self.allowed_attributes_here = [x for x in self.allowed_attributes[tag] if x in attrs and len(attrs[x]) > 0] for attribute in self.allowed_attributes_here: if attribute in ['href', 'src', 'background']: if self.url_is_acceptable(attrs[attribute]): bt += ' %s="%s"' % (attribute, attrs[attribute]) else: bt += ' %s=%s' % (xssescape(attribute), quoteattr(attrs[attribute])) if bt == '<a' or bt == '<img': return if tag in self.requires_no_close: bt += ' /' bt += '>' self.result += bt self.open_tags.insert(0, tag) def handle_endtag(self, tag, attrs): bracketed = '</%s>' % tag if tag not in self.permitted_tags: if self.strip_disallowed: self.in_disallowed = False else: self.result += xssescape(bracketed) elif tag in self.open_tags: self.result += bracketed self.open_tags.remove(tag) def unknown_starttag(self, tag, attributes): self.handle_starttag(tag, None, attributes) def unknown_endtag(self, tag): self.handle_endtag(tag, None) def url_is_acceptable(self, url): """ Accepts relative, absolute, and mailto urls """ parsed = urlparse(url) return (parsed[0] in self.allowed_schemes and '.' 
in parsed[1]) \ or (parsed[0] in self.allowed_schemes and '@' in parsed[2]) \ or (parsed[0] == '' and parsed[2].startswith('/')) def strip(self, rawstring, escape=True): """ Returns the argument stripped of potentially harmful HTML or Javascript code @type escape: boolean @param escape: If True (default) it escapes the potentially harmful content, otherwise remove it """ if not isinstance(rawstring, str): return str(rawstring) for tag in self.requires_no_close: rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag) if not escape: self.strip_disallowed = True self.result = '' self.feed(rawstring) for endtag in self.open_tags: if endtag not in self.requires_no_close: self.result += '</%s>' % endtag return self.result def xtags(self): """ Returns a printable string informing the user which tags are allowed """ tg = '' for x in sorted(self.permitted_tags): tg += '<' + x if x in self.allowed_attributes: for y in self.allowed_attributes[x]: tg += ' %s=""' % y tg += '> ' return xssescape(tg.strip()) def sanitize(text, permitted_tags=[ 'a', 'b', 'blockquote', 'br/', 'i', 'li', 'ol', 'ul', 'p', 'cite', 'code', 'pre', 'img/', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'table', 'tbody', 'thead', 'tfoot', 'tr', 'td', 'div', 'strong', 'span', ], allowed_attributes={ 'a': ['href', 'title'], 'img': ['src', 'alt'], 'blockquote': ['type'], 'td': ['colspan'], }, escape=True): if not isinstance(text, basestring): return str(text) return XssCleaner(permitted_tags=permitted_tags, allowed_attributes=allowed_attributes).strip(text, escape)
Python
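A short usage sketch for the sanitize() helper defined above; the sample markup is made up, and the import path assumes the module lives at gluon/sanitizer.py as in a standard web2py tree.

# Sketch: escaping vs. stripping disallowed markup.
from gluon.sanitizer import sanitize

dirty = '<p>hello <script>alert(1)</script> <a href="javascript:x()">link</a></p>'

print sanitize(dirty)                  # default escape=True: disallowed tags/URLs are escaped
print sanitize(dirty, escape=False)    # strip the disallowed content instead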
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created by Attila Csipa <web2py@csipa.in.rs> Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu> """ import sys import os import threading import logging import time import sched import re import datetime import platform import portalocker import fileutils import cPickle from settings import global_settings logger = logging.getLogger("web2py.cron") _cron_stopping = False _cron_subprocs = [] def absolute_path_link(path): """ Return an absolute path for the destination of a symlink """ if os.path.islink(path): link = os.readlink(path) if not os.path.isabs(link): link = os.path.join(os.path.dirname(path), link) else: link = os.path.abspath(path) return link def stopcron(): "graceful shutdown of cron" global _cron_stopping _cron_stopping = True while _cron_subprocs: _cron_subprocs.pop().terminate() class extcron(threading.Thread): def __init__(self, applications_parent, apps=None): threading.Thread.__init__(self) self.setDaemon(False) self.path = applications_parent self.apps = apps # crondance(self.path, 'external', startup=True, apps=self.apps) def run(self): if not _cron_stopping: logger.debug('external cron invocation') crondance(self.path, 'external', startup=False, apps=self.apps) class hardcron(threading.Thread): def __init__(self, applications_parent): threading.Thread.__init__(self) self.setDaemon(True) self.path = applications_parent crondance(self.path, 'hard', startup=True) def launch(self): if not _cron_stopping: logger.debug('hard cron invocation') crondance(self.path, 'hard', startup=False) def run(self): s = sched.scheduler(time.time, time.sleep) logger.info('Hard cron daemon started') while not _cron_stopping: now = time.time() s.enter(60 - now % 60, 1, self.launch, ()) s.run() class softcron(threading.Thread): def __init__(self, applications_parent): threading.Thread.__init__(self) self.path = applications_parent # crondance(self.path, 'soft', startup=True) def run(self): if not _cron_stopping: logger.debug('soft cron invocation') crondance(self.path, 'soft', startup=False) class Token(object): def __init__(self, path): self.path = os.path.join(path, 'cron.master') if not os.path.exists(self.path): fileutils.write_file(self.path, '', 'wb') self.master = None self.now = time.time() def acquire(self, startup=False): """ returns the time when the lock is acquired or None if cron already running lock is implemented by writing a pickle (start, stop) in cron.master start is time when cron job starts and stop is time when cron completed stop == 0 if job started but did not yet complete if a cron job started within less than 60 seconds, acquire returns None if a cron job started before 60 seconds and did not stop, a warning is issue "Stale cron.master detected" """ if portalocker.LOCK_EX is None: logger.warning('WEB2PY CRON: Disabled because no file locking') return None self.master = open(self.path, 'rb+') try: ret = None portalocker.lock(self.master, portalocker.LOCK_EX) try: (start, stop) = cPickle.load(self.master) except: (start, stop) = (0, 1) if startup or self.now - start > 59.99: ret = self.now if not stop: # this happens if previous cron job longer than 1 minute logger.warning('WEB2PY CRON: Stale cron.master detected') logger.debug('WEB2PY CRON: Acquiring lock') self.master.seek(0) cPickle.dump((self.now, 0), self.master) finally: portalocker.unlock(self.master) if not ret: # do this so no need to release self.master.close() return ret def release(self): """ this function writes into cron.master the time when cron job 
was completed """ if not self.master.closed: portalocker.lock(self.master, portalocker.LOCK_EX) logger.debug('WEB2PY CRON: Releasing cron lock') self.master.seek(0) (start, stop) = cPickle.load(self.master) if start == self.now: # if this is my lock self.master.seek(0) cPickle.dump((self.now, time.time()), self.master) portalocker.unlock(self.master) self.master.close() def rangetolist(s, period='min'): retval = [] if s.startswith('*'): if period == 'min': s = s.replace('*', '0-59', 1) elif period == 'hr': s = s.replace('*', '0-23', 1) elif period == 'dom': s = s.replace('*', '1-31', 1) elif period == 'mon': s = s.replace('*', '1-12', 1) elif period == 'dow': s = s.replace('*', '0-6', 1) m = re.compile(r'(\d+)-(\d+)/(\d+)') match = m.match(s) if match: for i in range(int(match.group(1)), int(match.group(2)) + 1): if i % int(match.group(3)) == 0: retval.append(i) return retval def parsecronline(line): task = {} if line.startswith('@reboot'): line = line.replace('@reboot', '-1 * * * *') elif line.startswith('@yearly'): line = line.replace('@yearly', '0 0 1 1 *') elif line.startswith('@annually'): line = line.replace('@annually', '0 0 1 1 *') elif line.startswith('@monthly'): line = line.replace('@monthly', '0 0 1 * *') elif line.startswith('@weekly'): line = line.replace('@weekly', '0 0 * * 0') elif line.startswith('@daily'): line = line.replace('@daily', '0 0 * * *') elif line.startswith('@midnight'): line = line.replace('@midnight', '0 0 * * *') elif line.startswith('@hourly'): line = line.replace('@hourly', '0 * * * *') params = line.strip().split(None, 6) if len(params) < 7: return None daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6} for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']): if not s in [None, '*']: task[id] = [] vals = s.split(',') for val in vals: if val != '-1' and '-' in val and '/' not in val: val = '%s/1' % val if '/' in val: task[id] += rangetolist(val, id) elif val.isdigit() or val == '-1': task[id].append(int(val)) elif id == 'dow' and val[:3].lower() in daysofweek: task[id].append(daysofweek(val[:3].lower())) task['user'] = params[5] task['cmd'] = params[6] return task class cronlauncher(threading.Thread): def __init__(self, cmd, shell=True): threading.Thread.__init__(self) if platform.system() == 'Windows': shell = False self.cmd = cmd self.shell = shell def run(self): import subprocess global _cron_subprocs if isinstance(self.cmd, (list, tuple)): cmd = self.cmd else: cmd = self.cmd.split() proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=self.shell) _cron_subprocs.append(proc) (stdoutdata, stderrdata) = proc.communicate() if proc.returncode != 0: logger.warning( 'WEB2PY CRON Call returned code %s:\n%s' % (proc.returncode, stdoutdata + stderrdata)) else: logger.debug('WEB2PY CRON Call returned success:\n%s' % stdoutdata) def crondance(applications_parent, ctype='soft', startup=False, apps=None): apppath = os.path.join(applications_parent, 'applications') cron_path = os.path.join(applications_parent) token = Token(cron_path) cronmaster = token.acquire(startup=startup) if not cronmaster: return now_s = time.localtime() checks = (('min', now_s.tm_min), ('hr', now_s.tm_hour), ('mon', now_s.tm_mon), ('dom', now_s.tm_mday), ('dow', (now_s.tm_wday + 1) % 7)) if apps is None: apps = [x for x in os.listdir(apppath) if os.path.isdir(os.path.join(apppath, x))] full_apath_links = set() for app in apps: if _cron_stopping: break apath = os.path.join(apppath, app) # if app 
is a symbolic link to other app, skip it full_apath_link = absolute_path_link(apath) if full_apath_link in full_apath_links: continue else: full_apath_links.add(full_apath_link) cronpath = os.path.join(apath, 'cron') crontab = os.path.join(cronpath, 'crontab') if not os.path.exists(crontab): continue try: cronlines = fileutils.readlines_file(crontab, 'rt') lines = [x.strip() for x in cronlines if x.strip( ) and not x.strip().startswith('#')] tasks = [parsecronline(cline) for cline in lines] except Exception, e: logger.error('WEB2PY CRON: crontab read error %s' % e) continue for task in tasks: if _cron_stopping: break commands = [sys.executable] w2p_path = fileutils.abspath('web2py.py', gluon=True) if os.path.exists(w2p_path): commands.append(w2p_path) if global_settings.applications_parent != global_settings.gluon_parent: commands.extend(('-f', global_settings.applications_parent)) citems = [(k in task and not v in task[k]) for k, v in checks] task_min = task.get('min', []) if not task: continue elif not startup and task_min == [-1]: continue elif task_min != [-1] and reduce(lambda a, b: a or b, citems): continue logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s' % (ctype, app, task.get('cmd'), os.getcwd(), datetime.datetime.now())) action, command, models = False, task['cmd'], '' if command.startswith('**'): (action, models, command) = (True, '', command[2:]) elif command.startswith('*'): (action, models, command) = (True, '-M', command[1:]) else: action = False if action and command.endswith('.py'): commands.extend(('-J', # cron job models, # import models? '-S', app, # app name '-a', '"<recycle>"', # password '-R', command)) # command elif action: commands.extend(('-J', # cron job models, # import models? '-S', app + '/' + command, # app name '-a', '"<recycle>"')) # password else: commands = command # from python docs: # You do not need shell=True to run a batch file or # console-based executable. shell = False try: cronlauncher(commands, shell=shell).start() except Exception, e: logger.warning( 'WEB2PY CRON: Execution error for %s: %s' % (task.get('cmd'), e)) token.release()
Python
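crondance() above reads one line per task from each application's cron/crontab file. The sketch below shows the seven-field format that parsecronline() expects; the user and path are hypothetical.

# Sketch: parsing a web2py crontab line.
# Fields: minutes hours day-of-month month day-of-week user command
from gluon.newcron import parsecronline

task = parsecronline('30 3 * * * root *applications/myapp/cron/backup.py')
# Only the non-'*' time fields end up in the dict:
#   {'min': [30], 'hr': [3], 'user': 'root',
#    'cmd': '*applications/myapp/cron/backup.py'}
# The leading '*' on the command asks crondance() to run the script with
# the app's models imported (it becomes the -M flag).
print task['min'], task['hr'], task['user'], task['cmd']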
#!/usr/bin/env python # -*- coding: utf-8 -*- USAGE = """ ## Example For any existing app Create File: app/models/scheduler.py ====== from gluon.scheduler import Scheduler def demo1(*args,**vars): print 'you passed args=%s and vars=%s' % (args, vars) return 'done!' def demo2(): 1/0 scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2)) ## run worker nodes with: cd web2py python web2py.py -K myapp or python gluon/scheduler.py -u sqlite://storage.sqlite \ -f applications/myapp/databases/ \ -t mytasks.py (-h for info) python scheduler.py -h ## schedule jobs using http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task ## monitor scheduled jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0 ## view completed jobs http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0 ## view workers http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0 ## To install the scheduler as a permanent daemon on Linux (w/ Upstart), put ## the following into /etc/init/web2py-scheduler.conf: ## (This assumes your web2py instance is installed in <user>'s home directory, ## running as <user>, with app <myapp>, on network interface eth0.) description "web2py task scheduler" start on (local-filesystems and net-device-up IFACE=eth0) stop on shutdown respawn limit 8 60 # Give up if restart occurs 8 times in 60 seconds. exec sudo -u <user> python /home/<user>/web2py/web2py.py -K <myapp> respawn ## You can then start/stop/restart/check status of the daemon with: sudo start web2py-scheduler sudo stop web2py-scheduler sudo restart web2py-scheduler sudo status web2py-scheduler """ import os import time import multiprocessing import sys import threading import traceback import signal import socket import datetime import logging import optparse import types import Queue if 'WEB2PY_PATH' in os.environ: sys.path.append(os.environ['WEB2PY_PATH']) else: os.environ['WEB2PY_PATH'] = os.getcwd() if not os.environ['WEB2PY_PATH'] in sys.path: sys.path.append(os.environ['WEB2PY_PATH']) try: from gluon.contrib.simplejson import loads, dumps except: from simplejson import loads, dumps IDENTIFIER = "%s#%s" % (socket.gethostname(),os.getpid()) logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER) from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB, IS_INT_IN_RANGE, IS_DATETIME from gluon.utils import web2py_uuid from gluon.storage import Storage QUEUED = 'QUEUED' ASSIGNED = 'ASSIGNED' RUNNING = 'RUNNING' COMPLETED = 'COMPLETED' FAILED = 'FAILED' TIMEOUT = 'TIMEOUT' STOPPED = 'STOPPED' ACTIVE = 'ACTIVE' TERMINATE = 'TERMINATE' DISABLED = 'DISABLED' KILL = 'KILL' PICK = 'PICK' STOP_TASK = 'STOP_TASK' EXPIRED = 'EXPIRED' SECONDS = 1 HEARTBEAT = 3 * SECONDS MAXHIBERNATION = 10 CLEAROUT = '!clear!' 
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType) class Task(object): def __init__(self, app, function, timeout, args='[]', vars='{}', **kwargs): logger.debug(' new task allocated: %s.%s', app, function) self.app = app self.function = function self.timeout = timeout self.args = args # json self.vars = vars # json self.__dict__.update(kwargs) def __str__(self): return '<Task: %s>' % self.function class TaskReport(object): def __init__(self, status, result=None, output=None, tb=None): logger.debug(' new task report: %s', status) if tb: logger.debug(' traceback: %s', tb) else: logger.debug(' result: %s', result) self.status = status self.result = result self.output = output self.tb = tb def __str__(self): return '<TaskReport: %s>' % self.status def demo_function(*argv, **kwargs): """ test function """ for i in range(argv[0]): print 'click', i time.sleep(1) return 'done' #the two functions below deal with simplejson decoding as unicode, esp for the dict decode #and subsequent usage as function Keyword arguments unicode variable names won't work! #borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python def _decode_list(lst): newlist = [] for i in lst: if isinstance(i, unicode): i = i.encode('utf-8') elif isinstance(i, list): i = _decode_list(i) newlist.append(i) return newlist def _decode_dict(dct): newdict = {} for k, v in dct.iteritems(): if isinstance(k, unicode): k = k.encode('utf-8') if isinstance(v, unicode): v = v.encode('utf-8') elif isinstance(v, list): v = _decode_list(v) newdict[k] = v return newdict def executor(queue, task, out): """ the background process """ logger.debug(' task started') class LogOutput(object): """Facility to log output at intervals""" def __init__(self, out_queue): self.out_queue = out_queue self.stdout = sys.stdout sys.stdout = self def __del__(self): sys.stdout = self.stdout def flush(self): pass def write(self, data): self.out_queue.put(data) W2P_TASK = Storage({'id' : task.task_id, 'uuid' : task.uuid}) stdout = LogOutput(out) try: if task.app: os.chdir(os.environ['WEB2PY_PATH']) from gluon.shell import env, parse_path_info from gluon import current level = logging.getLogger().getEffectiveLevel() logging.getLogger().setLevel(logging.WARN) # Get controller-specific subdirectory if task.app is of # form 'app/controller' (a, c, f) = parse_path_info(task.app) _env = env(a=a, c=c, import_models=True) logging.getLogger().setLevel(level) f = task.function functions = current._scheduler.tasks if not functions: #look into env _function = _env.get(f) else: _function = functions.get(f) if not isinstance(_function, CALLABLETYPES): raise NameError( "name '%s' not found in scheduler's environment" % f) #Inject W2P_TASK into environment _env.update({'W2P_TASK' : W2P_TASK}) globals().update(_env) args = loads(task.args) vars = loads(task.vars, object_hook=_decode_dict) result = dumps(_function(*args, **vars)) else: ### for testing purpose only result = eval(task.function)( *loads(task.args, object_hook=_decode_dict), **loads(task.vars, object_hook=_decode_dict)) queue.put(TaskReport(COMPLETED, result=result)) except BaseException, e: tb = traceback.format_exc() queue.put(TaskReport(FAILED, tb=tb)) del stdout class MetaScheduler(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.process = None # the background process self.have_heartbeat = True # set to False to kill self.empty_runs = 0 def async(self, task): 
""" starts the background process and returns: ('ok',result,output) ('error',exception,None) ('timeout',None,None) ('terminated',None,None) """ db = self.db sr = db.scheduler_run out = multiprocessing.Queue() queue = multiprocessing.Queue(maxsize=1) p = multiprocessing.Process(target=executor, args=(queue, task, out)) self.process = p logger.debug(' task starting') p.start() task_output = "" tout = "" try: if task.sync_output > 0: run_timeout = task.sync_output else: run_timeout = task.timeout start = time.time() while p.is_alive() and ( not task.timeout or time.time() - start < task.timeout): if tout: try: logger.debug(' partial output saved') db(sr.id == task.run_id).update(run_output=task_output) db.commit() except: pass p.join(timeout=run_timeout) tout = "" while not out.empty(): tout += out.get() if tout: logger.debug(' partial output: "%s"' % str(tout)) if CLEAROUT in tout: task_output = tout[ tout.rfind(CLEAROUT) + len(CLEAROUT):] else: task_output += tout except: p.terminate() p.join() self.have_heartbeat = False logger.debug(' task stopped by general exception') tr = TaskReport(STOPPED) else: if p.is_alive(): p.terminate() logger.debug(' task timeout') try: # we try to get a traceback here tr = queue.get(timeout=2) tr.status = TIMEOUT tr.output = task_output except Queue.Empty: tr = TaskReport(TIMEOUT) elif queue.empty(): self.have_heartbeat = False logger.debug(' task stopped') tr = TaskReport(STOPPED) else: logger.debug(' task completed or failed') tr = queue.get() tr.output = task_output return tr def die(self): logger.info('die!') self.have_heartbeat = False self.terminate_process() def give_up(self): logger.info('Giving up as soon as possible!') self.have_heartbeat = False def terminate_process(self): try: self.process.terminate() except: pass # no process to terminate def run(self): """ the thread that sends heartbeat """ counter = 0 while self.have_heartbeat: self.send_heartbeat(counter) counter += 1 def start_heartbeats(self): self.start() def send_heartbeat(self, counter): print 'thum' time.sleep(1) def pop_task(self): return Task( app=None, function='demo_function', timeout=7, args='[2]', vars='{}') def report_task(self, task, task_report): print 'reporting task' pass def sleep(self): pass def loop(self): try: self.start_heartbeats() while True and self.have_heartbeat: logger.debug('looping...') task = self.pop_task() if task: self.empty_runs = 0 self.report_task(task, self.async(task)) else: self.empty_runs += 1 logger.debug('sleeping...') if self.max_empty_runs != 0: logger.debug('empty runs %s/%s', self.empty_runs, self.max_empty_runs) if self.empty_runs >= self.max_empty_runs: logger.info( 'empty runs limit reached, killing myself') self.die() self.sleep() except KeyboardInterrupt: self.die() TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED) RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED) WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK) class TYPE(object): """ validator that check whether field is valid json and validate its type """ def __init__(self, myclass=list, parse=False): self.myclass = myclass self.parse = parse def __call__(self, value): from gluon import current try: obj = loads(value) except: return (value, current.T('invalid json')) else: if isinstance(obj, self.myclass): if self.parse: return (obj, None) else: return (value, None) else: return (value, current.T('Not of type: %s') % self.myclass) class Scheduler(MetaScheduler): def __init__(self, db, tasks=None, migrate=True, worker_name=None, 
group_names=['main'], heartbeat=HEARTBEAT, max_empty_runs=0, discard_results=False, utc_time=False): MetaScheduler.__init__(self) self.db = db self.db_thread = None self.tasks = tasks self.group_names = group_names self.heartbeat = heartbeat self.worker_name = worker_name or IDENTIFIER #list containing status as recorded in the table plus a boost parameter #for hibernation (i.e. when someone stop the worker acting on the worker table) self.worker_status = [RUNNING, 1] self.max_empty_runs = max_empty_runs self.discard_results = discard_results self.is_a_ticker = False self.do_assign_tasks = False self.greedy = False self.utc_time = utc_time from gluon import current current._scheduler = self self.define_tables(db, migrate=migrate) def now(self): return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now() def set_requirements(self, scheduler_task): from gluon import current if hasattr(current, 'request'): scheduler_task.application_name.default = '%s/%s' % ( current.request.application, current.request.controller ) def define_tables(self, db, migrate): from gluon.dal import DEFAULT logger.debug('defining tables (migrate=%s)', migrate) now = self.now db.define_table( 'scheduler_task', Field('application_name', requires=IS_NOT_EMPTY(), default=None, writable=False), Field('task_name', default=None), Field('group_name', default='main'), Field('status', requires=IS_IN_SET(TASK_STATUS), default=QUEUED, writable=False), Field('function_name', requires=IS_IN_SET(sorted(self.tasks.keys())) if self.tasks else DEFAULT), Field('uuid', requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'), unique=True, default=web2py_uuid), Field('args', 'text', default='[]', requires=TYPE(list)), Field('vars', 'text', default='{}', requires=TYPE(dict)), Field('enabled', 'boolean', default=True), Field('start_time', 'datetime', default=now, requires=IS_DATETIME()), Field('next_run_time', 'datetime', default=now), Field('stop_time', 'datetime'), Field('repeats', 'integer', default=1, comment="0=unlimited", requires=IS_INT_IN_RANGE(0, None)), Field('retry_failed', 'integer', default=0, comment="-1=unlimited", requires=IS_INT_IN_RANGE(-1, None)), Field('period', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(0, None)), Field('timeout', 'integer', default=60, comment='seconds', requires=IS_INT_IN_RANGE(0, None)), Field('sync_output', 'integer', default=0, comment="update output every n sec: 0=never", requires=IS_INT_IN_RANGE(0, None)), Field('times_run', 'integer', default=0, writable=False), Field('times_failed', 'integer', default=0, writable=False), Field('last_run_time', 'datetime', writable=False, readable=False), Field('assigned_worker_name', default='', writable=False), on_define=self.set_requirements, migrate=migrate, format='%(task_name)s') db.define_table( 'scheduler_run', Field('task_id', 'reference scheduler_task'), Field('status', requires=IS_IN_SET(RUN_STATUS)), Field('start_time', 'datetime'), Field('stop_time', 'datetime'), Field('run_output', 'text'), Field('run_result', 'text'), Field('traceback', 'text'), Field('worker_name', default=self.worker_name), migrate=migrate) db.define_table( 'scheduler_worker', Field('worker_name', unique=True), Field('first_heartbeat', 'datetime'), Field('last_heartbeat', 'datetime'), Field('status', requires=IS_IN_SET(WORKER_STATUS)), Field('is_ticker', 'boolean', default=False, writable=False), Field('group_names', 'list:string', default=self.group_names),#FIXME writable=False or give the chance to update dinamically the groups? 
migrate=migrate) if migrate: db.commit() def loop(self, worker_name=None): signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) try: self.start_heartbeats() while True and self.have_heartbeat: if self.worker_status[0] == DISABLED: logger.debug('Someone stopped me, sleeping until better times come (%s)', self.worker_status[1]) self.sleep() continue logger.debug('looping...') task = self.wrapped_pop_task() if task: self.empty_runs = 0 self.worker_status[0] = RUNNING self.report_task(task, self.async(task)) self.worker_status[0] = ACTIVE else: self.empty_runs += 1 logger.debug('sleeping...') if self.max_empty_runs != 0: logger.debug('empty runs %s/%s', self.empty_runs, self.max_empty_runs) if self.empty_runs >= self.max_empty_runs: logger.info( 'empty runs limit reached, killing myself') self.die() self.sleep() except (KeyboardInterrupt, SystemExit): logger.info('catched') self.die() def wrapped_assign_tasks(self, db): logger.debug('Assigning tasks...') db.commit() #db.commit() only for Mysql x = 0 while x < 10: try: self.assign_tasks(db) db.commit() logger.debug('Tasks assigned...') break except: db.rollback() logger.error('TICKER: error assigning tasks (%s)', x) x += 1 time.sleep(0.5) def wrapped_pop_task(self): db = self.db db.commit() #another nifty db.commit() only for Mysql x = 0 while x < 10: try: rtn = self.pop_task(db) return rtn break except: db.rollback() logger.error(' error popping tasks') x += 1 time.sleep(0.5) def pop_task(self, db): now = self.now() st = self.db.scheduler_task if self.is_a_ticker and self.do_assign_tasks: #I'm a ticker, and 5 loops passed without reassigning tasks, let's do #that and loop again self.wrapped_assign_tasks(db) return None #ready to process something grabbed = db(st.assigned_worker_name == self.worker_name)( st.status == ASSIGNED) task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first() if task: task.update_record(status=RUNNING, last_run_time=now) #noone will touch my task! 
db.commit() logger.debug(' work to do %s', task.id) else: if self.greedy and self.is_a_ticker: #there are other tasks ready to be assigned logger.info('TICKER: greedy loop') self.wrapped_assign_tasks(db) else: logger.info('nothing to do') return None next_run_time = task.last_run_time + datetime.timedelta( seconds=task.period) times_run = task.times_run + 1 if times_run < task.repeats or task.repeats == 0: #need to run (repeating task) run_again = True else: #no need to run again run_again = False run_id = 0 while True and not self.discard_results: logger.debug(' new scheduler_run record') try: run_id = db.scheduler_run.insert( task_id=task.id, status=RUNNING, start_time=now, worker_name=self.worker_name) db.commit() break except: time.sleep(0.5) db.rollback() logger.info('new task %(id)s "%(task_name)s" %(application_name)s.%(function_name)s' % task) return Task( app=task.application_name, function=task.function_name, timeout=task.timeout, args=task.args, # in json vars=task.vars, # in json task_id=task.id, run_id=run_id, run_again=run_again, next_run_time=next_run_time, times_run=times_run, stop_time=task.stop_time, retry_failed=task.retry_failed, times_failed=task.times_failed, sync_output=task.sync_output, uuid=task.uuid) def report_task(self, task, task_report): db = self.db now = self.now() while True: try: if not self.discard_results: if task_report.result != 'null' or task_report.tb: #result is 'null' as a string if task completed #if it's stopped it's None as NoneType, so we record #the STOPPED "run" anyway logger.debug(' recording task report in db (%s)', task_report.status) db(db.scheduler_run.id == task.run_id).update( status=task_report.status, stop_time=now, run_result=task_report.result, run_output=task_report.output, traceback=task_report.tb) else: logger.debug(' deleting task report in db because of no result') db(db.scheduler_run.id == task.run_id).delete() #if there is a stop_time and the following run would exceed it is_expired = (task.stop_time and task.next_run_time > task.stop_time and True or False) status = (task.run_again and is_expired and EXPIRED or task.run_again and not is_expired and QUEUED or COMPLETED) if task_report.status == COMPLETED: d = dict(status=status, next_run_time=task.next_run_time, times_run=task.times_run, times_failed=0 ) db(db.scheduler_task.id == task.task_id)( db.scheduler_task.status == RUNNING).update(**d) else: st_mapping = {'FAILED': 'FAILED', 'TIMEOUT': 'TIMEOUT', 'STOPPED': 'QUEUED'}[task_report.status] status = (task.retry_failed and task.times_failed < task.retry_failed and QUEUED or task.retry_failed == -1 and QUEUED or st_mapping) db( (db.scheduler_task.id == task.task_id) & (db.scheduler_task.status == RUNNING) ).update( times_failed=db.scheduler_task.times_failed + 1, next_run_time=task.next_run_time, status=status ) db.commit() logger.info('task completed (%s)', task_report.status) break except: db.rollback() time.sleep(0.5) def adj_hibernation(self): if self.worker_status[0] == DISABLED: wk_st = self.worker_status[1] hibernation = wk_st + 1 if wk_st < MAXHIBERNATION else MAXHIBERNATION self.worker_status[1] = hibernation def send_heartbeat(self, counter): if not self.db_thread: logger.debug('thread building own DAL object') self.db_thread = DAL( self.db._uri, folder=self.db._adapter.folder) self.define_tables(self.db_thread, migrate=False) try: db = self.db_thread sw, st = db.scheduler_worker, db.scheduler_task now = self.now() # record heartbeat mybackedstatus = db(sw.worker_name == self.worker_name).select().first() if not 
mybackedstatus: sw.insert(status=ACTIVE, worker_name=self.worker_name, first_heartbeat=now, last_heartbeat=now, group_names=self.group_names) self.worker_status = [ACTIVE, 1] # activating the process mybackedstatus = ACTIVE else: mybackedstatus = mybackedstatus.status if mybackedstatus == DISABLED: # keep sleeping self.worker_status[0] = DISABLED if self.worker_status[1] == MAXHIBERNATION: logger.debug('........recording heartbeat (%s)', self.worker_status[0]) db(sw.worker_name == self.worker_name).update( last_heartbeat=now) elif mybackedstatus == TERMINATE: self.worker_status[0] = TERMINATE logger.debug("Waiting to terminate the current task") self.give_up() return elif mybackedstatus == KILL: self.worker_status[0] = KILL self.die() else: if mybackedstatus == STOP_TASK: logger.info('Asked to kill the current task') self.terminate_process() logger.debug('........recording heartbeat (%s)', self.worker_status[0]) db(sw.worker_name == self.worker_name).update( last_heartbeat=now, status=ACTIVE) self.worker_status[1] = 1 # re-activating the process if self.worker_status[0] != RUNNING: self.worker_status[0] = ACTIVE self.do_assign_tasks = False if counter % 5 == 0 or mybackedstatus == PICK: try: # delete inactive workers expiration = now - datetime.timedelta(seconds=self.heartbeat * 3) departure = now - datetime.timedelta( seconds=self.heartbeat * 3 * MAXHIBERNATION) logger.debug( ' freeing workers that have not sent heartbeat') inactive_workers = db( ((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) | ((sw.last_heartbeat < departure) & (sw.status != ACTIVE)) ) db(st.assigned_worker_name.belongs( inactive_workers._select(sw.worker_name)))(st.status == RUNNING)\ .update(assigned_worker_name='', status=QUEUED) inactive_workers.delete() try: self.is_a_ticker = self.being_a_ticker() except: logger.error('Error coordinating TICKER') if self.worker_status[0] == ACTIVE: self.do_assign_tasks = True except: logger.error('Error cleaning up') db.commit() except: logger.error('Error retrieving status') db.rollback() self.adj_hibernation() self.sleep() def being_a_ticker(self): db = self.db_thread sw = db.scheduler_worker all_active = db( (sw.worker_name != self.worker_name) & (sw.status == ACTIVE) ).select() ticker = all_active.find(lambda row: row.is_ticker is True).first() not_busy = self.worker_status[0] == ACTIVE if not ticker: #if no other tickers are around if not_busy: #only if I'm not busy db(sw.worker_name == self.worker_name).update(is_ticker=True) db(sw.worker_name != self.worker_name).update(is_ticker=False) logger.info("TICKER: I'm a ticker") else: #I'm busy if len(all_active) >= 1: #so I'll "downgrade" myself to a "poor worker" db(sw.worker_name == self.worker_name).update(is_ticker=False) else: not_busy = True db.commit() return not_busy else: logger.info( "%s is a ticker, I'm a poor worker" % ticker.worker_name) return False def assign_tasks(self, db): sw, st = db.scheduler_worker, db.scheduler_task now = self.now() all_workers = db(sw.status == ACTIVE).select() #build workers as dict of groups wkgroups = {} for w in all_workers: group_names = w.group_names for gname in group_names: if gname not in wkgroups: wkgroups[gname] = dict( workers=[{'name': w.worker_name, 'c': 0}]) else: wkgroups[gname]['workers'].append( {'name': w.worker_name, 'c': 0}) #set queued tasks that expired between "runs" (i.e., you turned off #the scheduler): then it wasn't expired, but now it is db(st.status.belongs( (QUEUED, ASSIGNED)))(st.stop_time < now).update(status=EXPIRED) all_available = db( 
(st.status.belongs((QUEUED, ASSIGNED))) & ((st.times_run < st.repeats) | (st.repeats == 0)) & (st.start_time <= now) & ((st.stop_time == None) | (st.stop_time > now)) & (st.next_run_time <= now) & (st.enabled == True) ) limit = len(all_workers) * (50 / (len(wkgroups) or 1)) #if there are a moltitude of tasks, let's figure out a maximum of tasks per worker. #this can be adjusted with some added intelligence (like esteeming how many tasks will #a worker complete before the ticker reassign them around, but the gain is quite small #50 is quite a sweet spot also for fast tasks, with sane heartbeat values #NB: ticker reassign tasks every 5 cycles, so if a worker completes his 50 tasks in less #than heartbeat*5 seconds, it won't pick new tasks until heartbeat*5 seconds pass. #If a worker is currently elaborating a long task, all other tasks assigned #to him needs to be reassigned "freely" to other workers, that may be free. #this shuffles up things a bit, in order to maintain the idea of a semi-linear scalability #let's freeze it up db.commit() x = 0 for group in wkgroups.keys(): tasks = all_available(st.group_name == group).select( limitby=(0, limit), orderby = st.next_run_time) #let's break up the queue evenly among workers for task in tasks: x += 1 gname = task.group_name ws = wkgroups.get(gname) if ws: counter = 0 myw = 0 for i, w in enumerate(ws['workers']): if w['c'] < counter: myw = i counter = w['c'] d = dict( status=ASSIGNED, assigned_worker_name=wkgroups[gname]['workers'][myw]['name'] ) if not task.task_name: d['task_name'] = task.function_name task.update_record(**d) wkgroups[gname]['workers'][myw]['c'] += 1 db.commit() #I didn't report tasks but I'm working nonetheless!!!! if x > 0: self.empty_runs = 0 #I'll be greedy only if tasks assigned are equal to the limit # (meaning there could be others ready to be assigned) self.greedy = x >= limit and True or False logger.info('TICKER: workers are %s', len(all_workers)) logger.info('TICKER: tasks are %s', x) def sleep(self): time.sleep(self.heartbeat * self.worker_status[1]) # should only sleep until next available task def set_worker_status(self, group_names=None, action=ACTIVE): if not group_names: group_names = self.group_names elif isinstance(group_names, str): group_names = [group_names] for group in group_names: self.db( self.db.scheduler_worker.group_names.contains(group) ).update(status=action) def disable(self, group_names=None): self.set_worker_status(group_names=group_names,action=DISABLED) def resume(self, group_names=None): self.set_worker_status(group_names=group_names,action=ACTIVE) def terminate(self, group_names=None): self.set_worker_status(group_names=group_names,action=TERMINATE) def kill(self, group_names=None): self.set_worker_status(group_names=group_names,action=KILL) def queue_task(self, function, pargs=[], pvars={}, **kwargs): """ Queue tasks. This takes care of handling the validation of all values. :param function: the function (anything callable with a __name__) :param pargs: "raw" args to be passed to the function. Automatically jsonified. :param pvars: "raw" kwargs to be passed to the function. Automatically jsonified :param kwargs: all the scheduler_task columns. args and vars here should be in json format already, they will override pargs and pvars returns a dict just as a normal validate_and_insert, plus a uuid key holding the uuid of the queued task. If validation is not passed, both id and uuid will be None, and you'll get an "error" dict holding the errors found. 
""" if hasattr(function, '__name__'): function = function.__name__ targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs) tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars) tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid() tname = 'task_name' in kwargs and kwargs.pop('task_name') or function immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None rtn = self.db.scheduler_task.validate_and_insert( function_name=function, task_name=tname, args=targs, vars=tvars, uuid=tuuid, **kwargs) if not rtn.errors: rtn.uuid = tuuid if immediate: self.db(self.db.scheduler_worker.is_ticker == True).update(status=PICK) else: rtn.uuid = None return rtn def task_status(self, ref, output=False): """ Shortcut for task status retrieval :param ref: can be - integer --> lookup will be done by scheduler_task.id - string --> lookup will be done by scheduler_task.uuid - query --> lookup as you wish (as in db.scheduler_task.task_name == 'test1') :param output: fetch also the scheduler_run record Returns a single Row object, for the last queued task If output == True, returns also the last scheduler_run record scheduler_run record is fetched by a left join, so it can have all fields == None """ from gluon.dal import Query sr, st = self.db.scheduler_run, self.db.scheduler_task if isinstance(ref, int): q = st.id == ref elif isinstance(ref, str): q = st.uuid == ref elif isinstance(ref, Query): q = ref else: raise SyntaxError( "You can retrieve results only by id, uuid or Query") fields = [st.ALL] left = False orderby = ~st.id if output: fields = st.ALL, sr.ALL left = sr.on(sr.task_id == st.id) orderby = ~st.id | ~sr.id row = self.db(q).select( *fields, **dict(orderby=orderby, left=left, limitby=(0, 1)) ).first() if row and output: row.result = row.scheduler_run.run_result and \ loads(row.scheduler_run.run_result, object_hook=_decode_dict) or None return row def stop_task(self, ref): """ Experimental!!! Shortcut for task termination. If the task is RUNNING it will terminate it --> execution will be set as FAILED If the task is QUEUED, its stop_time will be set as to "now", the enabled flag will be set to False, status to STOPPED :param ref: can be - integer --> lookup will be done by scheduler_task.id - string --> lookup will be done by scheduler_task.uuid Returns: - 1 if task was stopped (meaning an update has been done) - None if task was not found, or if task was not RUNNING or QUEUED """ from gluon.dal import Query st, sw = self.db.scheduler_task, self.db.scheduler_worker if isinstance(ref, int): q = st.id == ref elif isinstance(ref, str): q = st.uuid == ref else: raise SyntaxError( "You can retrieve results only by id or uuid") task = self.db(q).select(st.id, st.status, st.assigned_worker_name).first() rtn = None if not task: return rtn if task.status == 'RUNNING': rtn = self.db(sw.worker_name == task.assigned_worker_name).update(status=STOP_TASK) elif task.status == 'QUEUED': rtn = self.db(q).update(stop_time=self.now(), enabled=False, status=STOPPED) return rtn def main(): """ allows to run worker without python web2py.py .... 
by simply python this.py """ parser = optparse.OptionParser() parser.add_option( "-w", "--worker_name", dest="worker_name", default=None, help="start a worker with name") parser.add_option( "-b", "--heartbeat", dest="heartbeat", default=10, type='int', help="heartbeat time in seconds (default 10)") parser.add_option( "-L", "--logger_level", dest="logger_level", default=30, type='int', help="set debug output level (0-100, 0 means all, 100 means none;default is 30)") parser.add_option("-E", "--empty-runs", dest="max_empty_runs", type='int', default=0, help="max loops with no grabbed tasks permitted (0 for never check)") parser.add_option( "-g", "--group_names", dest="group_names", default='main', help="comma separated list of groups to be picked by the worker") parser.add_option( "-f", "--db_folder", dest="db_folder", default='/Users/mdipierro/web2py/applications/scheduler/databases', help="location of the dal database folder") parser.add_option( "-u", "--db_uri", dest="db_uri", default='sqlite://storage.sqlite', help="database URI string (web2py DAL syntax)") parser.add_option( "-t", "--tasks", dest="tasks", default=None, help="file containing task files, must define" + "tasks = {'task_name':(lambda: 'output')} or similar set of tasks") parser.add_option( "-U", "--utc-time", dest="utc_time", default=False, help="work with UTC timestamps" ) (options, args) = parser.parse_args() if not options.tasks or not options.db_uri: print USAGE if options.tasks: path, filename = os.path.split(options.tasks) if filename.endswith('.py'): filename = filename[:-3] sys.path.append(path) print 'importing tasks...' tasks = __import__(filename, globals(), locals(), [], -1).tasks print 'tasks found: ' + ', '.join(tasks.keys()) else: tasks = {} group_names = [x.strip() for x in options.group_names.split(',')] logging.getLogger().setLevel(options.logger_level) print 'groups for this worker: ' + ', '.join(group_names) print 'connecting to database in folder: ' + options.db_folder or './' print 'using URI: ' + options.db_uri db = DAL(options.db_uri, folder=options.db_folder) print 'instantiating scheduler...' scheduler = Scheduler(db=db, worker_name=options.worker_name, tasks=tasks, migrate=True, group_names=group_names, heartbeat=options.heartbeat, max_empty_runs=options.max_empty_runs, utc_time=options.utc_time) signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) print 'starting main worker loop...' scheduler.loop() if __name__ == '__main__': main()
Python
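Besides the command-line entry point above, the Scheduler is normally driven from a model file, as described in the USAGE notes. The sketch below follows the queue_task() and task_status() docstrings; db is the application's DAL instance and myapp is a hypothetical app name, and a worker still has to be started separately (python web2py.py -K myapp).

# Sketch: queueing a task from applications/myapp/models/scheduler.py
# (db is assumed to be the app's DAL instance, defined earlier in the models).
from gluon.scheduler import Scheduler

def demo1(*args, **vars):
    return 'args=%s vars=%s' % (args, vars)

scheduler = Scheduler(db, dict(demo1=demo1))

# pargs/pvars are jsonified automatically; extra kwargs map to scheduler_task columns.
ret = scheduler.queue_task(demo1, pargs=[1, 2], pvars={'x': 3},
                           timeout=30, period=60, repeats=1)
print ret.id, ret.uuid, ret.errors     # validate_and_insert result plus the task uuid

# Later: fetch the last queued task for that uuid, optionally joined
# with its scheduler_run record.
row = scheduler.task_status(ret.uuid, output=True)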
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework (Copyrighted, 2007-2011). License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Author: Thadeus Burgess Contributors: - Thank you to Massimo Di Pierro for creating the original gluon/template.py - Thank you to Jonathan Lundell for extensively testing the regex on Jython. - Thank you to Limodou (creater of uliweb) who inspired the block-element support for web2py. """ import os import cgi import logging from re import compile, sub, escape, DOTALL try: import cStringIO as StringIO except: from io import StringIO try: # have web2py from restricted import RestrictedError from globals import current except ImportError: # do not have web2py current = None def RestrictedError(a, b, c): logging.error(str(a) + ':' + str(b) + ':' + str(c)) return RuntimeError class Node(object): """ Basic Container Object """ def __init__(self, value=None, pre_extend=False): self.value = value self.pre_extend = pre_extend def __str__(self): return str(self.value) class SuperNode(Node): def __init__(self, name='', pre_extend=False): self.name = name self.value = None self.pre_extend = pre_extend def __str__(self): if self.value: return str(self.value) else: # raise SyntaxError("Undefined parent block ``%s``. \n" % self.name + "You must define a block before referencing it.\nMake sure you have not left out an ``{{end}}`` tag." ) return '' def __repr__(self): return "%s->%s" % (self.name, self.value) def output_aux(node, blocks): # If we have a block level # If we can override this block. # Override block from vars. # Else we take the default # Else its just a string return (blocks[node.name].output(blocks) if node.name in blocks else node.output(blocks)) \ if isinstance(node, BlockNode) \ else str(node) class BlockNode(Node): """ Block Container. This Node can contain other Nodes and will render in a hierarchical order of when nodes were added. ie:: {{ block test }} This is default block test {{ end }} """ def __init__(self, name='', pre_extend=False, delimiters=('{{', '}}')): """ name - Name of this Node. """ self.nodes = [] self.name = name self.pre_extend = pre_extend self.left, self.right = delimiters def __repr__(self): lines = ['%sblock %s%s' % (self.left, self.name, self.right)] lines += [str(node) for node in self.nodes] lines.append('%send%s' % (self.left, self.right)) return ''.join(lines) def __str__(self): """ Get this BlockNodes content, not including child Nodes """ return ''.join(str(node) for node in self.nodes if not isinstance(node, BlockNode)) def append(self, node): """ Add an element to the nodes. Keyword Arguments - node -- Node object or string to append. """ if isinstance(node, str) or isinstance(node, Node): self.nodes.append(node) else: raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node) def extend(self, other): """ Extend the list of nodes with another BlockNode class. Keyword Arguments - other -- BlockNode or Content object to extend from. """ if isinstance(other, BlockNode): self.nodes.extend(other.nodes) else: raise TypeError( "Invalid type; must be instance of ``BlockNode``. %s" % other) def output(self, blocks): """ Merges all nodes into a single string. blocks -- Dictionary of blocks that are extending from this template. """ return ''.join(output_aux(node, blocks) for node in self.nodes) class Content(BlockNode): """ Parent Container -- Used as the root level BlockNode. Contains functions that operate as such. 
""" def __init__(self, name="ContentBlock", pre_extend=False): """ Keyword Arguments name -- Unique name for this BlockNode """ self.name = name self.nodes = [] self.blocks = {} self.pre_extend = pre_extend def __str__(self): return ''.join(output_aux(node, self.blocks) for node in self.nodes) def _insert(self, other, index=0): """ Inserts object at index. """ if isinstance(other, (str, Node)): self.nodes.insert(index, other) else: raise TypeError( "Invalid type, must be instance of ``str`` or ``Node``.") def insert(self, other, index=0): """ Inserts object at index. You may pass a list of objects and have them inserted. """ if isinstance(other, (list, tuple)): # Must reverse so the order stays the same. other.reverse() for item in other: self._insert(item, index) else: self._insert(other, index) def append(self, node): """ Adds a node to list. If it is a BlockNode then we assign a block for it. """ if isinstance(node, (str, Node)): self.nodes.append(node) if isinstance(node, BlockNode): self.blocks[node.name] = node else: raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. %s" % node) def extend(self, other): """ Extends the objects list of nodes with another objects nodes """ if isinstance(other, BlockNode): self.nodes.extend(other.nodes) self.blocks.update(other.blocks) else: raise TypeError( "Invalid type; must be instance of ``BlockNode``. %s" % other) def clear_content(self): self.nodes = [] class TemplateParser(object): default_delimiters = ('{{', '}}') r_tag = compile(r'(\{\{.*?\}\})', DOTALL) r_multiline = compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', DOTALL) # These are used for re-indentation. # Indent + 1 re_block = compile('^(elif |else:|except:|except |finally:).*$', DOTALL) # Indent - 1 re_unblock = compile('^(return|continue|break|raise)( .*)?$', DOTALL) # Indent - 1 re_pass = compile('^pass( .*)?$', DOTALL) def __init__(self, text, name="ParserContainer", context=dict(), path='views/', writer='response.write', lexers={}, delimiters=('{{', '}}'), _super_nodes = [], ): """ text -- text to parse context -- context to parse in path -- folder path to templates writer -- string of writer class to use lexers -- dict of custom lexers to use. delimiters -- for example ('{{','}}') _super_nodes -- a list of nodes to check for inclusion this should only be set by "self.extend" It contains a list of SuperNodes from a child template that need to be handled. """ # Keep a root level name. self.name = name # Raw text to start parsing. self.text = text # Writer to use (refer to the default for an example). # This will end up as # "%s(%s, escape=False)" % (self.writer, value) self.writer = writer # Dictionary of custom name lexers to use. if isinstance(lexers, dict): self.lexers = lexers else: self.lexers = {} # Path of templates self.path = path # Context for templates. self.context = context # allow optional alternative delimiters self.delimiters = delimiters if delimiters != self.default_delimiters: escaped_delimiters = (escape(delimiters[0]), escape(delimiters[1])) self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters, DOTALL) elif hasattr(context.get('response', None), 'delimiters'): if context['response'].delimiters != self.default_delimiters: escaped_delimiters = ( escape(context['response'].delimiters[0]), escape(context['response'].delimiters[1])) self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters, DOTALL) # Create a root level Content that everything will go into. self.content = Content(name=name) # Stack will hold our current stack of nodes. 
# As we descend into a node, it will be added to the stack # And when we leave, it will be removed from the stack. # self.content should stay on the stack at all times. self.stack = [self.content] # This variable will hold a reference to every super block # that we come across in this template. self.super_nodes = [] # This variable will hold a reference to the child # super nodes that need handling. self.child_super_nodes = _super_nodes # This variable will hold a reference to every block # that we come across in this template self.blocks = {} # Begin parsing. self.parse(text) def to_string(self): """ Return the parsed template with correct indentation. Used to make it easier to port to python3. """ return self.reindent(str(self.content)) def __str__(self): "Make sure str works exactly the same as python 3" return self.to_string() def __unicode__(self): "Make sure str works exactly the same as python 3" return self.to_string() def reindent(self, text): """ Reindents a string of unindented python code. """ # Get each of our lines into an array. lines = text.split('\n') # Our new lines new_lines = [] # Keeps track of how many indents we have. # Used for when we need to drop a level of indentation # only to reindent on the next line. credit = 0 # Current indentation k = 0 ################# # THINGS TO KNOW ################# # k += 1 means indent # k -= 1 means unindent # credit = 1 means unindent on the next line. for raw_line in lines: line = raw_line.strip() # ignore empty lines if not line: continue # If we have a line that contains python code that # should be unindented for this line of code. # and then reindented for the next line. if TemplateParser.re_block.match(line): k = k + credit - 1 # We obviously can't have a negative indentation k = max(k, 0) # Add the indentation! new_lines.append(' ' * (4 * k) + line) # Bank account back to 0 again :( credit = 0 # If we are a pass block, we obviously de-dent. if TemplateParser.re_pass.match(line): k -= 1 # If we are any of the following, de-dent. # However, we should stay on the same level # But the line right after us will be de-dented. # So we add one credit to keep us at the level # while moving back one indentation level. if TemplateParser.re_unblock.match(line): credit = 1 k -= 1 # If we are an if statement, a try, or a semi-colon we # probably need to indent the next line. if line.endswith(':') and not line.startswith('#'): k += 1 # This must come before so that we can raise an error with the # right content. new_text = '\n'.join(new_lines) if k > 0: self._raise_error('missing "pass" in view', new_text) elif k < 0: self._raise_error('too many "pass" in view', new_text) return new_text def _raise_error(self, message='', text=None): """ Raise an error using itself as the filename and textual content. """ raise RestrictedError(self.name, text or self.text, message) def _get_file_text(self, filename): """ Attempt to open ``filename`` and retrieve its text. This will use self.path to search for the file. """ # If they didn't specify a filename, how can we find one! if not filename.strip(): self._raise_error('Invalid template filename') # Allow Views to include other views dynamically context = self.context if current and not "response" in context: context["response"] = getattr(current, 'response', None) # Get the filename; filename looks like ``"template.html"``. # We need to eval to remove the quotes and get the string type. filename = eval(filename, context) # Get the path of the file on the system. 
filepath = self.path and os.path.join(self.path, filename) or filename # try to read the text. try: fileobj = open(filepath, 'rb') text = fileobj.read() fileobj.close() except IOError: self._raise_error('Unable to open included view file: ' + filepath) return text def include(self, content, filename): """ Include ``filename`` here. """ text = self._get_file_text(filename) t = TemplateParser(text, name=filename, context=self.context, path=self.path, writer=self.writer, delimiters=self.delimiters) content.append(t.content) def extend(self, filename): """ Extend ``filename``. Anything not declared in a block defined by the parent will be placed in the parent templates ``{{include}}`` block. """ text = self._get_file_text(filename) # Create out nodes list to send to the parent super_nodes = [] # We want to include any non-handled nodes. super_nodes.extend(self.child_super_nodes) # And our nodes as well. super_nodes.extend(self.super_nodes) t = TemplateParser(text, name=filename, context=self.context, path=self.path, writer=self.writer, delimiters=self.delimiters, _super_nodes=super_nodes) # Make a temporary buffer that is unique for parent # template. buf = BlockNode( name='__include__' + filename, delimiters=self.delimiters) pre = [] # Iterate through each of our nodes for node in self.content.nodes: # If a node is a block if isinstance(node, BlockNode): # That happens to be in the parent template if node.name in t.content.blocks: # Do not include it continue if isinstance(node, Node): # Or if the node was before the extension # we should not include it if node.pre_extend: pre.append(node) continue # Otherwise, it should go int the # Parent templates {{include}} section. buf.append(node) else: buf.append(node) # Clear our current nodes. We will be replacing this with # the parent nodes. self.content.nodes = [] t_content = t.content # Set our include, unique by filename t_content.blocks['__include__' + filename] = buf # Make sure our pre_extended nodes go first t_content.insert(pre) # Then we extend our blocks t_content.extend(self.content) # Work off the parent node. self.content = t_content def parse(self, text): # Basically, r_tag.split will split the text into # an array containing, 'non-tag', 'tag', 'non-tag', 'tag' # so if we alternate this variable, we know # what to look for. This is alternate to # line.startswith("{{") in_tag = False extend = None pre_extend = True # Use a list to store everything in # This is because later the code will "look ahead" # for missing strings or brackets. ij = self.r_tag.split(text) # j = current index # i = current item stack = self.stack for j in range(len(ij)): i = ij[j] if i: if not stack: self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag') # Our current element in the stack. top = stack[-1] if in_tag: line = i # Get rid of '{{' and '}}' line = line[2:-2].strip() # This is bad juju, but let's do it anyway if not line: continue # We do not want to replace the newlines in code, # only in block comments. def remove_newline(re_val): # Take the entire match and replace newlines with # escaped newlines. return re_val.group(0).replace('\n', '\\n') # Perform block comment escaping. 
# This performs escaping ON anything # in between """ and """ line = sub(TemplateParser.r_multiline, remove_newline, line) if line.startswith('='): # IE: {{=response.title}} name, value = '=', line[1:].strip() else: v = line.split(' ', 1) if len(v) == 1: # Example # {{ include }} # {{ end }} name = v[0] value = '' else: # Example # {{ block pie }} # {{ include "layout.html" }} # {{ for i in range(10): }} name = v[0] value = v[1] # This will replace newlines in block comments # with the newline character. This is so that they # retain their formatting, but squish down to one # line in the rendered template. # First check if we have any custom lexers if name in self.lexers: # Pass the information to the lexer # and allow it to inject in the environment # You can define custom names such as # '{{<<variable}}' which could potentially # write unescaped version of the variable. self.lexers[name](parser=self, value=value, top=top, stack=stack) elif name == '=': # So we have a variable to insert into # the template buf = "\n%s(%s)" % (self.writer, value) top.append(Node(buf, pre_extend=pre_extend)) elif name == 'block' and not value.startswith('='): # Make a new node with name. node = BlockNode(name=value.strip(), pre_extend=pre_extend, delimiters=self.delimiters) # Append this node to our active node top.append(node) # Make sure to add the node to the stack. # so anything after this gets added # to this node. This allows us to # "nest" nodes. stack.append(node) elif name == 'end' and not value.startswith('='): # We are done with this node. # Save an instance of it self.blocks[top.name] = top # Pop it. stack.pop() elif name == 'super' and not value.startswith('='): # Get our correct target name # If they just called {{super}} without a name # attempt to assume the top blocks name. if value: target_node = value else: target_node = top.name # Create a SuperNode instance node = SuperNode(name=target_node, pre_extend=pre_extend) # Add this to our list to be taken care of self.super_nodes.append(node) # And put in in the tree top.append(node) elif name == 'include' and not value.startswith('='): # If we know the target file to include if value: self.include(top, value) # Otherwise, make a temporary include node # That the child node will know to hook into. else: include_node = BlockNode( name='__include__' + self.name, pre_extend=pre_extend, delimiters=self.delimiters) top.append(include_node) elif name == 'extend' and not value.startswith('='): # We need to extend the following # template. extend = value pre_extend = False else: # If we don't know where it belongs # we just add it anyways without formatting. if line and in_tag: # Split on the newlines >.< tokens = line.split('\n') # We need to look for any instances of # for i in range(10): # = i # pass # So we can properly put a response.write() in place. continuation = False len_parsed = 0 for k, token in enumerate(tokens): token = tokens[k] = token.strip() len_parsed += len(token) if token.startswith('='): if token.endswith('\\'): continuation = True tokens[k] = "\n%s(%s" % ( self.writer, token[1:].strip()) else: tokens[k] = "\n%s(%s)" % ( self.writer, token[1:].strip()) elif continuation: tokens[k] += ')' continuation = False buf = "\n%s" % '\n'.join(tokens) top.append(Node(buf, pre_extend=pre_extend)) else: # It is HTML so just include it. 
buf = "\n%s(%r, escape=False)" % (self.writer, i) top.append(Node(buf, pre_extend=pre_extend)) # Remember: tag, not tag, tag, not tag in_tag = not in_tag # Make a list of items to remove from child to_rm = [] # Go through each of the children nodes for node in self.child_super_nodes: # If we declared a block that this node wants to include if node.name in self.blocks: # Go ahead and include it! node.value = self.blocks[node.name] # Since we processed this child, we don't need to # pass it along to the parent to_rm.append(node) # Remove some of the processed nodes for node in to_rm: # Since this is a pointer, it works beautifully. # Sometimes I miss C-Style pointers... I want my asterisk... self.child_super_nodes.remove(node) # If we need to extend a template. if extend: self.extend(extend) # We need this for integration with gluon def parse_template(filename, path='views/', context=dict(), lexers={}, delimiters=('{{', '}}') ): """ filename can be a view filename in the views folder or an input stream path is the path of a views folder context is a dictionary of symbols used to render the template """ # First, if we have a str try to open the file if isinstance(filename, str): try: fp = open(os.path.join(path, filename), 'rb') text = fp.read() fp.close() except IOError: raise RestrictedError(filename, '', 'Unable to find the file') else: text = filename.read() # Use the file contents to get a parsed template and return it. return str(TemplateParser(text, context=context, path=path, lexers=lexers, delimiters=delimiters)) def get_parsed(text): """ Returns the indented python code of text. Useful for unit testing. """ return str(TemplateParser(text)) class DummyResponse(): def __init__(self): self.body = StringIO.StringIO() def write(self, data, escape=True): if not escape: self.body.write(str(data)) elif hasattr(data, 'as_html') and callable(data.as_html): self.body.write(data.as_html()) else: # make it a string if not isinstance(data, (str, unicode)): data = str(data) elif isinstance(data, unicode): data = data.encode('utf8', 'xmlcharrefreplace') data = cgi.escape(data, True).replace("'", "&#x27;") self.body.write(data) class NOESCAPE(): """ A little helper to avoid escaping. """ def __init__(self, text): self.text = text def xml(self): return self.text # And this is a generic render function. # Here for integration with gluon. def render(content="hello world", stream=None, filename=None, path=None, context={}, lexers={}, delimiters=('{{', '}}') ): """ >>> render() 'hello world' >>> render(content='abc') 'abc' >>> render(content='abc\\'') "abc'" >>> render(content='a"\\'bc') 'a"\\'bc' >>> render(content='a\\nbc') 'a\\nbc' >>> render(content='a"bcd"e') 'a"bcd"e' >>> render(content="'''a\\nc'''") "'''a\\nc'''" >>> render(content="'''a\\'c'''") "'''a\'c'''" >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5)) '0<br />1<br />2<br />3<br />4<br />' >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}')) '0<br />1<br />2<br />3<br />4<br />' >>> render(content="{{='''hello\\nworld'''}}") 'hello\\nworld' >>> render(content='{{for i in range(3):\\n=i\\npass}}') '012' """ # here to avoid circular Imports try: from globals import Response except ImportError: # Working standalone. Build a mock Response object. Response = DummyResponse # Add it to the context so we can use it. 
if not 'NOESCAPE' in context: context['NOESCAPE'] = NOESCAPE # save current response class if context and 'response' in context: old_response_body = context['response'].body context['response'].body = StringIO.StringIO() else: old_response_body = None context['response'] = Response() # If we don't have anything to render, why bother? if not content and not stream and not filename: raise SyntaxError("Must specify a stream or filename or content") # Here for legacy purposes, probably can be reduced to # something more simple. close_stream = False if not stream: if filename: stream = open(filename, 'rb') close_stream = True elif content: stream = StringIO.StringIO(content) # Execute the template. code = str(TemplateParser(stream.read( ), context=context, path=path, lexers=lexers, delimiters=delimiters)) try: exec(code) in context except Exception: # for i,line in enumerate(code.split('\n')): print i,line raise if close_stream: stream.close() # Returned the rendered content. text = context['response'].body.getvalue() if old_response_body is not None: context['response'].body = old_response_body return text if __name__ == '__main__': import doctest doctest.testmod()
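# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assuming the
# template engine above is saved as a standalone module named ``template``
# (the module name is an assumption; inside web2py it ships as
# gluon/template.py), ``render`` and ``get_parsed`` can be exercised directly,
# mirroring the doctests in ``render`` above.
from template import render, get_parsed  # assumed module name

# Render a loop over a context variable:
# -> '0<br />1<br />2<br />3<br />4<br />'
print(render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}',
             context=dict(a=5)))

# Alternative delimiters are passed through to TemplateParser:
# -> 'hi'
print(render(content='{%=greeting%}', context=dict(greeting='hi'),
             delimiters=('{%', '%}')))

# get_parsed shows the re-indented python code generated for a template,
# which is what actually gets exec'd against the context.
print(get_parsed('{{for i in range(3):}}{{=i}}{{pass}}'))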
import codecs import encodings """Caller will hand this library a buffer and ask it to either convert it or auto-detect the type. Based on http://code.activestate.com/recipes/52257/ Licensed under the PSF License """ # None represents a potentially variable byte. "##" in the XML spec... autodetect_dict = { # bytepattern : ("name", (0x00, 0x00, 0xFE, 0xFF): ("ucs4_be"), (0xFF, 0xFE, 0x00, 0x00): ("ucs4_le"), (0xFE, 0xFF, None, None): ("utf_16_be"), (0xFF, 0xFE, None, None): ("utf_16_le"), (0x00, 0x3C, 0x00, 0x3F): ("utf_16_be"), (0x3C, 0x00, 0x3F, 0x00): ("utf_16_le"), (0x3C, 0x3F, 0x78, 0x6D): ("utf_8"), (0x4C, 0x6F, 0xA7, 0x94): ("EBCDIC") } def autoDetectXMLEncoding(buffer): """ buffer -> encoding_name The buffer should be at least 4 bytes long. Returns None if encoding cannot be detected. Note that encoding_name might not have an installed decoder (e.g. EBCDIC) """ # a more efficient implementation would not decode the whole # buffer at once but otherwise we'd have to decode a character at # a time looking for the quote character...that's a pain encoding = "utf_8" # according to the XML spec, this is the default # this code successively tries to refine the default # whenever it fails to refine, it falls back to # the last place encoding was set. if len(buffer) >= 4: bytes = (byte1, byte2, byte3, byte4) = tuple(map(ord, buffer[0:4])) enc_info = autodetect_dict.get(bytes, None) if not enc_info: # try autodetection again removing potentially # variable bytes bytes = (byte1, byte2, None, None) enc_info = autodetect_dict.get(bytes) else: enc_info = None if enc_info: encoding = enc_info # we've got a guess... these are #the new defaults # try to find a more precise encoding using xml declaration secret_decoder_ring = codecs.lookup(encoding)[1] (decoded, length) = secret_decoder_ring(buffer) first_line = decoded.split("\n")[0] if first_line and first_line.startswith(u"<?xml"): encoding_pos = first_line.find(u"encoding") if encoding_pos != -1: # look for double quote quote_pos = first_line.find('"', encoding_pos) if quote_pos == -1: # look for single quote quote_pos = first_line.find("'", encoding_pos) if quote_pos > -1: quote_char, rest = (first_line[quote_pos], first_line[quote_pos + 1:]) encoding = rest[:rest.find(quote_char)] return encoding def decoder(buffer): encoding = autoDetectXMLEncoding(buffer) return buffer.decode(encoding).encode('utf8')
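# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Assuming the
# detection helpers above are importable as ``xmlenc`` (an assumed module
# name), the encoding is guessed from the leading byte pattern first and then
# refined from the XML declaration. Buffers here are Python 2 byte strings,
# since autoDetectXMLEncoding calls ord() on the first four bytes.
from xmlenc import autoDetectXMLEncoding, decoder  # assumed module name

utf8_doc = '<?xml version="1.0" encoding="utf-8"?><root/>'
latin1_doc = '<?xml version="1.0" encoding="iso-8859-1"?><root/>'

print(autoDetectXMLEncoding(utf8_doc))    # -> 'utf-8' (read from the prolog)
print(autoDetectXMLEncoding(latin1_doc))  # -> 'iso-8859-1'
print(decoder(latin1_doc))                # same document re-encoded as UTF-8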
import logging import os try: import Tkinter except: Tkinter = None class MessageBoxHandler(logging.Handler): def __init__(self): logging.Handler.__init__(self) def emit(self, record): if Tkinter: msg = self.format(record) root = Tkinter.Tk() root.wm_title("web2py logger message") text = Tkinter.Text() text["height"] = 12 text.insert(0.1, msg) text.pack() button = Tkinter.Button(root, text="OK", command=root.destroy) button.pack() root.mainloop() class NotifySendHandler(logging.Handler): def __init__(self): logging.Handler.__init__(self) def emit(self, record): if Tkinter: msg = self.format(record) os.system("notify-send '%s'" % msg)
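# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The handlers
# above plug into the standard logging machinery like any other Handler:
# attach one to a logger and matching records pop up as a Tk window or a
# notify-send bubble. ``messageboxhandler`` is an assumed module name.
import logging
from messageboxhandler import NotifySendHandler  # assumed module name

logger = logging.getLogger('web2py.demo')
logger.setLevel(logging.WARNING)

handler = NotifySendHandler()
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)

# Emitted via notify-send (the handler only acts when Tkinter imported cleanly).
logger.warning('disk space is getting low')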
# encoding utf-8 __author__ = "Thadeus Burgess <thadeusb@thadeusb.com>" # we classify as "non-reserved" those key words that are explicitly known # to the parser but are allowed as column or table names. Some key words # that are otherwise non-reserved cannot be used as function or data type n # ames and are in the nonreserved list. (Most of these words represent # built-in functions or data types with special syntax. The function # or type is still available but it cannot be redefined by the user.) # Labeled "reserved" are those tokens that are not allowed as column or # table names. Some reserved key words are allowable as names for # functions or data typesself. # Note at the bottom of the list is a dict containing references to the # tuples, and also if you add a list don't forget to remove its default # set of COMMON. # Keywords that are adapter specific. Such as a list of "postgresql" # or "mysql" keywords # These are keywords that are common to all SQL dialects, and should # never be used as a table or column. Even if you use one of these # the cursor will throw an OperationalError for the SQL syntax. COMMON = set(( 'SELECT', 'INSERT', 'DELETE', 'UPDATE', 'DROP', 'CREATE', 'ALTER', 'WHERE', 'FROM', 'INNER', 'JOIN', 'AND', 'OR', 'LIKE', 'ON', 'IN', 'SET', 'BY', 'GROUP', 'ORDER', 'LEFT', 'OUTER', 'IF', 'END', 'THEN', 'LOOP', 'AS', 'ELSE', 'FOR', 'CASE', 'WHEN', 'MIN', 'MAX', 'DISTINCT', )) POSTGRESQL = set(( 'FALSE', 'TRUE', 'ALL', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASYMMETRIC', 'AUTHORIZATION', 'BETWEEN', 'BIGINT', 'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'CASE', 'CAST', 'CHAR', 'CHARACTER', 'CHECK', 'COALESCE', 'COLLATE', 'COLUMN', 'CONSTRAINT', 'CREATE', 'CROSS', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'DEC', 'DECIMAL', 'DEFAULT', 'DEFERRABLE', 'DESC', 'DISTINCT', 'DO', 'ELSE', 'END', 'EXCEPT', 'EXISTS', 'EXTRACT', 'FETCH', 'FLOAT', 'FOR', 'FOREIGN', 'FREEZE', 'FROM', 'FULL', 'GRANT', 'GREATEST', 'GROUP', 'HAVING', 'ILIKE', 'IN', 'INITIALLY', 'INNER', 'INOUT', 'INT', 'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'IS', 'ISNULL', 'JOIN', 'LEADING', 'LEAST', 'LEFT', 'LIKE', 'LIMIT', 'LOCALTIME', 'LOCALTIMESTAMP', 'NATIONAL', 'NATURAL', 'NCHAR', 'NEW', 'NONE', 'NOT', 'NOTNULL', 'NULL', 'NULLIF', 'NUMERIC', 'OFF', 'OFFSET', 'OLD', 'ON', 'ONLY', 'OR', 'ORDER', 'OUT', 'OUTER', 'OVERLAPS', 'OVERLAY', 'PLACING', 'POSITION', 'PRECISION', 'PRIMARY', 'REAL', 'REFERENCES', 'RETURNING', 'RIGHT', 'ROW', 'SELECT', 'SESSION_USER', 'SETOF', 'SIMILAR', 'SMALLINT', 'SOME', 'SUBSTRING', 'SYMMETRIC', 'TABLE', 'THEN', 'TIME', 'TIMESTAMP', 'TO', 'TRAILING', 'TREAT', 'TRIM', 'UNION', 'UNIQUE', 'USER', 'USING', 'VALUES', 'VARCHAR', 'VARIADIC', 'VERBOSE', 'WHEN', 'WHERE', 'WITH', 'XMLATTRIBUTES', 'XMLCONCAT', 'XMLELEMENT', 'XMLFOREST', 'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', )) POSTGRESQL_NONRESERVED = set(( 'A', 'ABORT', 'ABS', 'ABSENT', 'ABSOLUTE', 'ACCESS', 'ACCORDING', 'ACTION', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', 'ALIAS', 'ALLOCATE', 'ALSO', 'ALTER', 'ALWAYS', 'ARE', 'ARRAY_AGG', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'AT', 'ATOMIC', 'ATTRIBUTE', 'ATTRIBUTES', 'AVG', 'BACKWARD', 'BASE64', 'BEFORE', 'BEGIN', 'BERNOULLI', 'BIT_LENGTH', 'BITVAR', 'BLOB', 'BOM', 'BREADTH', 'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE', 'CASCADED', 'CATALOG', 'CATALOG_NAME', 'CEIL', 'CEILING', 'CHAIN', 'CHAR_LENGTH', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG', 'CHARACTER_SET_NAME', 
'CHARACTER_SET_SCHEMA', 'CHARACTERISTICS', 'CHARACTERS', 'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER', 'COBOL', 'COLLATION', 'COLLATION_CATALOG', 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLLECT', 'COLUMN_NAME', 'COLUMNS', 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPLETION', 'CONCURRENTLY', 'CONDITION', 'CONDITION_NUMBER', 'CONFIGURATION', 'CONNECT', 'CONNECTION', 'CONNECTION_NAME', 'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTAINS', 'CONTENT', 'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORR', 'CORRESPONDING', 'COST', 'COUNT', 'COVAR_POP', 'COVAR_SAMP', 'CREATEDB', 'CREATEROLE', 'CREATEUSER', 'CSV', 'CUBE', 'CUME_DIST', 'CURRENT', 'CURRENT_DEFAULT_TRANSFORM_GROUP', 'CURRENT_PATH', 'CURRENT_TRANSFORM_GROUP_FOR_TYPE', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY', 'DEALLOCATE', 'DECLARE', 'DEFAULTS', 'DEFERRED', 'DEFINED', 'DEFINER', 'DEGREE', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DENSE_RANK', 'DEPTH', 'DEREF', 'DERIVED', 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC', 'DIAGNOSTICS', 'DICTIONARY', 'DISABLE', 'DISCARD', 'DISCONNECT', 'DISPATCH', 'DOCUMENT', 'DOMAIN', 'DOUBLE', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELEMENT', 'EMPTY', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END-EXEC', 'ENUM', 'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPTION', 'EXCLUDE', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', 'EXP', 'EXPLAIN', 'EXTERNAL', 'FAMILY', 'FILTER', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLAG', 'FLOOR', 'FOLLOWING', 'FORCE', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FUNCTION', 'FUSION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANTED', 'GROUPING', 'HANDLER', 'HEADER', 'HEX', 'HIERARCHY', 'HOLD', 'HOST', 'HOUR', # 'ID', 'IDENTITY', 'IF', 'IGNORE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXES', 'INDICATOR', 'INFIX', 'INHERIT', 'INHERITS', 'INITIALIZE', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANCE', 'INSTANTIABLE', 'INSTEAD', 'INTERSECTION', 'INVOKER', 'ISOLATION', 'ITERATE', 'K', 'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LAG', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST', 'LAST_VALUE', 'LATERAL', 'LC_COLLATE', 'LC_CTYPE', 'LEAD', 'LENGTH', 'LESS', 'LEVEL', 'LIKE_REGEX', 'LISTEN', 'LN', 'LOAD', 'LOCAL', 'LOCATION', 'LOCATOR', 'LOCK', 'LOGIN', 'LOWER', 'M', 'MAP', 'MAPPING', 'MATCH', 'MATCHED', 'MAX', 'MAX_CARDINALITY', 'MAXVALUE', 'MEMBER', 'MERGE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', 'MODIFY', 'MODULE', 'MONTH', 'MORE', 'MOVE', 'MULTISET', 'MUMPS', # 'NAME', 'NAMES', 'NAMESPACE', 'NCLOB', 'NESTING', 'NEXT', 'NFC', 'NFD', 'NFKC', 'NFKD', 'NIL', 'NO', 'NOCREATEDB', 'NOCREATEROLE', 'NOCREATEUSER', 'NOINHERIT', 'NOLOGIN', 'NORMALIZE', 'NORMALIZED', 'NOSUPERUSER', 'NOTHING', 'NOTIFY', 'NOWAIT', 'NTH_VALUE', 'NTILE', 'NULLABLE', 'NULLS', 'NUMBER', 'OBJECT', 'OCCURRENCES_REGEX', 'OCTET_LENGTH', 'OCTETS', 'OF', 'OIDS', 'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', 'ORDERING', 'ORDINALITY', 'OTHERS', 'OUTPUT', 'OVER', 'OVERRIDING', 'OWNED', 'OWNER', 'P', 'PAD', 'PARAMETER', 'PARAMETER_MODE', 'PARAMETER_NAME', 'PARAMETER_ORDINAL_POSITION', 'PARAMETER_SPECIFIC_CATALOG', 'PARAMETER_SPECIFIC_NAME', 'PARAMETER_SPECIFIC_SCHEMA', 'PARAMETERS', 'PARSER', 'PARTIAL', 'PARTITION', 'PASCAL', 'PASSING', # 
'PASSWORD', 'PATH', 'PERCENT_RANK', 'PERCENTILE_CONT', 'PERCENTILE_DISC', 'PLANS', 'PLI', 'POSITION_REGEX', 'POSTFIX', 'POWER', 'PRECEDING', 'PREFIX', 'PREORDER', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PUBLIC', 'QUOTE', 'RANGE', 'RANK', 'READ', 'READS', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCING', 'REGR_AVGX', 'REGR_AVGY', 'REGR_COUNT', 'REGR_INTERCEPT', 'REGR_R2', 'REGR_SLOPE', 'REGR_SXX', 'REGR_SXY', 'REGR_SYY', 'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE', 'REPLICA', 'RESET', 'RESPECT', 'RESTART', 'RESTRICT', 'RESULT', 'RETURN', 'RETURNED_CARDINALITY', 'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE', # 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME', 'ROUTINE_SCHEMA', 'ROW_COUNT', 'ROW_NUMBER', 'ROWS', 'RULE', 'SAVEPOINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME', 'SCOPE', 'SCOPE_CATALOG', 'SCOPE_NAME', 'SCOPE_SCHEMA', 'SCROLL', 'SEARCH', 'SECOND', 'SECTION', 'SECURITY', 'SELF', 'SENSITIVE', 'SEQUENCE', 'SERIALIZABLE', 'SERVER', 'SERVER_NAME', 'SESSION', 'SET', 'SETS', 'SHARE', 'SHOW', 'SIMPLE', 'SIZE', 'SOURCE', 'SPACE', 'SPECIFIC', 'SPECIFIC_NAME', 'SPECIFICTYPE', 'SQL', 'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNING', 'SQRT', 'STABLE', 'STANDALONE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS', 'STDDEV_POP', 'STDDEV_SAMP', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRIP', 'STRUCTURE', 'STYLE', 'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBMULTISET', 'SUBSTRING_REGEX', 'SUM', 'SUPERUSER', 'SYSID', 'SYSTEM', 'SYSTEM_USER', 'T', # 'TABLE_NAME', 'TABLESAMPLE', 'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TERMINATE', 'TEXT', 'THAN', 'TIES', 'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TOP_LEVEL_COUNT', 'TRANSACTION', 'TRANSACTION_ACTIVE', 'TRANSACTIONS_COMMITTED', 'TRANSACTIONS_ROLLED_BACK', 'TRANSFORM', 'TRANSFORMS', 'TRANSLATE', 'TRANSLATE_REGEX', 'TRANSLATION', 'TRIGGER', 'TRIGGER_CATALOG', 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM_ARRAY', 'TRUNCATE', 'TRUSTED', 'TYPE', 'UESCAPE', 'UNBOUNDED', 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNKNOWN', 'UNLISTEN', 'UNNAMED', 'UNNEST', 'UNTIL', 'UNTYPED', 'UPDATE', 'UPPER', 'URI', 'USAGE', 'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_CODE', 'USER_DEFINED_TYPE_NAME', 'USER_DEFINED_TYPE_SCHEMA', 'VACUUM', 'VALID', 'VALIDATOR', 'VALUE', 'VAR_POP', 'VAR_SAMP', 'VARBINARY', 'VARIABLE', 'VARYING', 'VERSION', 'VIEW', 'VOLATILE', 'WHENEVER', 'WHITESPACE', 'WIDTH_BUCKET', 'WINDOW', 'WITHIN', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE', 'XML', 'XMLAGG', 'XMLBINARY', 'XMLCAST', 'XMLCOMMENT', 'XMLDECLARATION', 'XMLDOCUMENT', 'XMLEXISTS', 'XMLITERATE', 'XMLNAMESPACES', 'XMLQUERY', 'XMLSCHEMA', 'XMLTABLE', 'XMLTEXT', 'XMLVALIDATE', 'YEAR', 'YES', 'ZONE', )) #Thanks villas FIREBIRD = set(( 'ABS', 'ACTIVE', 'ADMIN', 'AFTER', 'ASCENDING', 'AUTO', 'AUTODDL', 'BASED', 'BASENAME', 'BASE_NAME', 'BEFORE', 'BIT_LENGTH', 'BLOB', 'BLOBEDIT', 'BOOLEAN', 'BOTH', 'BUFFER', 'CACHE', 'CHAR_LENGTH', 'CHARACTER_LENGTH', 'CHECK_POINT_LEN', 'CHECK_POINT_LENGTH', 'CLOSE', 'COMMITTED', 'COMPILETIME', 'COMPUTED', 'CONDITIONAL', 'CONNECT', 'CONTAINING', 'CROSS', 'CSTRING', 'CURRENT_CONNECTION', 'CURRENT_ROLE', 'CURRENT_TRANSACTION', 'CURRENT_USER', 'DATABASE', 'DB_KEY', 'DEBUG', 'DESCENDING', 'DISCONNECT', 'DISPLAY', 'DO', 'ECHO', 'EDIT', 'ENTRY_POINT', 'EVENT', 'EXIT', 'EXTERN', 'FALSE', 'FETCH', 'FILE', 'FILTER', 'FREE_IT', 'FUNCTION', 'GDSCODE', 'GENERATOR', 'GEN_ID', 'GLOBAL', 'GROUP_COMMIT_WAIT', 'GROUP_COMMIT_WAIT_TIME', 'HELP', 'IF', 
'INACTIVE', 'INDEX', 'INIT', 'INPUT_TYPE', 'INSENSITIVE', 'ISQL', 'LC_MESSAGES', 'LC_TYPE', 'LEADING', 'LENGTH', 'LEV', 'LOGFILE', 'LOG_BUFFER_SIZE', 'LOG_BUF_SIZE', 'LONG', 'LOWER', 'MANUAL', 'MAXIMUM', 'MAXIMUM_SEGMENT', 'MAX_SEGMENT', 'MERGE', 'MESSAGE', 'MINIMUM', 'MODULE_NAME', 'NOAUTO', 'NUM_LOG_BUFS', 'NUM_LOG_BUFFERS', 'OCTET_LENGTH', 'OPEN', 'OUTPUT_TYPE', 'OVERFLOW', 'PAGE', 'PAGELENGTH', 'PAGES', 'PAGE_SIZE', 'PARAMETER', # 'PASSWORD', 'PLAN', 'POST_EVENT', 'QUIT', 'RAW_PARTITIONS', 'RDB$DB_KEY', 'RECORD_VERSION', 'RECREATE', 'RECURSIVE', 'RELEASE', 'RESERV', 'RESERVING', 'RETAIN', 'RETURN', 'RETURNING_VALUES', 'RETURNS', # 'ROLE', 'ROW_COUNT', 'ROWS', 'RUNTIME', 'SAVEPOINT', 'SECOND', 'SENSITIVE', 'SHADOW', 'SHARED', 'SHELL', 'SHOW', 'SINGULAR', 'SNAPSHOT', 'SORT', 'STABILITY', 'START', 'STARTING', 'STARTS', 'STATEMENT', 'STATIC', 'STATISTICS', 'SUB_TYPE', 'SUSPEND', 'TERMINATOR', 'TRAILING', 'TRIGGER', 'TRIM', 'TRUE', 'TYPE', 'UNCOMMITTED', 'UNKNOWN', 'USING', 'VARIABLE', 'VERSION', 'WAIT', 'WEEKDAY', 'WHILE', 'YEARDAY', )) FIREBIRD_NONRESERVED = set(( 'BACKUP', 'BLOCK', 'COALESCE', 'COLLATION', 'COMMENT', 'DELETING', 'DIFFERENCE', 'IIF', 'INSERTING', 'LAST', 'LEAVE', 'LOCK', 'NEXT', 'NULLIF', 'NULLS', 'RESTART', 'RETURNING', 'SCALAR_ARRAY', 'SEQUENCE', 'STATEMENT', 'UPDATING', 'ABS', 'ACCENT', 'ACOS', 'ALWAYS', 'ASCII_CHAR', 'ASCII_VAL', 'ASIN', 'ATAN', 'ATAN2', 'BACKUP', 'BIN_AND', 'BIN_OR', 'BIN_SHL', 'BIN_SHR', 'BIN_XOR', 'BLOCK', 'CEIL', 'CEILING', 'COLLATION', 'COMMENT', 'COS', 'COSH', 'COT', 'DATEADD', 'DATEDIFF', 'DECODE', 'DIFFERENCE', 'EXP', 'FLOOR', 'GEN_UUID', 'GENERATED', 'HASH', 'IIF', 'LIST', 'LN', 'LOG', 'LOG10', 'LPAD', 'MATCHED', 'MATCHING', 'MAXVALUE', 'MILLISECOND', 'MINVALUE', 'MOD', 'NEXT', 'OVERLAY', 'PAD', 'PI', 'PLACING', 'POWER', 'PRESERVE', 'RAND', 'REPLACE', 'RESTART', 'RETURNING', 'REVERSE', 'ROUND', 'RPAD', 'SCALAR_ARRAY', 'SEQUENCE', 'SIGN', 'SIN', 'SINH', 'SPACE', 'SQRT', 'TAN', 'TANH', 'TEMPORARY', 'TRUNC', 'WEEK', )) # Thanks Jonathan Lundell MYSQL = set(( 'ACCESSIBLE', 'ADD', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'AS', 'ASC', 'ASENSITIVE', 'BEFORE', 'BETWEEN', 'BIGINT', 'BINARY', 'BLOB', 'BOTH', 'BY', 'CALL', 'CASCADE', 'CASE', 'CHANGE', 'CHAR', 'CHARACTER', 'CHECK', 'COLLATE', 'COLUMN', 'CONDITION', 'CONSTRAINT', 'CONTINUE', 'CONVERT', 'CREATE', 'CROSS', 'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'DATABASE', 'DATABASES', 'DAY_HOUR', 'DAY_MICROSECOND', 'DAY_MINUTE', 'DAY_SECOND', 'DEC', 'DECIMAL', 'DECLARE', 'DEFAULT', 'DELAYED', 'DELETE', 'DESC', 'DESCRIBE', 'DETERMINISTIC', 'DISTINCT', 'DISTINCTROW', 'DIV', 'DOUBLE', 'DROP', 'DUAL', 'EACH', 'ELSE', 'ELSEIF', 'ENCLOSED', 'ESCAPED', 'EXISTS', 'EXIT', 'EXPLAIN', 'FALSE', 'FETCH', 'FLOAT', 'FLOAT4', 'FLOAT8', 'FOR', 'FORCE', 'FOREIGN', 'FROM', 'FULLTEXT', 'GRANT', 'GROUP', 'HAVING', 'HIGH_PRIORITY', 'HOUR_MICROSECOND', 'HOUR_MINUTE', 'HOUR_SECOND', 'IF', 'IGNORE', 'IGNORE_SERVER_IDS', 'IGNORE_SERVER_IDS', 'IN', 'INDEX', 'INFILE', 'INNER', 'INOUT', 'INSENSITIVE', 'INSERT', 'INT', 'INT1', 'INT2', 'INT3', 'INT4', 'INT8', 'INTEGER', 'INTERVAL', 'INTO', 'IS', 'ITERATE', 'JOIN', 'KEY', 'KEYS', 'KILL', 'LEADING', 'LEAVE', 'LEFT', 'LIKE', 'LIMIT', 'LINEAR', 'LINES', 'LOAD', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCK', 'LONG', 'LONGBLOB', 'LONGTEXT', 'LOOP', 'LOW_PRIORITY', 'MASTER_HEARTBEAT_PERIOD', 'MASTER_HEARTBEAT_PERIOD', 'MASTER_SSL_VERIFY_SERVER_CERT', 'MATCH', 'MAXVALUE', 'MAXVALUE', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'MIDDLEINT', 'MINUTE_MICROSECOND', 
'MINUTE_SECOND', 'MOD', 'MODIFIES', 'NATURAL', 'NO_WRITE_TO_BINLOG', 'NOT', 'NULL', 'NUMERIC', 'ON', 'OPTIMIZE', 'OPTION', 'OPTIONALLY', 'OR', 'ORDER', 'OUT', 'OUTER', 'OUTFILE', 'PRECISION', 'PRIMARY', 'PROCEDURE', 'PURGE', 'RANGE', 'READ', 'READ_WRITE', 'READS', 'REAL', 'REFERENCES', 'REGEXP', 'RELEASE', 'RENAME', 'REPEAT', 'REPLACE', 'REQUIRE', 'RESIGNAL', 'RESIGNAL', 'RESTRICT', 'RETURN', 'REVOKE', 'RIGHT', 'RLIKE', 'SCHEMA', 'SCHEMAS', 'SECOND_MICROSECOND', 'SELECT', 'SENSITIVE', 'SEPARATOR', 'SET', 'SHOW', 'SIGNAL', 'SIGNAL', 'SMALLINT', 'SPATIAL', 'SPECIFIC', 'SQL', 'SQL_BIG_RESULT', 'SQL_CALC_FOUND_ROWS', 'SQL_SMALL_RESULT', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNING', 'SSL', 'STARTING', 'STRAIGHT_JOIN', 'TABLE', 'TERMINATED', 'THEN', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'TO', 'TRAILING', 'TRIGGER', 'TRUE', 'UNDO', 'UNION', 'UNIQUE', 'UNLOCK', 'UNSIGNED', 'UPDATE', 'USAGE', 'USE', 'USING', 'UTC_DATE', 'UTC_TIME', 'UTC_TIMESTAMP', 'VALUES', 'VARBINARY', 'VARCHAR', 'VARCHARACTER', 'VARYING', 'WHEN', 'WHERE', 'WHILE', 'WITH', 'WRITE', 'XOR', 'YEAR_MONTH', 'ZEROFILL', )) MSSQL = set(( 'ADD', 'ALL', 'ALTER', 'AND', 'ANY', 'AS', 'ASC', 'AUTHORIZATION', 'BACKUP', 'BEGIN', 'BETWEEN', 'BREAK', 'BROWSE', 'BULK', 'BY', 'CASCADE', 'CASE', 'CHECK', 'CHECKPOINT', 'CLOSE', 'CLUSTERED', 'COALESCE', 'COLLATE', 'COLUMN', 'COMMIT', 'COMPUTE', 'CONSTRAINT', 'CONTAINS', 'CONTAINSTABLE', 'CONTINUE', 'CONVERT', 'CREATE', 'CROSS', 'CURRENT', 'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'DATABASE', 'DBCC', 'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DELETE', 'DENY', 'DESC', 'DISK', 'DISTINCT', 'DISTRIBUTED', 'DOUBLE', 'DROP', 'DUMMY', 'DUMP', 'ELSE', 'END', 'ERRLVL', 'ESCAPE', 'EXCEPT', 'EXEC', 'EXECUTE', 'EXISTS', 'EXIT', 'FETCH', 'FILE', 'FILLFACTOR', 'FOR', 'FOREIGN', 'FREETEXT', 'FREETEXTTABLE', 'FROM', 'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'HAVING', 'HOLDLOCK', 'IDENTITY', 'IDENTITY_INSERT', 'IDENTITYCOL', 'IF', 'IN', 'INDEX', 'INNER', 'INSERT', 'INTERSECT', 'INTO', 'IS', 'JOIN', 'KEY', 'KILL', 'LEFT', 'LIKE', 'LINENO', 'LOAD', 'NATIONAL ', 'NOCHECK', 'NONCLUSTERED', 'NOT', 'NULL', 'NULLIF', 'OF', 'OFF', 'OFFSETS', 'ON', 'OPEN', 'OPENDATASOURCE', 'OPENQUERY', 'OPENROWSET', 'OPENXML', 'OPTION', 'OR', 'ORDER', 'OUTER', 'OVER', 'PERCENT', 'PLAN', 'PRECISION', 'PRIMARY', 'PRINT', 'PROC', 'PROCEDURE', 'PUBLIC', 'RAISERROR', 'READ', 'READTEXT', 'RECONFIGURE', 'REFERENCES', 'REPLICATION', 'RESTORE', 'RESTRICT', 'RETURN', 'REVOKE', 'RIGHT', 'ROLLBACK', 'ROWCOUNT', 'ROWGUIDCOL', 'RULE', 'SAVE', 'SCHEMA', 'SELECT', 'SESSION_USER', 'SET', 'SETUSER', 'SHUTDOWN', 'SOME', 'STATISTICS', 'SYSTEM_USER', 'TABLE', 'TEXTSIZE', 'THEN', 'TO', 'TOP', 'TRAN', 'TRANSACTION', 'TRIGGER', 'TRUNCATE', 'TSEQUAL', 'UNION', 'UNIQUE', 'UPDATE', 'UPDATETEXT', 'USE', 'USER', 'VALUES', 'VARYING', 'VIEW', 'WAITFOR', 'WHEN', 'WHERE', 'WHILE', 'WITH', 'WRITETEXT', )) ORACLE = set(( 'ACCESS', 'ADD', 'ALL', 'ALTER', 'AND', 'ANY', 'AS', 'ASC', 'AUDIT', 'BETWEEN', 'BY', 'CHAR', 'CHECK', 'CLUSTER', 'COLUMN', 'COMMENT', 'COMPRESS', 'CONNECT', 'CREATE', 'CURRENT', 'DATE', 'DECIMAL', 'DEFAULT', 'DELETE', 'DESC', 'DISTINCT', 'DROP', 'ELSE', 'EXCLUSIVE', 'EXISTS', 'FILE', 'FLOAT', 'FOR', 'FROM', 'GRANT', 'GROUP', 'HAVING', 'IDENTIFIED', 'IMMEDIATE', 'IN', 'INCREMENT', 'INDEX', 'INITIAL', 'INSERT', 'INTEGER', 'INTERSECT', 'INTO', 'IS', 'LEVEL', 'LIKE', 'LOCK', 'LONG', 'MAXEXTENTS', 'MINUS', 'MLSLABEL', 'MODE', 'MODIFY', 'NOAUDIT', 'NOCOMPRESS', 'NOT', 'NOWAIT', 'NULL', 'NUMBER', 'OF', 'OFFLINE', 'ON', 'ONLINE', 'OPTION', 
'OR', 'ORDER', 'PCTFREE', 'PRIOR', 'PRIVILEGES', 'PUBLIC', 'RAW', 'RENAME', 'RESOURCE', 'REVOKE', 'ROW', 'ROWID', 'ROWNUM', 'ROWS', 'SELECT', 'SESSION', 'SET', 'SHARE', 'SIZE', 'SMALLINT', 'START', 'SUCCESSFUL', 'SYNONYM', 'SYSDATE', 'TABLE', 'THEN', 'TO', 'TRIGGER', 'UID', 'UNION', 'UNIQUE', 'UPDATE', 'USER', 'VALIDATE', 'VALUES', 'VARCHAR', 'VARCHAR2', 'VIEW', 'WHENEVER', 'WHERE', 'WITH', )) SQLITE = set(( 'ABORT', 'ACTION', 'ADD', 'AFTER', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'AS', 'ASC', 'ATTACH', 'AUTOINCREMENT', 'BEFORE', 'BEGIN', 'BETWEEN', 'BY', 'CASCADE', 'CASE', 'CAST', 'CHECK', 'COLLATE', 'COLUMN', 'COMMIT', 'CONFLICT', 'CONSTRAINT', 'CREATE', 'CROSS', 'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'DATABASE', 'DEFAULT', 'DEFERRABLE', 'DEFERRED', 'DELETE', 'DESC', 'DETACH', 'DISTINCT', 'DROP', 'EACH', 'ELSE', 'END', 'ESCAPE', 'EXCEPT', 'EXCLUSIVE', 'EXISTS', 'EXPLAIN', 'FAIL', 'FOR', 'FOREIGN', 'FROM', 'FULL', 'GLOB', 'GROUP', 'HAVING', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INDEX', 'INDEXED', 'INITIALLY', 'INNER', 'INSERT', 'INSTEAD', 'INTERSECT', 'INTO', 'IS', 'ISNULL', 'JOIN', 'KEY', 'LEFT', 'LIKE', 'LIMIT', 'MATCH', 'NATURAL', 'NO', 'NOT', 'NOTNULL', 'NULL', 'OF', 'OFFSET', 'ON', 'OR', 'ORDER', 'OUTER', 'PLAN', 'PRAGMA', 'PRIMARY', 'QUERY', 'RAISE', 'REFERENCES', 'REGEXP', 'REINDEX', 'RELEASE', 'RENAME', 'REPLACE', 'RESTRICT', 'RIGHT', 'ROLLBACK', 'ROW', 'SAVEPOINT', 'SELECT', 'SET', 'TABLE', 'TEMP', 'TEMPORARY', 'THEN', 'TO', 'TRANSACTION', 'TRIGGER', 'UNION', 'UNIQUE', 'UPDATE', 'USING', 'VACUUM', 'VALUES', 'VIEW', 'VIRTUAL', 'WHEN', 'WHERE', )) MONGODB_NONRESERVED = set(('SAFE',)) # remove from here when you add a list. JDBCSQLITE = SQLITE DB2 = INFORMIX = INGRES = JDBCPOSTGRESQL = COMMON ADAPTERS = { 'sqlite': SQLITE, 'mysql': MYSQL, 'postgres': POSTGRESQL, 'postgres_nonreserved': POSTGRESQL_NONRESERVED, 'oracle': ORACLE, 'mssql': MSSQL, 'mssql2': MSSQL, 'db2': DB2, 'informix': INFORMIX, 'firebird': FIREBIRD, 'firebird_embedded': FIREBIRD, 'firebird_nonreserved': FIREBIRD_NONRESERVED, 'ingres': INGRES, 'ingresu': INGRES, 'jdbc:sqlite': JDBCSQLITE, 'jdbc:postgres': JDBCPOSTGRESQL, 'common': COMMON, 'mongodb_nonreserved': MONGODB_NONRESERVED } ADAPTERS['all'] = reduce(lambda a, b: a.union(b), ( x for x in ADAPTERS.values()))
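# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Assuming the
# keyword sets above are importable as ``reserved_sql_keywords`` (an assumed
# module name), a tiny validator can reject table or column names that clash
# with a backend's reserved words; lookups are plain set membership on the
# upper-cased identifier.
from reserved_sql_keywords import ADAPTERS, COMMON  # assumed module name

def is_reserved(identifier, adapter='common'):
    "Return True if identifier collides with the adapter's reserved words."
    keywords = ADAPTERS.get(adapter, COMMON)
    return identifier.upper() in keywords

print(is_reserved('select'))                # True for every dialect
print(is_reserved('user', 'postgres'))      # True: reserved in PostgreSQL
print(is_reserved('user', 'sqlite'))        # False: usable as a name in SQLite
print(is_reserved('password', 'postgres'))  # False: not in the reserved set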
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>, limodou <limodou@gmail.com> and srackham <srackham@gmail.com>. License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import os import sys import code import logging import types import re import optparse import glob import traceback import fileutils from settings import global_settings from utils import web2py_uuid from compileapp import build_environment, read_pyc, run_models_in from restricted import RestrictedError from globals import Request, Response, Session from storage import Storage from admin import w2p_unpack from dal import BaseAdapter logger = logging.getLogger("web2py") def exec_environment( pyfile='', request=None, response=None, session=None, ): """ .. function:: gluon.shell.exec_environment([pyfile=''[, request=Request() [, response=Response[, session=Session()]]]]) Environment builder and module loader. Builds a web2py environment and optionally executes a Python file into the environment. A Storage dictionary containing the resulting environment is returned. The working directory must be web2py root -- this is the web2py default. """ if request is None: request = Request() if response is None: response = Response() if session is None: session = Session() if request.folder is None: mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile) if mo: appname = mo.group('appname') request.folder = os.path.join('applications', appname) else: request.folder = '' env = build_environment(request, response, session, store_current=False) if pyfile: pycfile = pyfile + 'c' if os.path.isfile(pycfile): exec read_pyc(pycfile) in env else: execfile(pyfile, env) return Storage(env) def env( a, import_models=False, c=None, f=None, dir='', extra_request={}, ): """ Return web2py execution environment for application (a), controller (c), function (f). If import_models is True the exec all application models into the environment. extra_request allows you to pass along any extra variables to the request object before your models get executed. This was mainly done to support web2py_utils.test_runner, however you can use it with any wrapper scripts that need access to the web2py environment. """ request = Request() response = Response() session = Session() request.application = a # Populate the dummy environment with sensible defaults. if not dir: request.folder = os.path.join('applications', a) else: request.folder = dir request.controller = c or 'default' request.function = f or 'index' response.view = '%s/%s.html' % (request.controller, request.function) request.env.path_info = '/%s/%s/%s' % (a, c, f) request.env.http_host = '127.0.0.1:8000' request.env.remote_addr = '127.0.0.1' request.env.web2py_runtime_gae = global_settings.web2py_runtime_gae for k, v in extra_request.items(): request[k] = v # Monkey patch so credentials checks pass. 
def check_credentials(request, other_application='admin'): return True fileutils.check_credentials = check_credentials environment = build_environment(request, response, session) if import_models: try: run_models_in(environment) except RestrictedError, e: sys.stderr.write(e.traceback + '\n') sys.exit(1) environment['__name__'] = '__main__' return environment def exec_pythonrc(): pythonrc = os.environ.get('PYTHONSTARTUP') if pythonrc and os.path.isfile(pythonrc): def execfile_getlocals(file): execfile(file) return locals() try: return execfile_getlocals(pythonrc) except NameError: pass return dict() def run( appname, plain=False, import_models=False, startfile=None, bpython=False, python_code=False ): """ Start interactive shell or run Python script (startfile) in web2py controller environment. appname is formatted like: a web2py application name a/c exec the controller c into the application environment """ (a, c, f) = parse_path_info(appname) errmsg = 'invalid application name: %s' % appname if not a: die(errmsg) adir = os.path.join('applications', a) if not os.path.exists(adir): if sys.stdin and not sys.stdin.name == '/dev/null': confirm = raw_input( 'application %s does not exist, create (y/n)?' % a) else: logging.warn('application does not exist and will not be created') return if confirm.lower() in ['y', 'yes']: os.mkdir(adir) w2p_unpack('welcome.w2p', adir) for subfolder in ['models', 'views', 'controllers', 'databases', 'modules', 'cron', 'errors', 'sessions', 'languages', 'static', 'private', 'uploads']: subpath = os.path.join(adir, subfolder) if not os.path.exists(subpath): os.mkdir(subpath) db = os.path.join(adir, 'models/db.py') if os.path.exists(db): data = fileutils.read_file(db) data = data.replace( '<your secret key>', 'sha512:' + web2py_uuid()) fileutils.write_file(db, data) if c: import_models = True _env = env(a, c=c, f=f, import_models=import_models) if c: cfile = os.path.join('applications', a, 'controllers', c + '.py') if not os.path.isfile(cfile): cfile = os.path.join('applications', a, 'compiled', "controllers_%s_%s.pyc" % (c, f)) if not os.path.isfile(cfile): die(errmsg) else: exec read_pyc(cfile) in _env else: execfile(cfile, _env) if f: exec ('print %s()' % f, _env) return _env.update(exec_pythonrc()) if startfile: try: ccode = None if startfile.endswith('.pyc'): ccode = read_pyc(startfile) exec ccode in _env else: execfile(startfile, _env) if import_models: BaseAdapter.close_all_instances('commit') except Exception, e: print traceback.format_exc() if import_models: BaseAdapter.close_all_instances('rollback') elif python_code: try: exec(python_code, _env) if import_models: BaseAdapter.close_all_instances('commit') except Exception, e: print traceback.format_exc() if import_models: BaseAdapter.close_all_instances('rollback') else: if not plain: if bpython: try: import bpython bpython.embed(locals_=_env) return except: logger.warning( 'import bpython error; trying ipython...') else: try: import IPython if IPython.__version__ >= '0.11': from IPython.frontend.terminal.embed import InteractiveShellEmbed shell = InteractiveShellEmbed(user_ns=_env) shell() return else: # following 2 lines fix a problem with # IPython; thanks Michael Toomim if '__builtins__' in _env: del _env['__builtins__'] shell = IPython.Shell.IPShell(argv=[], user_ns=_env) shell.mainloop() return except: logger.warning( 'import IPython error; use default python shell') try: import readline import rlcompleter except ImportError: pass else: readline.set_completer(rlcompleter.Completer(_env).complete) 
readline.parse_and_bind('tab:complete') code.interact(local=_env) def parse_path_info(path_info): """ Parse path info formatted like a/c/f where c and f are optional and a leading / accepted. Return tuple (a, c, f). If invalid path_info a is set to None. If c or f are omitted they are set to None. """ mo = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$', path_info) if mo: return (mo.group('a'), mo.group('c'), mo.group('f')) else: return (None, None, None) def die(msg): print >> sys.stderr, msg sys.exit(1) def test(testpath, import_models=True, verbose=False): """ Run doctests in web2py environment. testpath is formatted like: a tests all controllers in application a a/c tests controller c in application a a/c/f test function f in controller c, application a Where a, c and f are application, controller and function names respectively. If the testpath is a file name the file is tested. If a controller is specified models are executed by default. """ import doctest if os.path.isfile(testpath): mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath) if not mo: die('test file is not in application directory: %s' % testpath) a = mo.group('a') c = f = None files = [testpath] else: (a, c, f) = parse_path_info(testpath) errmsg = 'invalid test path: %s' % testpath if not a: die(errmsg) cdir = os.path.join('applications', a, 'controllers') if not os.path.isdir(cdir): die(errmsg) if c: cfile = os.path.join(cdir, c + '.py') if not os.path.isfile(cfile): die(errmsg) files = [cfile] else: files = glob.glob(os.path.join(cdir, '*.py')) for testfile in files: globs = env(a, import_models) ignores = globs.keys() execfile(testfile, globs) def doctest_object(name, obj): """doctest obj and enclosed methods and classes.""" if type(obj) in (types.FunctionType, types.TypeType, types.ClassType, types.MethodType, types.UnboundMethodType): # Reload environment before each test. globs = env(a, c=c, f=f, import_models=import_models) execfile(testfile, globs) doctest.run_docstring_examples(obj, globs=globs, name='%s: %s' % (os.path.basename(testfile), name), verbose=verbose) if type(obj) in (types.TypeType, types.ClassType): for attr_name in dir(obj): # Execute . operator so decorators are executed. o = eval('%s.%s' % (name, attr_name), globs) doctest_object(attr_name, o) for (name, obj) in globs.items(): if name not in ignores and (f is None or f == name): doctest_object(name, obj) def get_usage(): usage = """ %prog [options] pythonfile """ return usage def execute_from_command_line(argv=None): if argv is None: argv = sys.argv parser = optparse.OptionParser(usage=get_usage()) parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME', help='run web2py in interactive shell or IPython(if installed) ' + 'with specified appname') msg = 'run web2py in interactive shell or bpython (if installed) with' msg += ' specified appname (if app does not exist it will be created).' 
msg += '\n Use combined with --shell' parser.add_option( '-B', '--bpython', action='store_true', default=False, dest='bpython', help=msg, ) parser.add_option( '-P', '--plain', action='store_true', default=False, dest='plain', help='only use plain python shell, should be used with --shell option', ) parser.add_option( '-M', '--import_models', action='store_true', default=False, dest='import_models', help='auto import model files, default is False, ' + ' should be used with --shell option', ) parser.add_option( '-R', '--run', dest='run', metavar='PYTHON_FILE', default='', help='run PYTHON_FILE in web2py environment, ' + 'should be used with --shell option', ) (options, args) = parser.parse_args(argv[1:]) if len(sys.argv) == 1: parser.print_help() sys.exit(0) if len(args) > 0: startfile = args[0] else: startfile = '' run(options.shell, options.plain, startfile=startfile, bpython=options.bpython) if __name__ == '__main__': execute_from_command_line()
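# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Besides the
# command-line entry point above (e.g. ``-S appname -M`` for an interactive
# shell with models imported), the environment builder can be used
# programmatically. This assumes the module is importable as ``shell``, that
# the working directory is the web2py root, and that an application named
# ``welcome`` is installed -- all assumptions for this example.
from shell import env  # assumed module name

# Build a web2py execution environment for app 'welcome', controller
# 'default', function 'index' (pass import_models=True to also run the
# application's model files).
_env = env('welcome', import_models=False, c='default', f='index')

print(_env['request'].application)   # -> 'welcome'
print(_env['response'].view)         # -> 'default/index.html'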
# -*- coding: utf-8 -*- # This file is part of the Rocket Web Server # Copyright (c) 2011 Timothy Farrell # Modified by Massimo Di Pierro # Import System Modules import sys import errno import socket import logging import platform # Define Constants VERSION = '1.2.6' SERVER_NAME = socket.gethostname() SERVER_SOFTWARE = 'Rocket %s' % VERSION HTTP_SERVER_SOFTWARE = '%s Python/%s' % ( SERVER_SOFTWARE, sys.version.split(' ')[0]) BUF_SIZE = 16384 SOCKET_TIMEOUT = 10 # in secs THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message? IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET]) DEFAULT_LISTEN_QUEUE_SIZE = 5 DEFAULT_MIN_THREADS = 10 DEFAULT_MAX_THREADS = 0 DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE, MIN_THREADS=DEFAULT_MIN_THREADS, MAX_THREADS=DEFAULT_MAX_THREADS) PY3K = sys.version_info[0] > 2 class NullHandler(logging.Handler): "A Logging handler to prevent library errors." def emit(self, record): pass if PY3K: def b(val): """ Convert string/unicode/bytes literals into bytes. This allows for the same code to run on Python 2.x and 3.x. """ if isinstance(val, str): return val.encode() else: return val def u(val, encoding="us-ascii"): """ Convert bytes into string/unicode. This allows for the same code to run on Python 2.x and 3.x. """ if isinstance(val, bytes): return val.decode(encoding) else: return val else: def b(val): """ Convert string/unicode/bytes literals into bytes. This allows for the same code to run on Python 2.x and 3.x. """ if isinstance(val, unicode): return val.encode() else: return val def u(val, encoding="us-ascii"): """ Convert bytes into string/unicode. This allows for the same code to run on Python 2.x and 3.x. """ if isinstance(val, str): return val.decode(encoding) else: return val # Import Package Modules # package imports removed in monolithic build __all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE', 'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u', 'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler'] # Monolithic build...end of module: rocket/__init__.py # Monolithic build...start of module: rocket/connection.py # Import System Modules import sys import time import socket try: import ssl has_ssl = True except ImportError: has_ssl = False # Import Package Modules # package imports removed in monolithic build # TODO - This part is still very experimental. #from .filelike import FileLikeSocket class Connection(object): __slots__ = [ 'setblocking', 'sendall', 'shutdown', 'makefile', 'fileno', 'client_addr', 'client_port', 'server_port', 'socket', 'start_time', 'ssl', 'secure', 'recv', 'send', 'read', 'write' ] def __init__(self, sock_tuple, port, secure=False): self.client_addr, self.client_port = sock_tuple[1][:2] self.server_port = port self.socket = sock_tuple[0] self.start_time = time.time() self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket) self.secure = secure if IS_JYTHON: # In Jython we must set TCP_NODELAY here since it does not # inherit from the listening socket. 
# See: http://bugs.jython.org/issue1309 self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.socket.settimeout(SOCKET_TIMEOUT) self.shutdown = self.socket.shutdown self.fileno = self.socket.fileno self.setblocking = self.socket.setblocking self.recv = self.socket.recv self.send = self.socket.send self.makefile = self.socket.makefile if sys.platform == 'darwin': self.sendall = self._sendall_darwin else: self.sendall = self.socket.sendall def _sendall_darwin(self, buf): pending = len(buf) offset = 0 while pending: try: sent = self.socket.send(buf[offset:]) pending -= sent offset += sent except socket.error: import errno info = sys.exc_info() if info[1].args[0] != errno.EAGAIN: raise return offset # FIXME - this is not ready for prime-time yet. # def makefile(self, buf_size=BUF_SIZE): # return FileLikeSocket(self, buf_size) def close(self): if hasattr(self.socket, '_sock'): try: self.socket._sock.close() except socket.error: info = sys.exc_info() if info[1].args[0] != socket.EBADF: raise info[1] else: pass self.socket.close() # Monolithic build...end of module: rocket/connection.py # Monolithic build...start of module: rocket/filelike.py # Import System Modules import socket try: from io import StringIO except ImportError: try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # Import Package Modules # package imports removed in monolithic build class FileLikeSocket(object): def __init__(self, conn, buf_size=BUF_SIZE): self.conn = conn self.buf_size = buf_size self.buffer = StringIO() self.content_length = None if self.conn.socket.gettimeout() == 0.0: self.read = self.non_blocking_read else: self.read = self.blocking_read def __iter__(self): return self def recv(self, size): while True: try: return self.conn.recv(size) except socket.error: exc = sys.exc_info() e = exc[1] # FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr if (e.args[0] not in set()): raise def next(self): data = self.readline() if data == '': raise StopIteration return data def non_blocking_read(self, size=None): # Shamelessly adapted from Cherrypy! 
bufr = self.buffer bufr.seek(0, 2) if size is None: while True: data = self.recv(self.buf_size) if not data: break bufr.write(data) self.buffer = StringIO() return bufr.getvalue() else: buf_len = self.buffer.tell() if buf_len >= size: bufr.seek(0) data = bufr.read(size) self.buffer = StringIO(bufr.read()) return data self.buffer = StringIO() while True: remaining = size - buf_len data = self.recv(remaining) if not data: break n = len(data) if n == size and not buf_len: return data if n == remaining: bufr.write(data) del data break bufr.write(data) buf_len += n del data return bufr.getvalue() def blocking_read(self, length=None): if length is None: if self.content_length is not None: length = self.content_length else: length = 1 try: data = self.conn.recv(length) except: data = b('') return data def readline(self): data = b("") char = self.read(1) while char != b('\n') and char is not b(''): line = repr(char) data += char char = self.read(1) data += char return data def readlines(self, hint="ignored"): return list(self) def close(self): self.conn = None self.content_length = None # Monolithic build...end of module: rocket/filelike.py # Monolithic build...start of module: rocket/futures.py # Import System Modules import time try: from concurrent.futures import Future, ThreadPoolExecutor from concurrent.futures.thread import _WorkItem has_futures = True except ImportError: has_futures = False class Future: pass class ThreadPoolExecutor: pass class _WorkItem: pass class WSGIFuture(Future): def __init__(self, f_dict, *args, **kwargs): Future.__init__(self, *args, **kwargs) self.timeout = None self._mem_dict = f_dict self._lifespan = 30 self._name = None self._start_time = time.time() def set_running_or_notify_cancel(self): if time.time() - self._start_time >= self._lifespan: self.cancel() else: return super(WSGIFuture, self).set_running_or_notify_cancel() def remember(self, name, lifespan=None): self._lifespan = lifespan or self._lifespan if name in self._mem_dict: raise NameError('Cannot remember future by name "%s". 
' % name + 'A future already exists with that name.') self._name = name self._mem_dict[name] = self return self def forget(self): if self._name in self._mem_dict and self._mem_dict[self._name] is self: del self._mem_dict[self._name] self._name = None class _WorkItem(object): def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn self.args = args self.kwargs = kwargs def run(self): if not self.future.set_running_or_notify_cancel(): return try: result = self.fn(*self.args, **self.kwargs) except BaseException: e = sys.exc_info()[1] self.future.set_exception(e) else: self.future.set_result(result) class WSGIExecutor(ThreadPoolExecutor): multithread = True multiprocess = False def __init__(self, *args, **kwargs): ThreadPoolExecutor.__init__(self, *args, **kwargs) self.futures = dict() def submit(self, fn, *args, **kwargs): if self._shutdown_lock.acquire(): if self._shutdown: self._shutdown_lock.release() raise RuntimeError( 'Cannot schedule new futures after shutdown') f = WSGIFuture(self.futures) w = _WorkItem(f, fn, args, kwargs) self._work_queue.put(w) self._adjust_thread_count() self._shutdown_lock.release() return f else: return False class FuturesMiddleware(object): "Futures middleware that adds a Futures Executor to the environment" def __init__(self, app, threads=5): self.app = app self.executor = WSGIExecutor(threads) def __call__(self, environ, start_response): environ["wsgiorg.executor"] = self.executor environ["wsgiorg.futures"] = self.executor.futures return self.app(environ, start_response) # Monolithic build...end of module: rocket/futures.py # Monolithic build...start of module: rocket/listener.py # Import System Modules import os import socket import logging import traceback from threading import Thread try: import ssl from ssl import SSLError has_ssl = True except ImportError: has_ssl = False class SSLError(socket.error): pass # Import Package Modules # package imports removed in monolithic build class Listener(Thread): """The Listener class is a class responsible for accepting connections and queuing them to be processed by a worker thread.""" def __init__(self, interface, queue_size, active_queue, *args, **kwargs): Thread.__init__(self, *args, **kwargs) # Instance variables self.active_queue = active_queue self.interface = interface self.addr = interface[0] self.port = interface[1] self.secure = len(interface) >= 4 self.clientcert_req = (len(interface) == 5 and interface[4]) self.thread = None self.ready = False # Error Log self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port) self.err_log.addHandler(NullHandler()) # Build the socket if ':' in self.addr: listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) else: listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if not listener: self.err_log.error("Failed to get socket.") return if self.secure: if not has_ssl: self.err_log.error("ssl module required to serve HTTPS.") return elif not os.path.exists(interface[2]): data = (interface[2], interface[0], interface[1]) self.err_log.error("Cannot find key file " "'%s'. Cannot bind to %s:%s" % data) return elif not os.path.exists(interface[3]): data = (interface[3], interface[0], interface[1]) self.err_log.error("Cannot find certificate file " "'%s'. Cannot bind to %s:%s" % data) return if self.clientcert_req and not os.path.exists(interface[4]): data = (interface[4], interface[0], interface[1]) self.err_log.error("Cannot find root ca certificate file " "'%s'. 
Cannot bind to %s:%s" % data) return # Set socket options try: listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except: msg = "Cannot share socket. Using %s:%i exclusively." self.err_log.warning(msg % (self.addr, self.port)) try: if not IS_JYTHON: listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) except: msg = "Cannot set TCP_NODELAY, things might run a little slower" self.err_log.warning(msg) try: listener.bind((self.addr, self.port)) except: msg = "Socket %s:%i in use by other process and it won't share." self.err_log.error(msg % (self.addr, self.port)) else: # We want socket operations to timeout periodically so we can # check if the server is shutting down listener.settimeout(THREAD_STOP_CHECK_INTERVAL) # Listen for new connections allowing queue_size number of # connections to wait before rejecting a connection. listener.listen(queue_size) self.listener = listener self.ready = True def wrap_socket(self, sock): try: if self.clientcert_req: ca_certs = self.interface[4] cert_reqs = ssl.CERT_OPTIONAL sock = ssl.wrap_socket(sock, keyfile=self.interface[2], certfile=self.interface[3], server_side=True, cert_reqs=cert_reqs, ca_certs=ca_certs, ssl_version=ssl.PROTOCOL_SSLv23) else: sock = ssl.wrap_socket(sock, keyfile=self.interface[2], certfile=self.interface[3], server_side=True, ssl_version=ssl.PROTOCOL_SSLv23) except SSLError: # Generally this happens when an HTTP request is received on a # secure socket. We don't do anything because it will be detected # by Worker and dealt with appropriately. pass return sock def start(self): if not self.ready: self.err_log.warning('Listener started when not ready.') return if self.thread is not None and self.thread.isAlive(): self.err_log.warning('Listener already running.') return self.thread = Thread(target=self.listen, name="Port" + str(self.port)) self.thread.start() def isAlive(self): if self.thread is None: return False return self.thread.isAlive() def join(self): if self.thread is None: return self.ready = False self.thread.join() del self.thread self.thread = None self.ready = True def listen(self): if __debug__: self.err_log.debug('Entering main loop.') while True: try: sock, addr = self.listener.accept() if self.secure: sock = self.wrap_socket(sock) self.active_queue.put(((sock, addr), self.interface[1], self.secure)) except socket.timeout: # socket.timeout will be raised every # THREAD_STOP_CHECK_INTERVAL seconds. When that happens, # we check if it's time to die. 
if not self.ready: if __debug__: self.err_log.debug('Listener exiting.') return else: continue except: self.err_log.error(traceback.format_exc()) # Monolithic build...end of module: rocket/listener.py # Monolithic build...start of module: rocket/main.py # Import System Modules import sys import time import socket import logging import traceback from threading import Lock try: from queue import Queue except ImportError: from Queue import Queue # Import Package Modules # package imports removed in monolithic build # Setup Logging log = logging.getLogger('Rocket') log.addHandler(NullHandler()) class Rocket(object): """The Rocket class is responsible for handling threads and accepting and dispatching connections.""" def __init__(self, interfaces=('127.0.0.1', 8000), method='wsgi', app_info=None, min_threads=None, max_threads=None, queue_size=None, timeout=600, handle_signals=True): self.handle_signals = handle_signals self.startstop_lock = Lock() self.timeout = timeout if not isinstance(interfaces, list): self.interfaces = [interfaces] else: self.interfaces = interfaces if min_threads is None: min_threads = DEFAULTS['MIN_THREADS'] if max_threads is None: max_threads = DEFAULTS['MAX_THREADS'] if not queue_size: if hasattr(socket, 'SOMAXCONN'): queue_size = socket.SOMAXCONN else: queue_size = DEFAULTS['LISTEN_QUEUE_SIZE'] if max_threads and queue_size > max_threads: queue_size = max_threads if isinstance(app_info, dict): app_info['server_software'] = SERVER_SOFTWARE self.monitor_queue = Queue() self.active_queue = Queue() self._threadpool = ThreadPool(get_method(method), app_info=app_info, active_queue=self.active_queue, monitor_queue=self.monitor_queue, min_threads=min_threads, max_threads=max_threads) # Build our socket listeners self.listeners = [Listener( i, queue_size, self.active_queue) for i in self.interfaces] for ndx in range(len(self.listeners) - 1, 0, -1): if not self.listeners[ndx].ready: del self.listeners[ndx] if not self.listeners: log.critical("No interfaces to listen on...closing.") sys.exit(1) def _sigterm(self, signum, frame): log.info('Received SIGTERM') self.stop() def _sighup(self, signum, frame): log.info('Received SIGHUP') self.restart() def start(self, background=False): log.info('Starting %s' % SERVER_SOFTWARE) self.startstop_lock.acquire() try: # Set up our shutdown signals if self.handle_signals: try: import signal signal.signal(signal.SIGTERM, self._sigterm) signal.signal(signal.SIGUSR1, self._sighup) except: log.debug('This platform does not support signals.') # Start our worker threads self._threadpool.start() # Start our monitor thread self._monitor = Monitor(self.monitor_queue, self.active_queue, self.timeout, self._threadpool) self._monitor.setDaemon(True) self._monitor.start() # I know that EXPR and A or B is bad but I'm keeping it for Py2.4 # compatibility. 
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '') msg = 'Listening on sockets: ' msg += ', '.join( ['%s:%i%s' % str_extract(l) for l in self.listeners]) log.info(msg) for l in self.listeners: l.start() finally: self.startstop_lock.release() if background: return while self._monitor.isAlive(): try: time.sleep(THREAD_STOP_CHECK_INTERVAL) except KeyboardInterrupt: # Capture a keyboard interrupt when running from a console break except: if self._monitor.isAlive(): log.error(traceback.format_exc()) continue return self.stop() def stop(self, stoplogging=False): log.info('Stopping %s' % SERVER_SOFTWARE) self.startstop_lock.acquire() try: # Stop listeners for l in self.listeners: l.ready = False # Encourage a context switch time.sleep(0.01) for l in self.listeners: if l.isAlive(): l.join() # Stop Monitor self._monitor.stop() if self._monitor.isAlive(): self._monitor.join() # Stop Worker threads self._threadpool.stop() if stoplogging: logging.shutdown() msg = "Calling logging.shutdown() is now the responsibility of \ the application developer. Please update your \ applications to no longer call rocket.stop(True)" try: import warnings raise warnings.DeprecationWarning(msg) except ImportError: raise RuntimeError(msg) finally: self.startstop_lock.release() def restart(self): self.stop() self.start() def CherryPyWSGIServer(bind_addr, wsgi_app, numthreads=10, server_name=None, max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5): """ A Cherrypy wsgiserver-compatible wrapper. """ max_threads = max if max_threads < 0: max_threads = 0 return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app}, min_threads=numthreads, max_threads=max_threads, queue_size=request_queue_size, timeout=timeout) # Monolithic build...end of module: rocket/main.py # Monolithic build...start of module: rocket/monitor.py # Import System Modules import time import logging import select from threading import Thread # Import Package Modules # package imports removed in monolithic build class Monitor(Thread): # Monitor worker class. def __init__(self, monitor_queue, active_queue, timeout, threadpool, *args, **kwargs): Thread.__init__(self, *args, **kwargs) self._threadpool = threadpool # Instance Variables self.monitor_queue = monitor_queue self.active_queue = active_queue self.timeout = timeout self.log = logging.getLogger('Rocket.Monitor') self.log.addHandler(NullHandler()) self.connections = set() self.active = False def run(self): self.active = True conn_list = list() list_changed = False # We need to make sure the queue is empty before we start while not self.monitor_queue.empty(): self.monitor_queue.get() if __debug__: self.log.debug('Entering monitor loop.') # Enter thread main loop while self.active: # Move the queued connections to the selection pool while not self.monitor_queue.empty(): if __debug__: self.log.debug('In "receive timed-out connections" loop.') c = self.monitor_queue.get() if c is None: # A non-client is a signal to die if __debug__: self.log.debug('Received a death threat.') self.stop() break self.log.debug('Received a timed out connection.') if __debug__: assert(c not in self.connections) if IS_JYTHON: # Jython requires a socket to be in Non-blocking mode in # order to select on it. 
c.setblocking(False) if __debug__: self.log.debug('Adding connection to monitor list.') self.connections.add(c) list_changed = True # Wait on those connections if list_changed: conn_list = list(self.connections) list_changed = False try: if len(conn_list): readable = select.select(conn_list, [], [], THREAD_STOP_CHECK_INTERVAL)[0] else: time.sleep(THREAD_STOP_CHECK_INTERVAL) readable = [] if not self.active: break # If we have any readable connections, put them back for r in readable: if __debug__: self.log.debug('Restoring readable connection') if IS_JYTHON: # Jython requires a socket to be in Non-blocking mode in # order to select on it, but the rest of the code requires # that it be in blocking mode. r.setblocking(True) r.start_time = time.time() self.active_queue.put(r) self.connections.remove(r) list_changed = True except: if self.active: raise else: break # If we have any stale connections, kill them off. if self.timeout: now = time.time() stale = set() for c in self.connections: if (now - c.start_time) >= self.timeout: stale.add(c) for c in stale: if __debug__: # "EXPR and A or B" kept for Py2.4 compatibility data = ( c.client_addr, c.server_port, c.ssl and '*' or '') self.log.debug( 'Flushing stale connection: %s:%i%s' % data) self.connections.remove(c) list_changed = True try: c.close() finally: del c # Dynamically resize the threadpool to adapt to our changing needs. self._threadpool.dynamic_resize() def stop(self): self.active = False if __debug__: self.log.debug('Flushing waiting connections') while self.connections: c = self.connections.pop() try: c.close() finally: del c if __debug__: self.log.debug('Flushing queued connections') while not self.monitor_queue.empty(): c = self.monitor_queue.get() if c is None: continue try: c.close() finally: del c # Place a None sentry value to cause the monitor to die. self.monitor_queue.put(None) # Monolithic build...end of module: rocket/monitor.py # Monolithic build...start of module: rocket/threadpool.py # Import System Modules import logging # Import Package Modules # package imports removed in monolithic build # Setup Logging log = logging.getLogger('Rocket.Errors.ThreadPool') log.addHandler(NullHandler()) class ThreadPool: """The ThreadPool class is a container class for all the worker threads. 
It manages the number of actively running threads.""" def __init__(self, method, app_info, active_queue, monitor_queue, min_threads=DEFAULTS['MIN_THREADS'], max_threads=DEFAULTS['MAX_THREADS'], ): if __debug__: log.debug("Initializing ThreadPool.") self.check_for_dead_threads = 0 self.active_queue = active_queue self.worker_class = method self.min_threads = min_threads self.max_threads = max_threads self.monitor_queue = monitor_queue self.stop_server = False self.alive = False # TODO - Optimize this based on some real-world usage data self.grow_threshold = int(max_threads / 10) + 2 if not isinstance(app_info, dict): app_info = dict() if has_futures and app_info.get('futures'): app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'], 2])) app_info.update(max_threads=max_threads, min_threads=min_threads) self.min_threads = min_threads self.app_info = app_info self.threads = set() def start(self): self.stop_server = False if __debug__: log.debug("Starting threads.") self.grow(self.min_threads) self.alive = True def stop(self): self.alive = False if __debug__: log.debug("Stopping threads.") self.stop_server = True # Prompt the threads to die self.shrink(len(self.threads)) # Stop futures initially if has_futures and self.app_info.get('futures'): if __debug__: log.debug("Future executor is present. Python will not " "exit until all jobs have finished.") self.app_info['executor'].shutdown(wait=False) # Give them the gun #active_threads = [t for t in self.threads if t.isAlive()] #while active_threads: # t = active_threads.pop() # t.kill() # Wait until they pull the trigger for t in self.threads: if t.isAlive(): t.join() # Clean up the mess self.bring_out_your_dead() def bring_out_your_dead(self): # Remove dead threads from the pool dead_threads = [t for t in self.threads if not t.isAlive()] for t in dead_threads: if __debug__: log.debug("Removing dead thread: %s." % t.getName()) try: # Py2.4 complains here so we put it in a try block self.threads.remove(t) except: pass self.check_for_dead_threads -= len(dead_threads) def grow(self, amount=None): if self.stop_server: return if not amount: amount = self.max_threads if self.alive: amount = min([amount, self.max_threads - len(self.threads)]) if __debug__: log.debug("Growing by %i." % amount) for x in range(amount): worker = self.worker_class(self.app_info, self.active_queue, self.monitor_queue) worker.setDaemon(True) self.threads.add(worker) worker.start() def shrink(self, amount=1): if __debug__: log.debug("Shrinking by %i." % amount) self.check_for_dead_threads += amount for x in range(amount): self.active_queue.put(None) def dynamic_resize(self): if (self.max_threads > self.min_threads or self.max_threads == 0): if self.check_for_dead_threads > 0: self.bring_out_your_dead() queueSize = self.active_queue.qsize() threadCount = len(self.threads) if __debug__: log.debug("Examining ThreadPool. 
%i threads and %i Q'd conxions" % (threadCount, queueSize)) if queueSize == 0 and threadCount > self.min_threads: self.shrink() elif queueSize > self.grow_threshold: self.grow(queueSize) # Monolithic build...end of module: rocket/threadpool.py # Monolithic build...start of module: rocket/worker.py # Import System Modules import re import sys import socket import logging import traceback from wsgiref.headers import Headers from threading import Thread from datetime import datetime try: from urllib import unquote except ImportError: from urllib.parse import unquote try: from io import StringIO except ImportError: try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: from ssl import SSLError except ImportError: class SSLError(socket.error): pass # Import Package Modules # package imports removed in monolithic build # Define Constants re_SLASH = re.compile('%2F', re.IGNORECASE) re_REQUEST_LINE = re.compile(r"""^ (?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method \ # (single space) ( (?P<scheme>[^:/]+) # Scheme (://) # (?P<host>[^/]+) # Host )? # (?P<path>(\*|/[^ \?]*)) # Path (\? (?P<query_string>[^ ]*))? # Query String \ # (single space) (?P<protocol>HTTPS?/1\.[01]) # Protocol $ """, re.X) LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s' RESPONSE = '''\ %s %s Content-Length: %i Content-Type: %s %s ''' if IS_JYTHON: HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT']) class Worker(Thread): """The Worker class is a base class responsible for receiving connections and (a subclass) will run an application to process the the connection """ def __init__(self, app_info, active_queue, monitor_queue, *args, **kwargs): Thread.__init__(self, *args, **kwargs) # Instance Variables self.app_info = app_info self.active_queue = active_queue self.monitor_queue = monitor_queue self.size = 0 self.status = "200 OK" self.closeConnection = True self.request_line = "" self.protocol = 'HTTP/1.1' # Request Log self.req_log = logging.getLogger('Rocket.Requests') self.req_log.addHandler(NullHandler()) # Error Log self.err_log = logging.getLogger('Rocket.Errors.' + self.getName()) self.err_log.addHandler(NullHandler()) def _handleError(self, typ, val, tb): if typ == SSLError: if 'timed out' in str(val.args[0]): typ = SocketTimeout if typ == SocketTimeout: if __debug__: self.err_log.debug('Socket timed out') self.monitor_queue.put(self.conn) return True if typ == SocketClosed: self.closeConnection = True if __debug__: self.err_log.debug('Client closed socket') return False if typ == BadRequest: self.closeConnection = True if __debug__: self.err_log.debug('Client sent a bad request') return True if typ == socket.error: self.closeConnection = True if val.args[0] in IGNORE_ERRORS_ON_CLOSE: if __debug__: self.err_log.debug('Ignorable socket Error received...' 
'closing connection.') return False else: self.status = "999 Utter Server Failure" tb_fmt = traceback.format_exception(typ, val, tb) self.err_log.error('Unhandled Error when serving ' 'connection:\n' + '\n'.join(tb_fmt)) return False self.closeConnection = True tb_fmt = traceback.format_exception(typ, val, tb) self.err_log.error('\n'.join(tb_fmt)) self.send_response('500 Server Error') return False def run(self): if __debug__: self.err_log.debug('Entering main loop.') # Enter thread main loop while True: conn = self.active_queue.get() if not conn: # A non-client is a signal to die if __debug__: self.err_log.debug('Received a death threat.') return conn if isinstance(conn, tuple): conn = Connection(*conn) self.conn = conn if conn.ssl != conn.secure: self.err_log.info('Received HTTP connection on HTTPS port.') self.send_response('400 Bad Request') self.closeConnection = True conn.close() continue else: if __debug__: self.err_log.debug('Received a connection.') self.closeConnection = False # Enter connection serve loop while True: if __debug__: self.err_log.debug('Serving a request') try: self.run_app(conn) except: exc = sys.exc_info() handled = self._handleError(*exc) if handled: break finally: if self.request_line: log_info = dict(client_ip=conn.client_addr, time=datetime.now().strftime('%c'), status=self.status.split(' ')[0], size=self.size, request_line=self.request_line) self.req_log.info(LOG_LINE % log_info) if self.closeConnection: try: conn.close() except: self.err_log.error(str(traceback.format_exc())) break def run_app(self, conn): # Must be overridden with a method reads the request from the socket # and sends a response. self.closeConnection = True raise NotImplementedError('Overload this method!') def send_response(self, status): stat_msg = status.split(' ', 1)[1] msg = RESPONSE % (self.protocol, status, len(stat_msg), 'text/plain', stat_msg) try: self.conn.sendall(b(msg)) except socket.timeout: self.closeConnection = True msg = 'Tried to send "%s" to client but received timeout error' self.err_log.error(msg % status) except socket.error: self.closeConnection = True msg = 'Tried to send "%s" to client but received socket error' self.err_log.error(msg % status) def read_request_line(self, sock_file): self.request_line = '' try: # Grab the request line d = sock_file.readline() if PY3K: d = d.decode('ISO-8859-1') if d == '\r\n': # Allow an extra NEWLINE at the beginning per HTTP 1.1 spec if __debug__: self.err_log.debug('Client sent newline') d = sock_file.readline() if PY3K: d = d.decode('ISO-8859-1') except socket.timeout: raise SocketTimeout('Socket timed out before request.') except TypeError: raise SocketClosed( 'SSL bug caused closure of socket. See ' '"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".') d = d.strip() if not d: if __debug__: self.err_log.debug( 'Client did not send a recognizable request.') raise SocketClosed('Client closed socket.') self.request_line = d # NOTE: I've replaced the traditional method of procedurally breaking # apart the request line with a (rather unsightly) regular expression. # However, Java's regexp support sucks so bad that it actually takes # longer in Jython to process the regexp than procedurally. So I've # left the old code here for Jython's sake...for now. 
if IS_JYTHON: return self._read_request_line_jython(d) match = re_REQUEST_LINE.match(d) if not match: self.send_response('400 Bad Request') raise BadRequest req = match.groupdict() for k, v in req.iteritems(): if not v: req[k] = "" if k == 'path': req['path'] = r'%2F'.join( [unquote(x) for x in re_SLASH.split(v)]) self.protocol = req['protocol'] return req def _read_request_line_jython(self, d): d = d.strip() try: method, uri, proto = d.split(' ') if not proto.startswith('HTTP') or \ proto[-3:] not in ('1.0', '1.1') or \ method not in HTTP_METHODS: self.send_response('400 Bad Request') raise BadRequest except ValueError: self.send_response('400 Bad Request') raise BadRequest req = dict(method=method, protocol=proto) scheme = '' host = '' if uri == '*' or uri.startswith('/'): path = uri elif '://' in uri: scheme, rest = uri.split('://') host, path = rest.split('/', 1) path = '/' + path else: self.send_response('400 Bad Request') raise BadRequest query_string = '' if '?' in path: path, query_string = path.split('?', 1) path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)]) req.update(path=path, query_string=query_string, scheme=scheme.lower(), host=host) return req def read_headers(self, sock_file): try: headers = dict() lname = None lval = None while True: l = sock_file.readline() if PY3K: try: l = str(l, 'ISO-8859-1') except UnicodeDecodeError: self.err_log.warning( 'Client sent invalid header: ' + repr(l)) if l.strip().replace('\0', '') == '': break if l[0] in ' \t' and lname: # Some headers take more than one line lval += ' ' + l.strip() else: # HTTP header values are latin-1 encoded l = l.split(':', 1) # HTTP header names are us-ascii encoded lname = l[0].strip().upper().replace('-', '_') lval = l[-1].strip() headers[str(lname)] = str(lval) except socket.timeout: raise SocketTimeout("Socket timed out before request.") return headers class SocketTimeout(Exception): "Exception for when a socket times out between requests." pass class BadRequest(Exception): "Exception for when a client sends an incomprehensible request." pass class SocketClosed(Exception): "Exception for when a socket is closed by the client." 
pass class ChunkedReader(object): def __init__(self, sock_file): self.stream = sock_file self.chunk_size = 0 def _read_header(self): chunk_len = "" try: while "" == chunk_len: chunk_len = self.stream.readline().strip() return int(chunk_len, 16) except ValueError: return 0 def read(self, size): data = b('') chunk_size = self.chunk_size while size: if not chunk_size: chunk_size = self._read_header() if size < chunk_size: data += self.stream.read(size) chunk_size -= size break else: if not chunk_size: break data += self.stream.read(chunk_size) size -= chunk_size chunk_size = 0 self.chunk_size = chunk_size return data def readline(self): data = b('') c = self.read(1) while c and c != b('\n'): data += c c = self.read(1) data += c return data def readlines(self): yield self.readline() def get_method(method): methods = dict(wsgi=WSGIWorker) return methods[method.lower()] # Monolithic build...end of module: rocket/worker.py # Monolithic build...start of module: rocket/methods/__init__.py # Monolithic build...end of module: rocket/methods/__init__.py # Monolithic build...start of module: rocket/methods/wsgi.py # Import System Modules import sys import socket from wsgiref.headers import Headers from wsgiref.util import FileWrapper # Import Package Modules # package imports removed in monolithic build if PY3K: from email.utils import formatdate else: # Caps Utils for Py2.4 compatibility from email.Utils import formatdate # Define Constants NEWLINE = b('\r\n') HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s''' BASE_ENV = {'SERVER_NAME': SERVER_NAME, 'SCRIPT_NAME': '', # Direct call WSGI does not need a name 'wsgi.errors': sys.stderr, 'wsgi.version': (1, 0), 'wsgi.multiprocess': False, 'wsgi.run_once': False, 'wsgi.file_wrapper': FileWrapper } class WSGIWorker(Worker): def __init__(self, *args, **kwargs): """Builds some instance variables that will last the life of the thread.""" Worker.__init__(self, *args, **kwargs) if isinstance(self.app_info, dict): multithreaded = self.app_info.get('max_threads') != 1 else: multithreaded = False self.base_environ = dict( {'SERVER_SOFTWARE': self.app_info['server_software'], 'wsgi.multithread': multithreaded, }) self.base_environ.update(BASE_ENV) # Grab our application self.app = self.app_info.get('wsgi_app') if not hasattr(self.app, "__call__"): raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app)) # Enable futures if has_futures and self.app_info.get('futures'): executor = self.app_info['executor'] self.base_environ.update({"wsgiorg.executor": executor, "wsgiorg.futures": executor.futures}) def build_environ(self, sock_file, conn): """ Build the execution environment. 
""" # Grab the request line request = self.read_request_line(sock_file) # Copy the Base Environment environ = self.base_environ.copy() # Grab the headers for k, v in self.read_headers(sock_file).iteritems(): environ[str('HTTP_' + k)] = v # Add CGI Variables environ['REQUEST_METHOD'] = request['method'] environ['PATH_INFO'] = request['path'] environ['SERVER_PROTOCOL'] = request['protocol'] environ['SERVER_PORT'] = str(conn.server_port) environ['REMOTE_PORT'] = str(conn.client_port) environ['REMOTE_ADDR'] = str(conn.client_addr) environ['QUERY_STRING'] = request['query_string'] if 'HTTP_CONTENT_LENGTH' in environ: environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH'] if 'HTTP_CONTENT_TYPE' in environ: environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE'] # Save the request method for later self.request_method = environ['REQUEST_METHOD'] # Add Dynamic WSGI Variables if conn.ssl: environ['wsgi.url_scheme'] = 'https' environ['HTTPS'] = 'on' try: peercert = conn.socket.getpeercert(binary_form=True) environ['SSL_CLIENT_RAW_CERT'] = \ peercert and ssl.DER_cert_to_PEM_cert(peercert) except Exception: print sys.exc_info()[1] else: environ['wsgi.url_scheme'] = 'http' if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked': environ['wsgi.input'] = ChunkedReader(sock_file) else: environ['wsgi.input'] = sock_file return environ def send_headers(self, data, sections): h_set = self.header_set # Does the app want us to send output chunked? self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked' # Add a Date header if it's not there already if not 'Date' in h_set: h_set['Date'] = formatdate(usegmt=True) # Add a Server header if it's not there already if not 'Server' in h_set: h_set['Server'] = HTTP_SERVER_SOFTWARE if 'Content-Length' in h_set: self.size = int(h_set['Content-Length']) else: s = int(self.status.split(' ')[0]) if (s < 200 or s not in (204, 205, 304)) and not self.chunked: if sections == 1 or self.protocol != 'HTTP/1.1': # Add a Content-Length header because it's not there self.size = len(data) h_set['Content-Length'] = str(self.size) else: # If they sent us more than one section, we blow chunks h_set['Transfer-Encoding'] = 'Chunked' self.chunked = True if __debug__: self.err_log.debug('Adding header...' 'Transfer-Encoding: Chunked') if 'Connection' not in h_set: # If the application did not provide a connection header, # fill it in client_conn = self.environ.get('HTTP_CONNECTION', '').lower() if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1': # HTTP = 1.1 defaults to keep-alive connections if client_conn: h_set['Connection'] = client_conn else: h_set['Connection'] = 'keep-alive' else: # HTTP < 1.1 supports keep-alive but it's quirky # so we don't support it h_set['Connection'] = 'close' # Close our connection if we need to. self.closeConnection = h_set.get('Connection', '').lower() == 'close' # Build our output headers header_data = HEADER_RESPONSE % (self.status, str(h_set)) # Send the headers if __debug__: self.err_log.debug('Sending Headers: %s' % repr(header_data)) self.conn.sendall(b(header_data)) self.headers_sent = True def write_warning(self, data, sections=None): self.err_log.warning('WSGI app called write method directly. This is ' 'deprecated behavior. Please update your app.') return self.write(data, sections) def write(self, data, sections=None): """ Write the data to the output socket. 
""" if self.error[0]: self.status = self.error[0] data = b(self.error[1]) if not self.headers_sent: self.send_headers(data, sections) if self.request_method != 'HEAD': try: if self.chunked: self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data))) else: self.conn.sendall(data) except socket.timeout: self.closeConnection = True except socket.error: # But some clients will close the connection before that # resulting in a socket error. self.closeConnection = True def start_response(self, status, response_headers, exc_info=None): """ Store the HTTP status and headers to be sent when self.write is called. """ if exc_info: try: if self.headers_sent: # Re-raise original exception if headers sent # because this violates WSGI specification. raise finally: exc_info = None elif self.header_set: raise AssertionError("Headers already set!") if PY3K and not isinstance(status, str): self.status = str(status, 'ISO-8859-1') else: self.status = status # Make sure headers are bytes objects try: self.header_set = Headers(response_headers) except UnicodeDecodeError: self.error = ('500 Internal Server Error', 'HTTP Headers should be bytes') self.err_log.error('Received HTTP Headers from client that contain' ' invalid characters for Latin-1 encoding.') return self.write_warning def run_app(self, conn): self.size = 0 self.header_set = Headers([]) self.headers_sent = False self.error = (None, None) self.chunked = False sections = None output = None if __debug__: self.err_log.debug('Getting sock_file') # Build our file-like object if PY3K: sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE) else: sock_file = conn.makefile(BUF_SIZE) try: # Read the headers and build our WSGI environment self.environ = environ = self.build_environ(sock_file, conn) # Handle 100 Continue if environ.get('HTTP_EXPECT', '') == '100-continue': res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n' conn.sendall(b(res)) # Send it to our WSGI application output = self.app(environ, self.start_response) if not hasattr(output, '__len__') and not hasattr(output, '__iter__'): self.error = ('500 Internal Server Error', 'WSGI applications must return a list or ' 'generator type.') if hasattr(output, '__len__'): sections = len(output) for data in output: # Don't send headers until body appears if data: self.write(data, sections) if self.chunked: # If chunked, send our final chunk length self.conn.sendall(b('0\r\n\r\n')) elif not self.headers_sent: # Send headers if the body was empty self.send_headers('', sections) # Don't capture exceptions here. The Worker class handles # them appropriately. finally: if __debug__: self.err_log.debug('Finally closing output and sock_file') if hasattr(output, 'close'): output.close() sock_file.close() # Monolithic build...end of module: rocket/methods/wsgi.py
Python
#!/bin/python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import base64 import cPickle import datetime import thread import logging import sys import glob import os import re import time import traceback import smtplib import urllib import urllib2 import Cookie import cStringIO from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset from gluon.contenttype import contenttype from gluon.storage import Storage, StorageList, Settings, Messages from gluon.utils import web2py_uuid from gluon.fileutils import read_file, check_credentials from gluon import * from gluon.contrib.autolinks import expand_one from gluon.contrib.markmin.markmin2html import \ replace_at_urls, replace_autolinks, replace_components from gluon.dal import Row, Set, Query import gluon.serializers as serializers try: # try stdlib (Python 2.6) import json as json_parser except ImportError: try: # try external module import simplejson as json_parser except: # fallback to pure-Python module import contrib.simplejson as json_parser __all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', 'Wiki', 'PluginManager', 'fetch', 'geocode', 'prettydate'] ### mind there are two loggers here (logger and crud.settings.logger)! logger = logging.getLogger("web2py") DEFAULT = lambda: None def getarg(position, default=None): args = current.request.args if position < 0 and len(args) >= -position: return args[position] elif position >= 0 and len(args) > position: return args[position] else: return default def callback(actions, form, tablename=None): if actions: if tablename and isinstance(actions, dict): actions = actions.get(tablename, []) if not isinstance(actions, (list, tuple)): actions = [actions] [action(form) for action in actions] def validators(*a): b = [] for item in a: if isinstance(item, (list, tuple)): b = b + list(item) else: b.append(item) return b def call_or_redirect(f, *args): if callable(f): redirect(f(*args)) else: redirect(f) def replace_id(url, form): if url: url = url.replace('[id]', str(form.vars.id)) if url[0] == '/' or url[:4] == 'http': return url return URL(url) class Mail(object): """ Class for configuring and sending emails with alternative text / html body, multiple attachments and encryption support Works with SMTP and Google App Engine. """ class Attachment(MIMEBase.MIMEBase): """ Email attachment Arguments: payload: path to file or file-like object with read() method filename: name of the attachment stored in message; if set to None, it will be fetched from payload path; file-like object payload must have explicit filename specified content_id: id of the attachment; automatically contained within < and > content_type: content type of the attachment; if set to None, it will be fetched from filename using gluon.contenttype module encoding: encoding of all strings passed to this function (except attachment body) Content ID is used to identify attachments within the html body; in example, attached image with content ID 'photo' may be used in html message as a source of img tag <img src="cid:photo" />. 
Examples: #Create attachment from text file: attachment = Mail.Attachment('/path/to/file.txt') Content-Type: text/plain MIME-Version: 1.0 Content-Disposition: attachment; filename="file.txt" Content-Transfer-Encoding: base64 SOMEBASE64CONTENT= #Create attachment from image file with custom filename and cid: attachment = Mail.Attachment('/path/to/file.png', filename='photo.png', content_id='photo') Content-Type: image/png MIME-Version: 1.0 Content-Disposition: attachment; filename="photo.png" Content-Id: <photo> Content-Transfer-Encoding: base64 SOMEOTHERBASE64CONTENT= """ def __init__( self, payload, filename=None, content_id=None, content_type=None, encoding='utf-8'): if isinstance(payload, str): if filename is None: filename = os.path.basename(payload) payload = read_file(payload, 'rb') else: if filename is None: raise Exception('Missing attachment name') payload = payload.read() filename = filename.encode(encoding) if content_type is None: content_type = contenttype(filename) self.my_filename = filename self.my_payload = payload MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1)) self.set_payload(payload) self['Content-Disposition'] = 'attachment; filename="%s"' % filename if not content_id is None: self['Content-Id'] = '<%s>' % content_id.encode(encoding) Encoders.encode_base64(self) def __init__(self, server=None, sender=None, login=None, tls=True): """ Main Mail object Arguments: server: SMTP server address in address:port notation sender: sender email address login: sender login name and password in login:password notation or None if no authentication is required tls: enables/disables encryption (True by default) In Google App Engine use: server='gae' For sake of backward compatibility all fields are optional and default to None, however, to be able to send emails at least server and sender must be specified. They are available under following fields: mail.settings.server mail.settings.sender mail.settings.login When server is 'logging', email is logged but not sent (debug mode) Optionally you can use PGP encryption or X509: mail.settings.cipher_type = None mail.settings.gpg_home = None mail.settings.sign = True mail.settings.sign_passphrase = None mail.settings.encrypt = True mail.settings.x509_sign_keyfile = None mail.settings.x509_sign_certfile = None mail.settings.x509_nocerts = False mail.settings.x509_crypt_certfiles = None cipher_type : None gpg - need a python-pyme package and gpgme lib x509 - smime gpg_home : you can set a GNUPGHOME environment variable to specify home of gnupg sign : sign the message (True or False) sign_passphrase : passphrase for key signing encrypt : encrypt the message ... x509 only ... 
x509_sign_keyfile : the signers private key filename (PEM format) x509_sign_certfile: the signers certificate filename (PEM format) x509_nocerts : if True then no attached certificate in mail x509_crypt_certfiles: the certificates file to encrypt the messages with can be a file name or a list of file names (PEM format) Examples: #Create Mail object with authentication data for remote server: mail = Mail('example.com:25', 'me@example.com', 'me:password') """ settings = self.settings = Settings() settings.server = server settings.sender = sender settings.login = login settings.tls = tls settings.hostname = None settings.ssl = False settings.cipher_type = None settings.gpg_home = None settings.sign = True settings.sign_passphrase = None settings.encrypt = True settings.x509_sign_keyfile = None settings.x509_sign_certfile = None settings.x509_nocerts = False settings.x509_crypt_certfiles = None settings.debug = False settings.lock_keys = True self.result = {} self.error = None def send( self, to, subject = '[no subject]', message = '[no message]', attachments=None, cc=None, bcc=None, reply_to=None, sender=None, encoding='utf-8', raw=False, headers={} ): """ Sends an email using data specified in constructor Arguments: to: list or tuple of receiver addresses; will also accept single object subject: subject of the email message: email body text; depends on type of passed object: if 2-list or 2-tuple is passed: first element will be source of plain text while second of html text; otherwise: object will be the only source of plain text and html source will be set to None; If text or html source is: None: content part will be ignored, string: content part will be set to it, file-like object: content part will be fetched from it using it's read() method attachments: list or tuple of Mail.Attachment objects; will also accept single object cc: list or tuple of carbon copy receiver addresses; will also accept single object bcc: list or tuple of blind carbon copy receiver addresses; will also accept single object reply_to: address to which reply should be composed encoding: encoding of all strings passed to this method (including message bodies) headers: dictionary of headers to refine the headers just before sending mail, e.g. {'Return-Path' : 'bounces@example.org'} Examples: #Send plain text message to single address: mail.send('you@example.com', 'Message subject', 'Plain text body of the message') #Send html message to single address: mail.send('you@example.com', 'Message subject', '<html>Plain text body of the message</html>') #Send text and html message to three addresses (two in cc): mail.send('you@example.com', 'Message subject', ('Plain text body', '<html>html body</html>'), cc=['other1@example.com', 'other2@example.com']) #Send html only message with image attachment available from the message by 'photo' content id: mail.send('you@example.com', 'Message subject', (None, '<html><img src="cid:photo" /></html>'), Mail.Attachment('/path/to/photo.jpg' content_id='photo')) #Send email with two attachments and no body text mail.send('you@example.com, 'Message subject', None, [Mail.Attachment('/path/to/fist.file'), Mail.Attachment('/path/to/second.file')]) Returns True on success, False on failure. 
Before return, method updates two object's fields: self.result: return value of smtplib.SMTP.sendmail() or GAE's mail.send_mail() method self.error: Exception message or None if above was successful """ # We don't want to use base64 encoding for unicode mail Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8') def encode_header(key): if [c for c in key if 32 > ord(c) or ord(c) > 127]: return Header.Header(key.encode('utf-8'), 'utf-8') else: return key # encoded or raw text def encoded_or_raw(text): if raw: text = encode_header(text) return text sender = sender or self.settings.sender if not isinstance(self.settings.server, str): raise Exception('Server address not specified') if not isinstance(sender, str): raise Exception('Sender address not specified') if not raw and attachments: # Use multipart/mixed if there is attachments payload_in = MIMEMultipart.MIMEMultipart('mixed') elif raw: # no encoding configuration for raw messages if not isinstance(message, basestring): message = message.read() if isinstance(message, unicode): text = message.encode('utf-8') elif not encoding == 'utf-8': text = message.decode(encoding).encode('utf-8') else: text = message # No charset passed to avoid transport encoding # NOTE: some unicode encoded strings will produce # unreadable mail contents. payload_in = MIMEText.MIMEText(text) if to: if not isinstance(to, (list, tuple)): to = [to] else: raise Exception('Target receiver address not specified') if cc: if not isinstance(cc, (list, tuple)): cc = [cc] if bcc: if not isinstance(bcc, (list, tuple)): bcc = [bcc] if message is None: text = html = None elif isinstance(message, (list, tuple)): text, html = message elif message.strip().startswith('<html') and \ message.strip().endswith('</html>'): text = self.settings.server == 'gae' and message or None html = message else: text = message html = None if (not text is None or not html is None) and (not raw): if not text is None: if not isinstance(text, basestring): text = text.read() if isinstance(text, unicode): text = text.encode('utf-8') elif not encoding == 'utf-8': text = text.decode(encoding).encode('utf-8') if not html is None: if not isinstance(html, basestring): html = html.read() if isinstance(html, unicode): html = html.encode('utf-8') elif not encoding == 'utf-8': html = html.decode(encoding).encode('utf-8') # Construct mime part only if needed if text and html: # We have text and html we need multipart/alternative attachment = MIMEMultipart.MIMEMultipart('alternative') attachment.attach(MIMEText.MIMEText(text, _charset='utf-8')) attachment.attach( MIMEText.MIMEText(html, 'html', _charset='utf-8')) elif text: attachment = MIMEText.MIMEText(text, _charset='utf-8') elif html: attachment = \ MIMEText.MIMEText(html, 'html', _charset='utf-8') if attachments: # If there is attachments put text and html into # multipart/mixed payload_in.attach(attachment) else: # No attachments no multipart/mixed payload_in = attachment if (attachments is None) or raw: pass elif isinstance(attachments, (list, tuple)): for attachment in attachments: payload_in.attach(attachment) else: payload_in.attach(attachments) ####################################################### # CIPHER # ####################################################### cipher_type = self.settings.cipher_type sign = self.settings.sign sign_passphrase = self.settings.sign_passphrase encrypt = self.settings.encrypt ####################################################### # GPGME # ####################################################### if cipher_type == 
'gpg': if self.settings.gpg_home: # Set GNUPGHOME environment variable to set home of gnupg import os os.environ['GNUPGHOME'] = self.settings.gpg_home if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to gpg" return False # need a python-pyme package and gpgme lib from pyme import core, errors from pyme.constants.sig import mode ############################################ # sign # ############################################ if sign: import string core.check_version(None) pin = string.replace(payload_in.as_string(), '\n', '\r\n') plain = core.Data(pin) sig = core.Data() c = core.Context() c.set_armor(1) c.signers_clear() # search for signing key for From: for sigkey in c.op_keylist_all(sender, 1): if sigkey.can_sign: c.signers_add(sigkey) if not c.signers_enum(0): self.error = 'No key for signing [%s]' % sender return False c.set_passphrase_cb(lambda x, y, z: sign_passphrase) try: # make a signature c.op_sign(plain, sig, mode.DETACH) sig.seek(0, 0) # make it part of the email payload = MIMEMultipart.MIMEMultipart('signed', boundary=None, _subparts=None, **dict( micalg="pgp-sha1", protocol="application/pgp-signature")) # insert the origin payload payload.attach(payload_in) # insert the detached signature p = MIMEBase.MIMEBase("application", 'pgp-signature') p.set_payload(sig.read()) payload.attach(p) # it's just a trick to handle the no encryption case payload_in = payload except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ############################################ # encrypt # ############################################ if encrypt: core.check_version(None) plain = core.Data(payload_in.as_string()) cipher = core.Data() c = core.Context() c.set_armor(1) # collect the public keys for encryption recipients = [] rec = to[:] if cc: rec.extend(cc) if bcc: rec.extend(bcc) for addr in rec: c.op_keylist_start(addr, 0) r = c.op_keylist_next() if r is None: self.error = 'No key for [%s]' % addr return False recipients.append(r) try: # make the encryption c.op_encrypt(recipients, 1, plain, cipher) cipher.seek(0, 0) # make it a part of the email payload = MIMEMultipart.MIMEMultipart('encrypted', boundary=None, _subparts=None, **dict(protocol="application/pgp-encrypted")) p = MIMEBase.MIMEBase("application", 'pgp-encrypted') p.set_payload("Version: 1\r\n") payload.attach(p) p = MIMEBase.MIMEBase("application", 'octet-stream') p.set_payload(cipher.read()) payload.attach(p) except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ####################################################### # X.509 # ####################################################### elif cipher_type == 'x509': if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to x509" return False x509_sign_keyfile = self.settings.x509_sign_keyfile if self.settings.x509_sign_certfile: x509_sign_certfile = self.settings.x509_sign_certfile else: # if there is no sign certfile we'll assume the # cert is in keyfile x509_sign_certfile = self.settings.x509_sign_keyfile # crypt certfiles could be a string or a list x509_crypt_certfiles = self.settings.x509_crypt_certfiles x509_nocerts = self.settings.x509_nocerts # need m2crypto try: from M2Crypto import BIO, SMIME, X509 except Exception, e: self.error = "Can't load M2Crypto module" return False msg_bio = BIO.MemoryBuffer(payload_in.as_string()) s = SMIME.SMIME() # SIGN if sign: #key for signing try: s.load_key(x509_sign_keyfile, x509_sign_certfile, callback=lambda 
x: sign_passphrase) except Exception, e: self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e) return False try: if x509_nocerts: flags = SMIME.PKCS7_NOCERTS else: flags = 0 if not encrypt: flags += SMIME.PKCS7_DETACHED p7 = s.sign(msg_bio, flags=flags) msg_bio = BIO.MemoryBuffer(payload_in.as_string( )) # Recreate coz sign() has consumed it. except Exception, e: self.error = "Something went wrong on signing: <%s> %s" % ( str(e), str(flags)) return False # ENCRYPT if encrypt: try: sk = X509.X509_Stack() if not isinstance(x509_crypt_certfiles, (list, tuple)): x509_crypt_certfiles = [x509_crypt_certfiles] # make an encryption cert's stack for x in x509_crypt_certfiles: sk.push(X509.load_cert(x)) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp_bio = BIO.MemoryBuffer() if sign: s.write(tmp_bio, p7) else: tmp_bio.write(payload_in.as_string()) p7 = s.encrypt(tmp_bio) except Exception, e: self.error = "Something went wrong on encrypting: <%s>" % str(e) return False # Final stage in sign and encryption out = BIO.MemoryBuffer() if encrypt: s.write(out, p7) else: if sign: s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED) else: out.write('\r\n') out.write(payload_in.as_string()) out.close() st = str(out.read()) payload = message_from_string(st) else: # no cryptography process as usual payload = payload_in payload['From'] = encoded_or_raw(sender.decode(encoding)) origTo = to[:] if to: payload['To'] = encoded_or_raw(', '.join(to).decode(encoding)) if reply_to: payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding)) if cc: payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding)) to.extend(cc) if bcc: to.extend(bcc) payload['Subject'] = encoded_or_raw(subject.decode(encoding)) payload['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) for k, v in headers.iteritems(): payload[k] = encoded_or_raw(v.decode(encoding)) result = {} try: if self.settings.server == 'logging': logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % ('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)) elif self.settings.server == 'gae': xcc = dict() if cc: xcc['cc'] = cc if bcc: xcc['bcc'] = bcc if reply_to: xcc['reply_to'] = reply_to from google.appengine.api import mail attachments = attachments and [(a.my_filename, a.my_payload) for a in attachments if not raw] if attachments: result = mail.send_mail( sender=sender, to=origTo, subject=subject, body=text, html=html, attachments=attachments, **xcc) elif html and (not raw): result = mail.send_mail( sender=sender, to=origTo, subject=subject, body=text, html=html, **xcc) else: result = mail.send_mail( sender=sender, to=origTo, subject=subject, body=text, **xcc) else: smtp_args = self.settings.server.split(':') if self.settings.ssl: server = smtplib.SMTP_SSL(*smtp_args) else: server = smtplib.SMTP(*smtp_args) if self.settings.tls and not self.settings.ssl: server.ehlo(self.settings.hostname) server.starttls() server.ehlo(self.settings.hostname) if self.settings.login: server.login(*self.settings.login.split(':', 1)) result = server.sendmail( sender, to, payload.as_string()) server.quit() except Exception, e: logger.warn('Mail.send failure:%s' % e) self.result = result self.error = e return False self.result = result self.error = None return True class Recaptcha(DIV): """ Usage: form = FORM(Recaptcha(public_key='...',private_key='...')) or form = SQLFORM(...) 
form.append(Recaptcha(public_key='...',private_key='...')) """ API_SSL_SERVER = 'https://www.google.com/recaptcha/api' API_SERVER = 'http://www.google.com/recaptcha/api' VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify' def __init__( self, request=None, public_key='', private_key='', use_ssl=False, error=None, error_message='invalid', label='Verify:', options='' ): self.request_vars = request and request.vars or current.request.vars self.remote_addr = request.env.remote_addr self.public_key = public_key self.private_key = private_key self.use_ssl = use_ssl self.error = error self.errors = Storage() self.error_message = error_message self.components = [] self.attributes = {} self.label = label self.options = options self.comment = '' def _validate(self): # for local testing: recaptcha_challenge_field = \ self.request_vars.recaptcha_challenge_field recaptcha_response_field = \ self.request_vars.recaptcha_response_field private_key = self.private_key remoteip = self.remote_addr if not (recaptcha_response_field and recaptcha_challenge_field and len(recaptcha_response_field) and len(recaptcha_challenge_field)): self.errors['captcha'] = self.error_message return False params = urllib.urlencode({ 'privatekey': private_key, 'remoteip': remoteip, 'challenge': recaptcha_challenge_field, 'response': recaptcha_response_field, }) request = urllib2.Request( url=self.VERIFY_SERVER, data=params, headers={'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA Python'}) httpresp = urllib2.urlopen(request) return_values = httpresp.read().splitlines() httpresp.close() return_code = return_values[0] if return_code == 'true': del self.request_vars.recaptcha_challenge_field del self.request_vars.recaptcha_response_field self.request_vars.captcha = '' return True else: # In case we get an error code, store it so we can get an error message # from the /api/challenge URL as described in the reCAPTCHA api docs. 
self.error = return_values[1] self.errors['captcha'] = self.error_message return False def xml(self): public_key = self.public_key use_ssl = self.use_ssl error_param = '' if self.error: error_param = '&error=%s' % self.error if use_ssl: server = self.API_SSL_SERVER else: server = self.API_SERVER captcha = DIV( SCRIPT("var RecaptchaOptions = {%s};" % self.options), SCRIPT(_type="text/javascript", _src="%s/challenge?k=%s%s" % (server, public_key, error_param)), TAG.noscript( IFRAME( _src="%s/noscript?k=%s%s" % ( server, public_key, error_param), _height="300", _width="500", _frameborder="0"), BR(), INPUT( _type='hidden', _name='recaptcha_response_field', _value='manual_challenge')), _id='recaptcha') if not self.errors.captcha: return XML(captcha).xml() else: captcha.append(DIV(self.errors['captcha'], _class='error')) return XML(captcha).xml() def addrow(form, a, b, c, style, _id, position=-1): if style == "divs": form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id=_id)) elif style == "table2cols": form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'), TD(c, _class='w2p_fc'))) form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'), _colspan=2, _id=_id)) elif style == "ul": form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id=_id)) elif style == "bootstrap": form[0].insert(position, DIV(LABEL(a, _class='control-label'), DIV(b, SPAN(c, _class='inline-help'), _class='controls'), _class='control-group', _id=_id)) else: form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'), TD(b, _class='w2p_fw'), TD(c, _class='w2p_fc'), _id=_id)) class Auth(object): default_settings = dict( hideerror=False, password_min_length=4, cas_maps=None, reset_password_requires_verification=False, registration_requires_verification=False, registration_requires_approval=False, login_after_registration=False, login_after_password_change=True, alternate_requires_registration=False, create_user_groups="user_%(id)s", everybody_group_id=None, login_captcha=None, register_captcha=None, retrieve_username_captcha=None, retrieve_password_captcha=None, captcha=None, expiration=3600, # one hour long_expiration=3600 * 30 * 24, # one month remember_me_form=True, allow_basic_login=False, allow_basic_login_only=False, on_failed_authentication=lambda x: redirect(x), formstyle="table3cols", label_separator=": ", allow_delete_accounts=False, password_field='password', table_user_name='auth_user', table_group_name='auth_group', table_membership_name='auth_membership', table_permission_name='auth_permission', table_event_name='auth_event', table_cas_name='auth_cas', table_user=None, table_group=None, table_membership=None, table_permission=None, table_event=None, table_cas=None, showid=False, use_username=False, login_email_validate=True, login_userfield=None, logout_onlogout=None, register_fields=None, register_verify_password=True, profile_fields=None, email_case_sensitive=True, username_case_sensitive=True, update_fields = ['email'], ondelete="CASCADE", client_side = True, wiki = Settings(), ) # ## these are messages that can be customized default_messages = dict( login_button='Login', register_button='Register', password_reset_button='Request reset password', password_change_button='Change password', profile_save_button='Apply changes', submit_button='Submit', verify_password='Verify Password', delete_label='Check to delete', function_disabled='Function disabled', access_denied='Insufficient privileges', 
registration_verifying='Registration needs verification', registration_pending='Registration is pending approval', login_disabled='Login disabled by administrator', logged_in='Logged in', email_sent='Email sent', unable_to_send_email='Unable to send email', email_verified='Email verified', logged_out='Logged out', registration_successful='Registration successful', invalid_email='Invalid email', unable_send_email='Unable to send email', invalid_login='Invalid login', invalid_user='Invalid user', invalid_password='Invalid password', is_empty="Cannot be empty", mismatched_password="Password fields don't match", verify_email='Click on the link %(link)s to verify your email', verify_email_subject='Email verification', username_sent='Your username was emailed to you', new_password_sent='A new password was emailed to you', password_changed='Password changed', retrieve_username='Your username is: %(username)s', retrieve_username_subject='Username retrieve', retrieve_password='Your password is: %(password)s', retrieve_password_subject='Password retrieve', reset_password= 'Click on the link %(link)s to reset your password', reset_password_subject='Password reset', invalid_reset_password='Invalid reset password', profile_updated='Profile updated', new_password='New password', old_password='Old password', group_description='Group uniquely assigned to user %(id)s', register_log='User %(id)s Registered', login_log='User %(id)s Logged-in', login_failed_log=None, logout_log='User %(id)s Logged-out', profile_log='User %(id)s Profile updated', verify_email_log='User %(id)s Verification email sent', retrieve_username_log='User %(id)s Username retrieved', retrieve_password_log='User %(id)s Password retrieved', reset_password_log='User %(id)s Password reset', change_password_log='User %(id)s Password changed', add_group_log='Group %(group_id)s created', del_group_log='Group %(group_id)s deleted', add_membership_log=None, del_membership_log=None, has_membership_log=None, add_permission_log=None, del_permission_log=None, has_permission_log=None, impersonate_log='User %(id)s is impersonating %(other_id)s', label_first_name='First name', label_last_name='Last name', label_username='Username', label_email='E-mail', label_password='Password', label_registration_key='Registration key', label_reset_password_key='Reset Password key', label_registration_id='Registration identifier', label_role='Role', label_description='Description', label_user_id='User ID', label_group_id='Group ID', label_name='Name', label_table_name='Object or table name', label_record_id='Record ID', label_time_stamp='Timestamp', label_client_ip='Client IP', label_origin='Origin', label_remember_me="Remember me (for 30 days)", verify_password_comment='please input your password again', ) """ Class for authentication, authorization, role based access control. Includes: - registration and profile - login and logout - username and password retrieval - event logging - role creation and assignment - user defined group/role based permission Authentication Example: from contrib.utils import * mail=Mail() mail.settings.server='smtp.gmail.com:587' mail.settings.sender='you@somewhere.com' mail.settings.login='username:password' auth=Auth(db) auth.settings.mailer=mail # auth.settings....=... 
auth.define_tables() def authentication(): return dict(form=auth()) exposes: - http://.../{application}/{controller}/authentication/login - http://.../{application}/{controller}/authentication/logout - http://.../{application}/{controller}/authentication/register - http://.../{application}/{controller}/authentication/verify_email - http://.../{application}/{controller}/authentication/retrieve_username - http://.../{application}/{controller}/authentication/retrieve_password - http://.../{application}/{controller}/authentication/reset_password - http://.../{application}/{controller}/authentication/profile - http://.../{application}/{controller}/authentication/change_password On registration a group with role=new_user.id is created and user is given membership of this group. You can create a group with: group_id=auth.add_group('Manager', 'can access the manage action') auth.add_permission(group_id, 'access to manage') Here \"access to manage\" is just a user defined string. You can give access to a user: auth.add_membership(group_id, user_id) If user id is omitted, the logged in user is assumed Then you can decorate any action: @auth.requires_permission('access to manage') def manage(): return dict() You can restrict a permission to a specific table: auth.add_permission(group_id, 'edit', db.sometable) @auth.requires_permission('edit', db.sometable) Or to a specific record: auth.add_permission(group_id, 'edit', db.sometable, 45) @auth.requires_permission('edit', db.sometable, 45) If authorization is not granted calls: auth.settings.on_failed_authorization Other options: auth.settings.mailer=None auth.settings.expiration=3600 # seconds ... ### these are messages that can be customized ... """ @staticmethod def get_or_create_key(filename=None, alg='sha512'): request = current.request if not filename: filename = os.path.join(request.folder, 'private', 'auth.key') if os.path.exists(filename): key = open(filename, 'r').read().strip() else: key = alg + ':' + web2py_uuid() open(filename, 'w').write(key) return key def url(self, f=None, args=None, vars=None, scheme=False): if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars, scheme=scheme) def here(self): return URL(args=current.request.args, vars=current.request.vars) def __init__(self, environment=None, db=None, mailer=True, hmac_key=None, controller='default', function='user', cas_provider=None, signature=True, secure=False): """ auth=Auth(db) - environment is there for legacy but unused (awful) - db has to be the database where to create tables for authentication - mailer=Mail(...) or None (no mailed) or True (make a mailer) - hmac_key can be a hmac_key or hmac_key=Auth.get_or_create_key() - controller (where is the user action?) - cas_provider (delegate authentication to the URL, CAS2) """ ## next two lines for backward compatibility if not db and environment and isinstance(environment, DAL): db = environment self.db = db self.environment = current request = current.request session = current.session auth = session.auth self.user_groups = auth and auth.user_groups or {} if secure: request.requires_https() if auth and auth.last_visit and auth.last_visit + \ datetime.timedelta(days=0, seconds=auth.expiration) > request.now: self.user = auth.user # this is a trick to speed up sessions if (request.now - auth.last_visit).seconds > (auth.expiration / 10): auth.last_visit = request.now else: self.user = None if session.auth: del session.auth # ## what happens after login? 
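        # Added clarifying note (not in the original source): the value read
        # below comes from the hidden "_next" variable that the login form and
        # the navbar helper append to the user action, e.g. something like
        #     URL('default', 'user', args='login', vars=dict(_next=URL('index')))
        # and it is what login/register/etc. later pass to redirect().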
self.next = current.request.vars._next if isinstance(self.next, (list, tuple)): self.next = self.next[0] url_index = URL(controller, 'index') url_login = URL(controller, function, args='login') # ## what happens after registration? settings = self.settings = Settings() settings.update(Auth.default_settings) settings.update( cas_domains=[request.env.http_host], cas_provider=cas_provider, cas_actions=dict(login='login', validate='validate', servicevalidate='serviceValidate', proxyvalidate='proxyValidate', logout='logout'), extra_fields={}, actions_disabled=[], controller=controller, function=function, login_url=url_login, logged_url=URL(controller, function, args='profile'), download_url=URL(controller, 'download'), mailer=(mailer == True) and Mail() or mailer, on_failed_authorization = URL(controller, function, args='not_authorized'), login_next = url_index, login_onvalidation = [], login_onaccept = [], login_onfail = [], login_methods = [self], login_form = self, logout_next = url_index, logout_onlogout = None, register_next = url_index, register_onvalidation = [], register_onaccept = [], verify_email_next = url_login, verify_email_onaccept = [], profile_next = url_index, profile_onvalidation = [], profile_onaccept = [], retrieve_username_next = url_index, retrieve_password_next = url_index, request_reset_password_next = url_login, reset_password_next = url_index, change_password_next = url_index, change_password_onvalidation = [], change_password_onaccept = [], retrieve_password_onvalidation = [], reset_password_onvalidation = [], reset_password_onaccept = [], hmac_key = hmac_key, ) settings.lock_keys = True # ## these are messages that can be customized messages = self.messages = Messages(current.T) messages.update(Auth.default_messages) messages.update(ajax_failed_authentication=DIV(H4('NOT AUTHORIZED'), 'Please ', A('login', _href=self.settings.login_url + ('?_next=' + urllib.quote(current.request.env.http_web2py_component_location)) if current.request.env.http_web2py_component_location else ''), ' to view this content.', _class='not-authorized alert alert-block')) messages.lock_keys = True # for "remember me" option response = current.response if auth and auth.remember: # when user wants to be logged in for longer response.cookies[response.session_id_name]["expires"] = \ auth.expiration if signature: self.define_signature() else: self.signature = None def _get_user_id(self): "accessor for auth.user_id" return self.user and self.user.id or None user_id = property(_get_user_id, doc="user.id or None") def table_user(self): return self.db[self.settings.table_user_name] def table_group(self): return self.db[self.settings.table_group_name] def table_membership(self): return self.db[self.settings.table_membership_name] def table_permission(self): return self.db[self.settings.table_permission_name] def table_event(self): return self.db[self.settings.table_event_name] def table_cas(self): return self.db[self.settings.table_cas_name] def _HTTP(self, *a, **b): """ only used in lambda: self._HTTP(404) """ raise HTTP(*a, **b) def __call__(self): """ usage: def authentication(): return dict(form=auth()) """ request = current.request args = request.args if not args: redirect(self.url(args='login', vars=request.vars)) elif args[0] in self.settings.actions_disabled: raise HTTP(404) if args[0] in ('login', 'logout', 'register', 'verify_email', 'retrieve_username', 'retrieve_password', 'reset_password', 'request_reset_password', 'change_password', 'profile', 'groups', 'impersonate', 'not_authorized'): 
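            # Added illustrative note (assumption, not in the original source):
            # with the usual exposing action
            #     def user(): return dict(form=auth())
            # a request such as /app/default/user/login reaches this point with
            # args[0] == 'login' and is dispatched via getattr below; only
            # 'impersonate' may take a second URL argument (the target user id).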
if len(request.args) >= 2 and args[0] == 'impersonate': return getattr(self, args[0])(request.args[1]) else: return getattr(self, args[0])() elif args[0] == 'cas' and not self.settings.cas_provider: if args(1) == self.settings.cas_actions['login']: return self.cas_login(version=2) elif args(1) == self.settings.cas_actions['validate']: return self.cas_validate(version=1) elif args(1) == self.settings.cas_actions['servicevalidate']: return self.cas_validate(version=2, proxy=False) elif args(1) == self.settings.cas_actions['proxyvalidate']: return self.cas_validate(version=2, proxy=True) elif args(1) == self.settings.cas_actions['logout']: return self.logout(next=request.vars.service or DEFAULT) else: raise HTTP(404) def navbar(self, prefix='Welcome', action=None, separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT, referrer_actions=DEFAULT, mode='default'): def Anr(*a,**b): b['_rel']='nofollow' return A(*a,**b) referrer_actions = [] if not referrer_actions else referrer_actions request = current.request asdropdown = (mode == 'dropdown') T = current.T if isinstance(prefix, str): prefix = T(prefix) if prefix: prefix = prefix.strip() + ' ' if not action: action = self.url(self.settings.function) s1, s2, s3 = separators if URL() == action: next = '' else: next = '?_next=' + urllib.quote(URL(args=request.args, vars=request.get_vars)) href = lambda function: '%s/%s%s' % (action, function, next if referrer_actions is DEFAULT or function in referrer_actions else '') if self.user_id: if user_identifier is DEFAULT: user_identifier = '%(first_name)s' if callable(user_identifier): user_identifier = user_identifier(self.user) elif ((isinstance(user_identifier, str) or type(user_identifier).__name__ == 'lazyT') and re.search(r'%\(.+\)s', user_identifier)): user_identifier = user_identifier % self.user if not user_identifier: user_identifier = '' logout = Anr(T('Logout'), _href='%s/logout?_next=%s' % (action, urllib.quote(self.settings.logout_next))) profile = Anr(T('Profile'), _href=href('profile')) password = Anr(T('Password'), _href=href('change_password')) bar = SPAN( prefix, user_identifier, s1, logout, s3, _class='auth_navbar') if asdropdown: logout = LI(Anr(I(_class='icon-off'), ' ' + T('Logout'), _href='%s/logout?_next=%s' % (action, urllib.quote(self.settings.logout_next)))) # the space before T('Logout') is intentional. It creates a gap between icon and text profile = LI(Anr(I(_class='icon-user'), ' ' + T('Profile'), _href=href('profile'))) password = LI(Anr(I(_class='icon-lock'), ' ' + T('Password'), _href=href('change_password'))) bar = UL(logout, _class='dropdown-menu') # logout will be the last item in list if not 'profile' in self.settings.actions_disabled: if not asdropdown: bar.insert(-1, s2) bar.insert(-1, profile) if not 'change_password' in self.settings.actions_disabled: if not asdropdown: bar.insert(-1, s2) bar.insert(-1, password) else: login = Anr(T('Login'), _href=href('login')) register = Anr(T('Register'), _href=href('register')) retrieve_username = Anr( T('Forgot username?'), _href=href('retrieve_username')) lost_password = Anr( T('Lost password?'), _href=href('request_reset_password')) bar = SPAN(s1, login, s3, _class='auth_navbar') if asdropdown: login = LI(Anr(I(_class='icon-off'), ' ' + T('Login'), _href=href('login'))) # the space before T('Login') is intentional. 
It creates a gap between icon and text register = LI(Anr(I(_class='icon-user'), ' ' + T('Register'), _href=href('register'))) retrieve_username = LI(Anr(I(_class='icon-edit'), ' ' + T( 'Forgot username?'), _href=href('retrieve_username'))) lost_password = LI(Anr(I(_class='icon-lock'), ' ' + T( 'Lost password?'), _href=href('request_reset_password'))) bar = UL(login, _class='dropdown-menu') # login will be the last item in list if not 'register' in self.settings.actions_disabled: if not asdropdown: bar.insert(-1, s2) bar.insert(-1, register) if self.settings.use_username and not 'retrieve_username' \ in self.settings.actions_disabled: if not asdropdown: bar.insert(-1, s2) bar.insert(-1, retrieve_username) if not 'request_reset_password' \ in self.settings.actions_disabled: if not asdropdown: bar.insert(-1, s2) bar.insert(-1, lost_password) if asdropdown: bar.insert(-1, LI('', _class='divider')) if self.user_id: bar = LI(Anr(prefix, user_identifier, _href='#'), bar, _class='dropdown') else: bar = LI(Anr(T('Login'), _href='#'), bar, _class='dropdown') return bar def __get_migrate(self, tablename, migrate=True): if type(migrate).__name__ == 'str': return (migrate + tablename + '.table') elif migrate == False: return False else: return True def enable_record_versioning(self, tables, archive_db=None, archive_names='%(tablename)s_archive', current_record='current_record'): """ to enable full record versioning (including auth tables): auth = Auth(db) auth.define_tables(signature=True) # define our own tables db.define_table('mything',Field('name'),auth.signature) auth.enable_record_versioning(tables=db) tables can be the db (all table) or a list of tables. only tables with modified_by and modified_on fiels (as created by auth.signature) will have versioning. Old record versions will be in table 'mything_archive' automatically defined. when you enable enable_record_versioning, records are never deleted but marked with is_active=False. enable_record_versioning enables a common_filter for every table that filters out records with is_active = False Important: If you use auth.enable_record_versioning, do not use auth.archive or you will end up with duplicates. auth.archive does explicitly what enable_record_versioning does automatically. 
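        A minimal usage sketch (illustration only, table and field names are
        assumed):

            db.define_table('mything', Field('name'), auth.signature)
            auth.enable_record_versioning(tables=db)
            id = db.mything.insert(name='a')
            db(db.mything.id == id).delete()  # row kept, is_active set to False
            # previous versions accumulate in db.mything_archive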
""" tables = [table for table in tables] for table in tables: if 'modified_on' in table.fields() and not current_record in table.fields(): table._enable_record_versioning( archive_db=archive_db, archive_name=archive_names, current_record=current_record) def define_signature(self): db = self.db settings = self.settings request = current.request T = current.T reference_user = 'reference %s' % settings.table_user_name def lazy_user(auth=self): return auth.user_id def represent(id, record=None, s=settings): try: user = s.table_user(id) return '%s %s' % (user.get("first_name", user.get("email")), user.get("last_name", '')) except: return id ondelete = self.settings.ondelete self.signature = db.Table( self.db, 'auth_signature', Field('is_active', 'boolean', default=True, readable=False, writable=False, label=T('Is Active')), Field('created_on', 'datetime', default=request.now, writable=False, readable=False, label=T('Created On')), Field('created_by', reference_user, default=lazy_user, represent=represent, writable=False, readable=False, label=T('Created By'), ondelete=ondelete), Field('modified_on', 'datetime', update=request.now, default=request.now, writable=False, readable=False, label=T('Modified On')), Field('modified_by', reference_user, represent=represent, default=lazy_user, update=lazy_user, writable=False, readable=False, label=T('Modified By'), ondelete=ondelete)) def define_tables(self, username=None, signature=None, migrate=True, fake_migrate=False): """ to be called unless tables are defined manually usages: # defines all needed tables and table files # 'myprefix_auth_user.table', ... auth.define_tables(migrate='myprefix_') # defines all needed tables without migration/table files auth.define_tables(migrate=False) """ db = self.db settings = self.settings if username is None: username = settings.use_username else: settings.use_username = username if not self.signature: self.define_signature() if signature == True: signature_list = [self.signature] elif not signature: signature_list = [] elif isinstance(signature, self.db.Table): signature_list = [signature] else: signature_list = signature is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty) is_crypted = CRYPT(key=settings.hmac_key, min_length=settings.password_min_length) is_unique_email = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name)] if not settings.email_case_sensitive: is_unique_email.insert(1, IS_LOWER()) if not settings.table_user_name in db.tables: passfield = settings.password_field extra_fields = settings.extra_fields.get( settings.table_user_name, []) + signature_list if username or settings.cas_provider: is_unique_username = \ [IS_MATCH('[\w\.\-]+'), IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name)] if not settings.username_case_sensitive: is_unique_username.insert(1, IS_LOWER()) db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field('username', length=128, default='', label=self.messages.label_username, requires=is_unique_username), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', 
label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(username)s')) else: db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(first_name)s %(last_name)s (%(id)s)')) reference_table_user = 'reference %s' % settings.table_user_name if not settings.table_group_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_group_name, []) + signature_list db.define_table( settings.table_group_name, Field('role', length=512, default='', label=self.messages.label_role, requires=IS_NOT_IN_DB( db, '%s.role' % settings.table_group_name)), Field('description', 'text', label=self.messages.label_description), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_group_name, migrate), fake_migrate=fake_migrate, format='%(role)s (%(id)s)')) reference_table_group = 'reference %s' % settings.table_group_name if not settings.table_membership_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_membership_name, []) + signature_list db.define_table( settings.table_membership_name, Field('user_id', reference_table_user, label=self.messages.label_user_id), Field('group_id', reference_table_group, label=self.messages.label_group_id), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_membership_name, migrate), fake_migrate=fake_migrate)) if not settings.table_permission_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_permission_name, []) + signature_list db.define_table( settings.table_permission_name, Field('group_id', reference_table_group, label=self.messages.label_group_id), Field('name', default='default', length=512, label=self.messages.label_name, requires=is_not_empty), Field('table_name', length=512, label=self.messages.label_table_name), Field('record_id', 'integer', default=0, label=self.messages.label_record_id, requires=IS_INT_IN_RANGE(0, 10 ** 9)), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_permission_name, migrate), fake_migrate=fake_migrate)) if not settings.table_event_name in db.tables: db.define_table( settings.table_event_name, Field('time_stamp', 'datetime', default=current.request.now, label=self.messages.label_time_stamp), Field('client_ip', default=current.request.client, 
label=self.messages.label_client_ip), Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('origin', default='auth', length=512, label=self.messages.label_origin, requires=is_not_empty), Field('description', 'text', default='', label=self.messages.label_description, requires=is_not_empty), *settings.extra_fields.get(settings.table_event_name, []), **dict( migrate=self.__get_migrate( settings.table_event_name, migrate), fake_migrate=fake_migrate)) now = current.request.now if settings.cas_domains: if not settings.table_cas_name in db.tables: db.define_table( settings.table_cas_name, Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('created_on', 'datetime', default=now), Field('service', requires=IS_URL()), Field('ticket'), Field('renew', 'boolean', default=False), *settings.extra_fields.get(settings.table_cas_name, []), **dict( migrate=self.__get_migrate( settings.table_cas_name, migrate), fake_migrate=fake_migrate)) if not db._lazy_tables: settings.table_user = db[settings.table_user_name] settings.table_group = db[settings.table_group_name] settings.table_membership = db[settings.table_membership_name] settings.table_permission = db[settings.table_permission_name] settings.table_event = db[settings.table_event_name] if settings.cas_domains: settings.table_cas = db[settings.table_cas_name] if settings.cas_provider: # THIS IS NOT LAZY settings.actions_disabled = \ ['profile', 'register', 'change_password', 'request_reset_password', 'retrieve_username'] from gluon.contrib.login_methods.cas_auth import CasAuth maps = settings.cas_maps if not maps: table_user = self.table_user() maps = dict((name, lambda v, n=name: v.get(n, None)) for name in table_user.fields if name != 'id' and table_user[name].readable) maps['registration_id'] = \ lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user']) actions = [settings.cas_actions['login'], settings.cas_actions['servicevalidate'], settings.cas_actions['logout']] settings.login_form = CasAuth( casversion=2, urlbase=settings.cas_provider, actions=actions, maps=maps) return self def log_event(self, description, vars=None, origin='auth'): """ usage: auth.log_event(description='this happened', origin='auth') """ if not description: return elif self.is_logged_in(): user_id = self.user.id else: user_id = None # user unknown vars = vars or {} self.table_event().insert( description=str(description % vars), origin=origin, user_id=user_id) def get_or_create_user(self, keys, update_fields=['email']): """ Used for alternate login methods: If the user exists already then password is updated. If the user doesn't yet exist, then they are created. """ table_user = self.table_user() user = None checks = [] # make a guess about who this user is for fieldname in ['registration_id', 'username', 'email']: if fieldname in table_user.fields() and \ keys.get(fieldname, None): checks.append(fieldname) value = keys[fieldname] user = table_user(**{fieldname: value}) if user: break if not checks: return None if not 'registration_id' in keys: keys['registration_id'] = keys[checks[0]] # if we think we found the user but registration_id does not match, # make new user if 'registration_id' in checks \ and user \ and user.registration_id \ and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])): user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER? 
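        # Summary comment added for clarity (not in the original source):
        # at this point "user" is the best match found via registration_id,
        # username or email; it has been reset to None above when the stored
        # registration_id disagrees with the one supplied by the provider,
        # so the code below either updates the matched record or inserts a
        # new one.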
        if user:
            update_keys = dict(registration_id=keys['registration_id'])
            for key in update_fields:
                if key in keys:
                    update_keys[key] = keys[key]
            user.update_record(**update_keys)
        elif checks:
            if not 'first_name' in keys and 'first_name' in table_user.fields:
                guess = keys.get('email', 'anonymous').split('@')[0]
                keys['first_name'] = keys.get('username', guess)
            user_id = table_user.insert(**table_user._filter_fields(keys))
            user = self.user = table_user[user_id]
            if self.settings.create_user_groups:
                group_id = self.add_group(
                    self.settings.create_user_groups % user)
                self.add_membership(group_id, user_id)
            if self.settings.everybody_group_id:
                self.add_membership(self.settings.everybody_group_id, user_id)
        return user

    def basic(self, basic_auth_realm=False):
        """
        perform basic login.

        :param basic_auth_realm: optional basic http authentication realm.
        :type basic_auth_realm: str or unicode or function or callable or boolean.

        reads current.request.env.http_authorization
        and returns basic_allowed, basic_accepted, user.

        if basic_auth_realm is a callable, its return value is used to set
        the basic authentication realm; if it is a string, its content is
        used instead. Otherwise the basic authentication realm is set to the
        application name.
        If basic_auth_realm is None or False (the default) the behavior
        is to skip sending any challenge.
        """
        if not self.settings.allow_basic_login:
            return (False, False, False)
        basic = current.request.env.http_authorization
        if basic_auth_realm:
            if callable(basic_auth_realm):
                basic_realm = basic_auth_realm()
            elif isinstance(basic_auth_realm, (unicode, str)):
                basic_realm = unicode(basic_auth_realm)
            elif basic_auth_realm is True:
                basic_realm = u'' + current.request.application
            http_401 = HTTP(401, u'Not Authorized',
                            **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'})
        if not basic or not basic[:6].lower() == 'basic ':
            if basic_auth_realm:
                raise http_401
            return (True, False, False)
        (username, sep, password) = base64.b64decode(basic[6:]).partition(':')
        is_valid_user = sep and self.login_bare(username, password)
        if not is_valid_user and basic_auth_realm:
            raise http_401
        return (True, True, is_valid_user)

    def login_user(self, user):
        """
        login the user = db.auth_user(id)
        """
        from gluon.settings import global_settings
        if global_settings.web2py_runtime_gae:
            user = Row(self.db.auth_user._filter_fields(user, id=True))
            delattr(user, 'password')
        else:
            user = Row(user)
            for key, value in user.items():
                if callable(value) or key == 'password':
                    delattr(user, key)
        current.session.auth = Storage(
            user=user,
            last_visit=current.request.now,
            expiration=self.settings.expiration,
            hmac_key=web2py_uuid())
        self.user = user
        self.update_groups()

    def login_bare(self, username, password):
        """
        logs in user as specified by username (or email) and password
        """
        table_user = self.table_user()
        if self.settings.login_userfield:
            userfield = self.settings.login_userfield
        elif 'username' in table_user.fields:
            userfield = 'username'
        else:
            userfield = 'email'
        passfield = self.settings.password_field
        user = self.db(table_user[userfield] == username).select().first()
        if user and user.get(passfield, False):
            password = table_user[passfield].validate(password)[0]
            if not user.registration_key and password == user[passfield]:
                self.login_user(user)
                return user
        else:
            # user not in database try other login methods
            for login_method in self.settings.login_methods:
                if login_method != self and login_method(username, password):
                    self.user = username
                    return username
        return False

    def cas_login(
        self,
        next=DEFAULT,
onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, version=2, ): request = current.request response = current.response session = current.session db, table = self.db, self.table_cas() session._cas_service = request.vars.service or session._cas_service if not request.env.http_host in self.settings.cas_domains or \ not session._cas_service: raise HTTP(403, 'not authorized') def allow_access(interactivelogin=False): row = table(service=session._cas_service, user_id=self.user.id) if row: ticket = row.ticket else: ticket = 'ST-' + web2py_uuid() table.insert(service=session._cas_service, user_id=self.user.id, ticket=ticket, created_on=request.now, renew=interactivelogin) service = session._cas_service query_sep = '&' if '?' in service else '?' del session._cas_service if 'warn' in request.vars and not interactivelogin: response.headers[ 'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket return A("Continue to %s" % service, _href=service + query_sep + "ticket=" + ticket) else: redirect(service + query_sep + "ticket=" + ticket) if self.is_logged_in() and not 'renew' in request.vars: return allow_access() elif not self.is_logged_in() and 'gateway' in request.vars: redirect(service) def cas_onaccept(form, onaccept=onaccept): if not onaccept is DEFAULT: onaccept(form) return allow_access(interactivelogin=True) return self.login(next, onvalidation, cas_onaccept, log) def cas_validate(self, version=2, proxy=False): request = current.request db, table = self.db, self.table_cas() current.response.headers['Content-Type'] = 'text' ticket = request.vars.ticket renew = 'renew' in request.vars row = table(ticket=ticket) success = False if row: if self.settings.login_userfield: userfield = self.settings.login_userfield elif 'username' in table.fields: userfield = 'username' else: userfield = 'email' # If ticket is a service Ticket and RENEW flag respected if ticket[0:3] == 'ST-' and \ not ((row.renew and renew) ^ renew): user = self.table_user()(row.user_id) row.delete_record() success = True def build_response(body): return '<?xml version="1.0" encoding="UTF-8"?>\n' +\ TAG['cas:serviceResponse']( body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml() if success: if version == 1: message = 'yes\n%s' % user[userfield] else: # assume version 2 username = user.get('username', user[userfield]) message = build_response( TAG['cas:authenticationSuccess']( TAG['cas:user'](username), *[TAG['cas:' + field.name](user[field.name]) for field in self.table_user() if field.readable])) else: if version == 1: message = 'no\n' elif row: message = build_response(TAG['cas:authenticationFailure']()) else: message = build_response( TAG['cas:authenticationFailure']( 'Ticket %s not recognized' % ticket, _code='INVALID TICKET')) raise HTTP(200, message) def login( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a login form method: Auth.login([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() if self.settings.login_userfield: username = self.settings.login_userfield elif 'username' in table_user.fields: username = 'username' else: username = 'email' if 'username' in table_user.fields or \ not self.settings.login_email_validate: tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty) else: tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email) old_requires = table_user[username].requires table_user[username].requires = tmpvalidator request = current.request response = current.response session 
= current.session passfield = self.settings.password_field try: table_user[passfield].requires[-1].min_length = 0 except: pass ### use session for federated login if self.next: session._auth_next = self.next elif session._auth_next: self.next = session._auth_next ### pass if next is DEFAULT: next = self.next or self.settings.login_next if onvalidation is DEFAULT: onvalidation = self.settings.login_onvalidation if onaccept is DEFAULT: onaccept = self.settings.login_onaccept if log is DEFAULT: log = self.messages.login_log onfail = self.settings.login_onfail user = None # default # do we use our own login form, or from a central source? if self.settings.login_form == self: form = SQLFORM( table_user, fields=[username, passfield], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.login_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if self.settings.remember_me_form: ## adds a new input checkbox "remember me for longer" if self.settings.formstyle != 'bootstrap': addrow(form, XML("&nbsp;"), DIV(XML("&nbsp;"), INPUT(_type='checkbox', _class='checkbox', _id="auth_user_remember", _name="remember", ), XML("&nbsp;&nbsp;"), LABEL( self.messages.label_remember_me, _for="auth_user_remember", )), "", self.settings.formstyle, 'auth_user_remember__row') elif self.settings.formstyle == 'bootstrap': addrow(form, "", LABEL( INPUT(_type='checkbox', _id="auth_user_remember", _name="remember"), self.messages.label_remember_me, _class="checkbox"), "", self.settings.formstyle, 'auth_user_remember__row') captcha = self.settings.login_captcha or \ (self.settings.login_captcha != False and self.settings.captcha) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') accepted_form = False if form.accepts(request, session, formname='login', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): accepted_form = True # check for username in db user = self.db(table_user[username] == form.vars[username]).select().first() if user: # user in db, check if registration pending or disabled temp_user = user if temp_user.registration_key == 'pending': response.flash = self.messages.registration_pending return form elif temp_user.registration_key in ('disabled', 'blocked'): response.flash = self.messages.login_disabled return form elif not temp_user.registration_key is None and \ temp_user.registration_key.strip(): response.flash = \ self.messages.registration_verifying return form # try alternate logins 1st as these have the # current version of the password user = None for login_method in self.settings.login_methods: if login_method != self and \ login_method(request.vars[username], request.vars[passfield]): if not self in self.settings.login_methods: # do not store password in db form.vars[passfield] = None user = self.get_or_create_user( form.vars, self.settings.update_fields) break if not user: # alternates have failed, maybe because service inaccessible if self.settings.login_methods[0] == self: # try logging in locally using cached credentials if form.vars.get(passfield, '') == temp_user[passfield]: # success user = temp_user else: # user not in db if not self.settings.alternate_requires_registration: # we're allowed to auto-register users from external systems for login_method in self.settings.login_methods: if login_method != self and \ login_method(request.vars[username], request.vars[passfield]): if not self in self.settings.login_methods: 
# do not store password in db form.vars[passfield] = None user = self.get_or_create_user( form.vars, self.settings.update_fields) break if not user: self.log_event(self.messages.login_failed_log, request.post_vars) # invalid login session.flash = self.messages.invalid_login callback(onfail, None) redirect( self.url(args=request.args, vars=request.get_vars), client_side=self.settings.client_side) else: # use a central authentication server cas = self.settings.login_form cas_user = cas.get_user() if cas_user: cas_user[passfield] = None user = self.get_or_create_user( table_user._filter_fields(cas_user), self.settings.update_fields) elif hasattr(cas, 'login_form'): return cas.login_form() else: # we need to pass through login again before going on next = self.url(self.settings.function, args='login') redirect(cas.login_url(next), client_side=self.settings.client_side) # process authenticated users if user: user = Row(table_user._filter_fields(user, id=True)) # process authenticated users # user wants to be logged in for longer self.login_user(user) session.auth.expiration = \ request.vars.get('remember', False) and \ self.settings.long_expiration or \ self.settings.expiration session.auth.remember = 'remember' in request.vars self.log_event(log, user) session.flash = self.messages.logged_in # how to continue if self.settings.login_form == self: if accepted_form: callback(onaccept, form) if next == session._auth_next: session._auth_next = None next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) table_user[username].requires = old_requires return form elif user: callback(onaccept, None) if next == session._auth_next: del session._auth_next redirect(next, client_side=self.settings.client_side) def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT): """ logout and redirects to login method: Auth.logout ([next=DEFAULT[, onlogout=DEFAULT[, log=DEFAULT]]]) """ if next is DEFAULT: next = self.settings.logout_next if onlogout is DEFAULT: onlogout = self.settings.logout_onlogout if onlogout: onlogout(self.user) if log is DEFAULT: log = self.messages.logout_log if self.user: self.log_event(log, self.user) if self.settings.login_form != self: cas = self.settings.login_form cas_user = cas.get_user() if cas_user: next = cas.logout_url(next) current.session.auth = None current.session.flash = self.messages.logged_out if not next is None: redirect(next) def register( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a registration form method: Auth.register([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() request = current.request response = current.response session = current.session if self.is_logged_in(): redirect(self.settings.logged_url, client_side=self.settings.client_side) if next is DEFAULT: next = self.next or self.settings.register_next if onvalidation is DEFAULT: onvalidation = self.settings.register_onvalidation if onaccept is DEFAULT: onaccept = self.settings.register_onaccept if log is DEFAULT: log = self.messages.register_log table_user = self.table_user() if self.settings.login_userfield: username = self.settings.login_userfield elif 'username' in table_user.fields: username = 'username' else: username = 'email' # Ensure the username field is unique. 
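        # Added note (not in the original source): registration must enforce
        # uniqueness of the login field itself because that field can be
        # 'username' or 'email' depending on the settings; the code below only
        # appends an IS_NOT_IN_DB validator when none is already present in
        # table_user[username].requires.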
unique_validator = IS_NOT_IN_DB(self.db, table_user[username]) if not table_user[username].requires: table_user[username].requires = unique_validator elif isinstance(table_user[username].requires, (list, tuple)): if not any([isinstance(validator, IS_NOT_IN_DB) for validator in table_user[username].requires]): if isinstance(table_user[username].requires, list): table_user[username].requires.append(unique_validator) else: table_user[username].requires += (unique_validator, ) elif not isinstance(table_user[username].requires, IS_NOT_IN_DB): table_user[username].requires = [table_user[username].requires, unique_validator] passfield = self.settings.password_field formstyle = self.settings.formstyle form = SQLFORM(table_user, fields=self.settings.register_fields, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.register_button, delete_label=self.messages.delete_label, formstyle=formstyle, separator=self.settings.label_separator ) if self.settings.register_verify_password: for i, row in enumerate(form[0].components): item = row.element('input', _name=passfield) if item: form.custom.widget.password_two = \ INPUT(_name="password_two", _type="password", requires=IS_EXPR( 'value==%s' % repr(request.vars.get(passfield, None)), error_message=self.messages.mismatched_password)) if formstyle == 'bootstrap': form.custom.widget.password_two[ '_class'] = 'input-xlarge' addrow( form, self.messages.verify_password + self.settings.label_separator, form.custom.widget.password_two, self.messages.verify_password_comment, formstyle, '%s_%s__row' % (table_user, 'password_two'), position=i + 1) break captcha = self.settings.register_captcha or self.settings.captcha if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') table_user.registration_key.default = key = web2py_uuid() if form.accepts(request, session, formname='register', onvalidation=onvalidation, hideerror=self.settings.hideerror): description = self.messages.group_description % form.vars if self.settings.create_user_groups: group_id = self.add_group( self.settings.create_user_groups % form.vars, description) self.add_membership(group_id, form.vars.id) if self.settings.everybody_group_id: self.add_membership( self.settings.everybody_group_id, form.vars.id) if self.settings.registration_requires_verification: link = self.url( self.settings.function, args=('verify_email', key), scheme=True) if not self.settings.mailer or \ not self.settings.mailer.send( to=form.vars.email, subject=self.messages.verify_email_subject, message=self.messages.verify_email % dict(key=key, link=link)): self.db.rollback() response.flash = self.messages.unable_send_email return form session.flash = self.messages.email_sent if self.settings.registration_requires_approval and \ not self.settings.registration_requires_verification: table_user[form.vars.id] = dict(registration_key='pending') session.flash = self.messages.registration_pending elif (not self.settings.registration_requires_verification or self.settings.login_after_registration): if not self.settings.registration_requires_verification: table_user[form.vars.id] = dict(registration_key='') session.flash = self.messages.registration_successful user = self.db( table_user[username] == form.vars[username] ).select().first() self.login_user(user) session.flash = self.messages.logged_in self.log_event(log, form.vars) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, 
client_side=self.settings.client_side) return form def is_logged_in(self): """ checks if the user is logged in and returns True/False. if so user is in auth.user as well as in session.auth.user """ if self.user: return True return False def verify_email( self, next=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ action user to verify the registration email, XXXXXXXXXXXXXXXX method: Auth.verify_email([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ key = getarg(-1) table_user = self.table_user() user = table_user(registration_key=key) if not user: redirect(self.settings.login_url) if self.settings.registration_requires_approval: user.update_record(registration_key='pending') current.session.flash = self.messages.registration_pending else: user.update_record(registration_key='') current.session.flash = self.messages.email_verified # make sure session has same user.registrato_key as db record if current.session.auth and current.session.auth.user: current.session.auth.user.registration_key = user.registration_key if log is DEFAULT: log = self.messages.verify_email_log if next is DEFAULT: next = self.settings.verify_email_next if onaccept is DEFAULT: onaccept = self.settings.verify_email_onaccept self.log_event(log, user) callback(onaccept, user) redirect(next) def retrieve_username( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form to retrieve the user username (only if there is a username field) method: Auth.retrieve_username([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() if not 'username' in table_user.fields: raise HTTP(404) request = current.request response = current.response session = current.session captcha = self.settings.retrieve_username_captcha or \ (self.settings.retrieve_username_captcha != False and self.settings.captcha) if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if next is DEFAULT: next = self.next or self.settings.retrieve_username_next if onvalidation is DEFAULT: onvalidation = self.settings.retrieve_username_onvalidation if onaccept is DEFAULT: onaccept = self.settings.retrieve_username_onaccept if log is DEFAULT: log = self.messages.retrieve_username_log old_requires = table_user.email.requires table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session, formname='retrieve_username', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(email=form.vars.email) if not user: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) username = user.username self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_username_subject, message=self.messages.retrieve_username % dict(username=username)) session.flash = self.messages.email_sent self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def random_password(self): 
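        """
        (docstring added for clarity) returns a 12-character random password:
        three characters from each of lowercase letters, uppercase letters,
        digits and the specials '!#$*', shuffled together; used by
        reset_password_deprecated() when emailing a new password.
        """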
import string import random password = '' specials = r'!#$*' for i in range(0, 3): password += random.choice(string.lowercase) password += random.choice(string.uppercase) password += random.choice(string.digits) password += random.choice(specials) return ''.join(random.sample(password, len(password))) def reset_password_deprecated( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form to reset the user password (deprecated) method: Auth.reset_password_deprecated([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() request = current.request response = current.response session = current.session if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if next is DEFAULT: next = self.next or self.settings.retrieve_password_next if onvalidation is DEFAULT: onvalidation = self.settings.retrieve_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.retrieve_password_onaccept if log is DEFAULT: log = self.messages.retrieve_password_log old_requires = table_user.email.requires table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='retrieve_password', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(email=form.vars.email) if not user: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) elif user.registration_key in ('pending', 'disabled', 'blocked'): current.session.flash = \ self.messages.registration_pending redirect(self.url(args=request.args)) password = self.random_password() passfield = self.settings.password_field d = dict( passfield=str(table_user[passfield].validate(password)[0]), registration_key='') user.update_record(**d) if self.settings.mailer and \ self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_password_subject, message=self.messages.retrieve_password % dict(password=password)): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def reset_password( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form to reset the user password method: Auth.reset_password([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() request = current.request # response = current.response session = current.session if next is DEFAULT: next = self.next or self.settings.reset_password_next try: key = request.vars.key or getarg(-1) t0 = int(key.split('-')[0]) if time.time() - t0 > 60 * 60 * 24: raise Exception user = table_user(reset_password_key=key) if not user: raise Exception except Exception: session.flash = self.messages.invalid_reset_password redirect(next, client_side=self.settings.client_side) passfield = self.settings.password_field form = SQLFORM.factory( Field('new_password', 'password', label=self.messages.new_password, 
requires=self.table_user()[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_reset_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, hideerror=self.settings.hideerror): user.update_record( **{passfield: str(form.vars.new_password), 'registration_key': '', 'reset_password_key': ''}) session.flash = self.messages.password_changed if self.settings.login_after_password_change: self.login_user(user) redirect(next, client_side=self.settings.client_side) return form def request_reset_password( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form to reset the user password method: Auth.reset_password([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() request = current.request response = current.response session = current.session captcha = self.settings.retrieve_password_captcha or \ (self.settings.retrieve_password_captcha != False and self.settings.captcha) if next is DEFAULT: next = self.next or self.settings.request_reset_password_next if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if onvalidation is DEFAULT: onvalidation = self.settings.reset_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.reset_password_onaccept if log is DEFAULT: log = self.messages.reset_password_log table_user.email.requires = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.password_reset_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session, formname='reset_password', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(email=form.vars.email) if not user: session.flash = self.messages.invalid_email redirect(self.url(args=request.args), client_side=self.settings.client_side) elif user.registration_key in ('pending', 'disabled', 'blocked'): session.flash = self.messages.registration_pending redirect(self.url(args=request.args), client_side=self.settings.client_side) if self.email_reset_password(user): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) # old_requires = table_user.email.requires return form def email_reset_password(self, user): reset_password_key = str(int(time.time())) + '-' + web2py_uuid() link = self.url(self.settings.function, args=('reset_password', reset_password_key), scheme=True) if self.settings.mailer.send( to=user.email, subject=self.messages.reset_password_subject, message=self.messages.reset_password % dict(key=reset_password_key, link=link)): user.update_record(reset_password_key=reset_password_key) return True return False def retrieve_password( self, next=DEFAULT, 
onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): if self.settings.reset_password_requires_verification: return self.request_reset_password(next, onvalidation, onaccept, log) else: return self.reset_password_deprecated(next, onvalidation, onaccept, log) def change_password( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form that lets the user change password method: Auth.change_password([next=DEFAULT[, onvalidation=DEFAULT[, onaccept=DEFAULT[, log=DEFAULT]]]]) """ if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) db = self.db table_user = self.table_user() s = db(table_user.id == self.user.id) request = current.request session = current.session if next is DEFAULT: next = self.next or self.settings.change_password_next if onvalidation is DEFAULT: onvalidation = self.settings.change_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.change_password_onaccept if log is DEFAULT: log = self.messages.change_password_log passfield = self.settings.password_field form = SQLFORM.factory( Field('old_password', 'password', label=self.messages.old_password, requires=table_user[passfield].requires), Field('new_password', 'password', label=self.messages.new_password, requires=table_user[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_change_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='change_password', onvalidation=onvalidation, hideerror=self.settings.hideerror): if not form.vars['old_password'] == s.select().first()[passfield]: form.errors['old_password'] = self.messages.invalid_password else: d = {passfield: str(form.vars.new_password)} s.update(**d) session.flash = self.messages.password_changed self.log_event(log, self.user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def profile( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ returns a form that lets the user change his/her profile method: Auth.profile([next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) """ table_user = self.table_user() if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) passfield = self.settings.password_field table_user[passfield].writable = False request = current.request session = current.session if next is DEFAULT: next = self.next or self.settings.profile_next if onvalidation is DEFAULT: onvalidation = self.settings.profile_onvalidation if onaccept is DEFAULT: onaccept = self.settings.profile_onaccept if log is DEFAULT: log = self.messages.profile_log form = SQLFORM( table_user, self.user.id, fields=self.settings.profile_fields, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.profile_save_button, delete_label=self.messages.delete_label, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, deletable=self.settings.allow_delete_accounts, ) if form.accepts(request, session, formname='profile', onvalidation=onvalidation, hideerror=self.settings.hideerror): 
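            # Added note (not in the original source): on a successful submit
            # the in-memory self.user row is refreshed below with the accepted
            # values, so the current session sees the updated profile without
            # requiring a new login.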
self.user.update(table_user._filter_fields(form.vars)) session.flash = self.messages.profile_updated self.log_event(log, self.user) callback(onaccept, form) if form.deleted: return self.logout() if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def is_impersonating(self): return self.is_logged_in() and 'impersonator' in current.session.auth def impersonate(self, user_id=DEFAULT): """ usage: POST TO http://..../impersonate request.post_vars.user_id=<id> set request.post_vars.user_id to 0 to restore original user. requires impersonator is logged in and has_permission('impersonate', 'auth_user', user_id) """ request = current.request session = current.session auth = session.auth table_user = self.table_user() if not self.is_logged_in(): raise HTTP(401, "Not Authorized") current_id = auth.user.id requested_id = user_id if user_id is DEFAULT: user_id = current.request.post_vars.user_id if user_id and user_id != self.user.id and user_id != '0': if not self.has_permission('impersonate', self.settings.table_user_name, user_id): raise HTTP(403, "Forbidden") user = table_user(user_id) if not user: raise HTTP(401, "Not Authorized") auth.impersonator = cPickle.dumps(session) auth.user.update( table_user._filter_fields(user, True)) self.user = auth.user onaccept = self.settings.login_onaccept if onaccept: form = Storage(dict(vars=self.user)) if not isinstance(onaccept,(list, tuple)): onaccept = [onaccept] for callback in onaccept: callback(form) log = self.messages.impersonate_log self.log_event(log, dict(id=current_id, other_id=auth.user.id)) elif user_id in (0, '0'): if self.is_impersonating(): session.clear() session.update(cPickle.loads(auth.impersonator)) self.user = session.auth.user return None if requested_id is DEFAULT and not request.post_vars: return SQLFORM.factory(Field('user_id', 'integer')) return SQLFORM(table_user, user.id, readonly=True) def update_groups(self): if not self.user: return user_groups = self.user_groups = {} if current.session.auth: current.session.auth.user_groups = self.user_groups table_group = self.table_group() table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() for membership in memberships: group = table_group(membership.group_id) if group: user_groups[membership.group_id] = group.role def groups(self): """ displays the groups and their roles for the logged in user """ if not self.is_logged_in(): redirect(self.settings.login_url) table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() table = TABLE() for membership in memberships: table_group = self.db[self.settings.table_group_name] groups = self.db(table_group.id == membership.group_id).select() if groups: group = groups[0] table.append(TR(H3(group.role, '(%s)' % group.id))) table.append(TR(P(group.description))) if not memberships: return None return table def not_authorized(self): """ you can change the view for this page to make it look as you like """ if current.request.ajax: raise HTTP(403, 'ACCESS DENIED') return 'ACCESS DENIED' def requires(self, condition, requires_login=True, otherwise=None): """ decorator that prevents access to action if not logged in """ def decorator(action): def f(*a, **b): basic_allowed, basic_accepted, user = self.basic() user = user or self.user if requires_login: if not user: if current.request.ajax: raise HTTP(401, 
self.messages.ajax_failed_authentication) elif not otherwise is None: if callable(otherwise): return otherwise() redirect(otherwise) elif self.settings.allow_basic_login_only or \ basic_accepted or current.request.is_restful: raise HTTP(403, "Not authorized") else: next = self.here() current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next=' + urllib.quote(next)) if callable(condition): flag = condition() else: flag = condition if not flag: current.session.flash = self.messages.access_denied return call_or_redirect( self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def requires_login(self, otherwise=None): """ decorator that prevents access to action if not logged in """ return self.requires(True, otherwise=otherwise) def requires_membership(self, role=None, group_id=None, otherwise=None): """ decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. """ def has_membership(self=self, group_id=group_id, role=role): return self.has_membership(group_id=group_id, role=role) return self.requires(has_membership, otherwise=otherwise) def requires_permission(self, name, table_name='', record_id=0, otherwise=None): """ decorator that prevents access to action if not logged in or if user logged in is not a member of any group (role) that has 'name' access to 'table_name', 'record_id'. """ def has_permission(self=self, name=name, table_name=table_name, record_id=record_id): return self.has_permission(name, table_name, record_id) return self.requires(has_permission, otherwise=otherwise) def requires_signature(self, otherwise=None): """ decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. """ def verify(): return URL.verify(current.request, user_signature=True) return self.requires(verify, otherwise) def add_group(self, role, description=''): """ creates a group associated to a role """ group_id = self.table_group().insert( role=role, description=description) self.log_event(self.messages.add_group_log, dict(group_id=group_id, role=role)) return group_id def del_group(self, group_id): """ deletes a group """ self.db(self.table_group().id == group_id).delete() self.db(self.table_membership().group_id == group_id).delete() self.db(self.table_permission().group_id == group_id).delete() self.update_groups() self.log_event(self.messages.del_group_log, dict(group_id=group_id)) def id_group(self, role): """ returns the group_id of the group specified by the role """ rows = self.db(self.table_group().role == role).select() if not rows: return None return rows[0].id def user_group(self, user_id=None): """ returns the group_id of the group uniquely associated to this user i.e. 
role=user:[user_id] """ return self.id_group(self.user_group_role(user_id)) def user_group_role(self, user_id=None): if not self.settings.create_user_groups: return None if user_id: user = self.table_user()[user_id] else: user = self.user return self.settings.create_user_groups % user def has_membership(self, group_id=None, user_id=None, role=None): """ checks if user is member of group_id or role """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() if group_id and user_id and self.db((membership.user_id == user_id) & (membership.group_id == group_id)).select(): r = True else: r = False self.log_event(self.messages.has_membership_log, dict(user_id=user_id, group_id=group_id, check=r)) return r def add_membership(self, group_id=None, user_id=None, role=None): """ gives user_id membership of group_id or role if user is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() record = membership(user_id=user_id, group_id=group_id) if record: return record.id else: id = membership.insert(group_id=group_id, user_id=user_id) self.update_groups() self.log_event(self.messages.add_membership_log, dict(user_id=user_id, group_id=group_id)) return id def del_membership(self, group_id=None, user_id=None, role=None): """ revokes membership from group_id to user_id if user_id is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) if not user_id and self.user: user_id = self.user.id membership = self.table_membership() self.log_event(self.messages.del_membership_log, dict(user_id=user_id, group_id=group_id)) ret = self.db(membership.user_id == user_id)(membership.group_id == group_id).delete() self.update_groups() return ret def has_permission( self, name='any', table_name='', record_id=0, user_id=None, group_id=None, ): """ checks if user_id or current logged in user is member of a group that has 'name' permission on 'table_name' and 'record_id' if group_id is passed, it checks whether the group has the permission """ if not group_id and self.settings.everybody_group_id and \ self.has_permission( name, table_name, record_id, user_id=None, group_id=self.settings.everybody_group_id): return True if not user_id and not group_id and self.user: user_id = self.user.id if user_id: membership = self.table_membership() rows = self.db(membership.user_id == user_id).select(membership.group_id) groups = set([row.group_id for row in rows]) if group_id and not group_id in groups: return False else: groups = set([group_id]) permission = self.table_permission() rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == record_id).select(permission.group_id) groups_required = set([row.group_id for row in rows]) if record_id: rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == 0).select(permission.group_id) groups_required = groups_required.union(set([row.group_id for row in rows])) if groups.intersection(groups_required): r = True else: r = False if user_id: self.log_event(self.messages.has_permission_log, dict(user_id=user_id, name=name, 
table_name=table_name, record_id=record_id)) return r def add_permission( self, group_id, name='any', table_name='', record_id=0, ): """ gives group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() if group_id == 0: group_id = self.user_group() record = self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))( permission.record_id == long(record_id)).select().first() if record: id = record.id else: id = permission.insert(group_id=group_id, name=name, table_name=str(table_name), record_id=long(record_id)) self.log_event(self.messages.add_permission_log, dict(permission_id=id, group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return id def del_permission( self, group_id, name='any', table_name='', record_id=0, ): """ revokes group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() self.log_event(self.messages.del_permission_log, dict(group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == long(record_id)).delete() def accessible_query(self, name, table, user_id=None): """ returns a query with all accessible records for user_id or the current logged in user this method does not work on GAE because uses JOIN and IN example: db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL) """ if not user_id: user_id = self.user_id db = self.db if isinstance(table, str) and table in self.db.tables(): table = self.db[table] elif isinstance(table, (Set, Query)): # experimental: build a chained query for all tables if isinstance(table, Set): cquery = table.query else: cquery = table tablenames = db._adapter.tables(cquery) for tablename in tablenames: cquery &= self.accessible_query(name, tablename, user_id=user_id) return cquery if not isinstance(table, str) and\ self.has_permission(name, table, 0, user_id): return table.id > 0 membership = self.table_membership() permission = self.table_permission() query = table.id.belongs( db(membership.user_id == user_id) (membership.group_id == permission.group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) if self.settings.everybody_group_id: query |= table.id.belongs( db(permission.group_id == self.settings.everybody_group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) return query @staticmethod def archive(form, archive_table=None, current_record='current_record', archive_current=False, fields=None): """ If you have a table (db.mytable) that needs full revision history you can just do: form=crud.update(db.mytable,myrecord,onaccept=auth.archive) or form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive) crud.archive will define a new table "mytable_archive" and store a copy of the current record (if archive_current=True) or a copy of the previous record (if archive_current=False) in the newly created table including a reference to the current record. fields allows to specify extra fields that need to be archived. If you want to access such table you need to define it yourself in a model: db.define_table('mytable_archive', Field('current_record',db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. 
crud.archive does not timestamp the stored record unless your original table has a fields like: db.define_table(..., Field('saved_on','datetime', default=request.now,update=request.now,writable=False), Field('saved_by',auth.user, default=auth.user_id,update=auth.user_id,writable=False), there is nothing special about these fields since they are filled before the record is archived. If you want to change the archive table name and the name of the reference field you can do, for example: db.define_table('myhistory', Field('parent_record',db.mytable), db.mytable) and use it as: form=crud.update(db.mytable,myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record')) """ if not archive_current and not form.record: return None table = form.table if not archive_table: archive_table_name = '%s_archive' % table if not archive_table_name in table._db: table._db.define_table( archive_table_name, Field(current_record, table), *[field.clone(unique=False) for field in table]) archive_table = table._db[archive_table_name] new_record = {current_record: form.vars.id} for fieldname in archive_table.fields: if not fieldname in ['id', current_record]: if archive_current and fieldname in form.vars: new_record[fieldname] = form.vars[fieldname] elif form.record and fieldname in form.record: new_record[fieldname] = form.record[fieldname] if fields: new_record.update(fields) id = archive_table.insert(**new_record) return id def wiki(self, slug=None, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, resolve=True, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None): if controller and function: resolve = False if not hasattr(self, '_wiki'): self._wiki = Wiki(self, render=render, manage_permissions=manage_permissions, force_prefix=force_prefix, restrict_search=restrict_search, env=env, extra=extra or {}, menu_groups=menu_groups, templates=templates, migrate=migrate, controller=controller, function=function) else: self._wiki.env.update(env or {}) # if resolve is set to True, process request as wiki call # resolve=False allows initial setup without wiki redirection wiki = None if resolve: action = str(current.request.args(0)).startswith("_") if slug and not action: wiki = self._wiki.read(slug) if isinstance(wiki, dict) and wiki.has_key('content'): # We don't want to return a dict object, just the wiki wiki = wiki['content'] else: wiki = self._wiki() if isinstance(wiki, basestring): wiki = XML(wiki) return wiki def wikimenu(self): """to be used in menu.py for app wide wiki menus""" if (hasattr(self, "_wiki") and self._wiki.settings.controller and self._wiki.settings.function): self._wiki.automenu() class Crud(object): def url(self, f=None, args=None, vars=None): """ this should point to the controller that exposes download and crud """ if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars) def __init__(self, environment, db=None, controller='default'): self.db = db if not db and environment and isinstance(environment, DAL): self.db = environment elif not db: raise SyntaxError("must pass db as first or second argument") self.environment = current settings = self.settings = Settings() settings.auth = None settings.logger = None settings.create_next = None settings.update_next = None settings.controller = controller settings.delete_next = self.url() settings.download_url = self.url('download') settings.create_onvalidation = 
StorageList() settings.update_onvalidation = StorageList() settings.delete_onvalidation = StorageList() settings.create_onaccept = StorageList() settings.update_onaccept = StorageList() settings.update_ondelete = StorageList() settings.delete_onaccept = StorageList() settings.update_deletable = True settings.showid = False settings.keepvalues = False settings.create_captcha = None settings.update_captcha = None settings.captcha = None settings.formstyle = 'table3cols' settings.label_separator = ': ' settings.hideerror = False settings.detect_record_change = True settings.hmac_key = None settings.lock_keys = True messages = self.messages = Messages(current.T) messages.submit_button = 'Submit' messages.delete_label = 'Check to delete' messages.record_created = 'Record Created' messages.record_updated = 'Record Updated' messages.record_deleted = 'Record Deleted' messages.update_log = 'Record %(id)s updated' messages.create_log = 'Record %(id)s created' messages.read_log = 'Record %(id)s read' messages.delete_log = 'Record %(id)s deleted' messages.lock_keys = True def __call__(self): args = current.request.args if len(args) < 1: raise HTTP(404) elif args[0] == 'tables': return self.tables() elif len(args) > 1 and not args(1) in self.db.tables: raise HTTP(404) table = self.db[args(1)] if args[0] == 'create': return self.create(table) elif args[0] == 'select': return self.select(table, linkto=self.url(args='read')) elif args[0] == 'search': form, rows = self.search(table, linkto=self.url(args='read')) return DIV(form, SQLTABLE(rows)) elif args[0] == 'read': return self.read(table, args(2)) elif args[0] == 'update': return self.update(table, args(2)) elif args[0] == 'delete': return self.delete(table, args(2)) else: raise HTTP(404) def log_event(self, message, vars): if self.settings.logger: self.settings.logger.log_event(message, vars, origin='crud') def has_permission(self, name, table, record=0): if not self.settings.auth: return True try: record_id = record.id except: record_id = record return self.settings.auth.has_permission(name, str(table), record_id) def tables(self): return TABLE(*[TR(A(name, _href=self.url(args=('select', name)))) for name in self.db.tables]) @staticmethod def archive(form, archive_table=None, current_record='current_record'): return Auth.archive(form, archive_table=archive_table, current_record=current_record) def update( self, table, record, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, ondelete=DEFAULT, log=DEFAULT, message=DEFAULT, deletable=DEFAULT, formname=DEFAULT, **attributes ): """ method: Crud.update(table, record, [next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT [, message=DEFAULT[, deletable=DEFAULT]]]]]]) """ if not (isinstance(table, self.db.Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, self.db.Table): table = self.db[table] try: record_id = record.id except: record_id = record or 0 if record_id and not self.has_permission('update', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) if not record_id and not self.has_permission('create', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request response = current.response session = current.session if request.extension == 'json' and request.vars.json: request.vars.update(json_parser.loads(request.vars.json)) if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or 
self.settings.update_next if onvalidation is DEFAULT: onvalidation = self.settings.update_onvalidation if onaccept is DEFAULT: onaccept = self.settings.update_onaccept if ondelete is DEFAULT: ondelete = self.settings.update_ondelete if log is DEFAULT: log = self.messages.update_log if deletable is DEFAULT: deletable = self.settings.update_deletable if message is DEFAULT: message = self.messages.record_updated if not 'hidden' in attributes: attributes['hidden'] = {} attributes['hidden']['_next'] = next form = SQLFORM( table, record, showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, deletable=deletable, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, **attributes # contains hidden ) self.accepted = False self.deleted = False captcha = self.settings.update_captcha or self.settings.captcha if record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') captcha = self.settings.create_captcha or self.settings.captcha if not record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if not request.extension in ('html', 'load'): (_session, _formname) = (None, None) else: (_session, _formname) = ( session, '%s/%s' % (table._tablename, form.record_id)) if not formname is DEFAULT: _formname = formname keepvalues = self.settings.keepvalues if request.vars.delete_this_record: keepvalues = False if isinstance(onvalidation, StorageList): onvalidation = onvalidation.get(table._tablename, []) if form.accepts(request, _session, formname=_formname, onvalidation=onvalidation, keepvalues=keepvalues, hideerror=self.settings.hideerror, detect_record_change=self.settings.detect_record_change): self.accepted = True response.flash = message if log: self.log_event(log, form.vars) if request.vars.delete_this_record: self.deleted = True message = self.messages.record_deleted callback(ondelete, form, table._tablename) response.flash = message callback(onaccept, form, table._tablename) if not request.extension in ('html', 'load'): raise HTTP(200, 'RECORD CREATED/UPDATED') if isinstance(next, (list, tuple)): # fix issue with 2.6 next = next[0] if next: # Only redirect when explicit next = replace_id(next, form) session.flash = response.flash redirect(next) elif not request.extension in ('html', 'load'): raise HTTP(401, serializers.json(dict(errors=form.errors))) return form def create( self, table, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, message=DEFAULT, formname=DEFAULT, **attributes ): """ method: Crud.create(table, [next=DEFAULT [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT[, message=DEFAULT]]]]]) """ if next is DEFAULT: next = self.settings.create_next if onvalidation is DEFAULT: onvalidation = self.settings.create_onvalidation if onaccept is DEFAULT: onaccept = self.settings.create_onaccept if log is DEFAULT: log = self.messages.create_log if message is DEFAULT: message = self.messages.record_created return self.update( table, None, next=next, onvalidation=onvalidation, onaccept=onaccept, log=log, message=message, deletable=False, formname=formname, **attributes ) def read(self, table, record): if not (isinstance(table, self.db.Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, self.db.Table): table = self.db[table] if not self.has_permission('read', table, 
record): redirect(self.settings.auth.settings.on_failed_authorization) form = SQLFORM( table, record, readonly=True, comments=False, upload=self.settings.download_url, showid=self.settings.showid, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if not current.request.extension in ('html', 'load'): return table._filter_fields(form.record, id=True) return form def delete( self, table, record_id, next=DEFAULT, message=DEFAULT, ): """ method: Crud.delete(table, record_id, [next=DEFAULT [, message=DEFAULT]]) """ if not (isinstance(table, self.db.Table) or table in self.db.tables): raise HTTP(404) if not isinstance(table, self.db.Table): table = self.db[table] if not self.has_permission('delete', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request session = current.session if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.delete_next if message is DEFAULT: message = self.messages.record_deleted record = table[record_id] if record: callback(self.settings.delete_onvalidation, record) del table[record_id] callback(self.settings.delete_onaccept, record, table._tablename) session.flash = message redirect(next) def rows( self, table, query=None, fields=None, orderby=None, limitby=None, ): if not (isinstance(table, self.db.Table) or table in self.db.tables): raise HTTP(404) if not self.has_permission('select', table): redirect(self.settings.auth.settings.on_failed_authorization) #if record_id and not self.has_permission('select', table): # redirect(self.settings.auth.settings.on_failed_authorization) if not isinstance(table, self.db.Table): table = self.db[table] if not query: query = table.id > 0 if not fields: fields = [field for field in table if field.readable] else: fields = [table[f] if isinstance(f, str) else f for f in fields] rows = self.db(query).select(*fields, **dict(orderby=orderby, limitby=limitby)) return rows def select( self, table, query=None, fields=None, orderby=None, limitby=None, headers=None, **attr ): headers = headers or {} rows = self.rows(table, query, fields, orderby, limitby) if not rows: return None # Nicer than an empty table. 
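# --- Illustrative usage sketch (not part of the original source). ----------
# The Crud methods defined above (create/read/update/delete/rows/select) are
# meant to be called from a web2py controller. A minimal sketch, assuming a
# model that defines `db = DAL(...)`, `auth = Auth(db)` and a table
# `db.mytable` (all placeholder names for this sketch only):
#
#     crud = Crud(db)
#     crud.settings.auth = auth        # optional: enable permission checks
#
#     def manage():
#         # request.args(0) is a record id to update, or None to create
#         form = crud.update(db.mytable, request.args(0))
#         rows = crud.select(db.mytable, fields=['id'])
#         return dict(form=form, rows=rows)
# ---------------------------------------------------------------------------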
if not 'upload' in attr: attr['upload'] = self.url('download') if not current.request.extension in ('html', 'load'): return rows.as_list() if not headers: if isinstance(table, str): table = self.db[table] headers = dict((str(k), k.label) for k in table) return SQLTABLE(rows, headers=headers, **attr) def get_format(self, field): rtable = field._db[field.type[10:]] format = rtable.get('_format', None) if format and isinstance(format, str): return format[2:-2] return field.name def get_query(self, field, op, value, refsearch=False): try: if refsearch: format = self.get_format(field) if op == 'equals': if not refsearch: return field == value else: return lambda row: row[field.name][format] == value elif op == 'not equal': if not refsearch: return field != value else: return lambda row: row[field.name][format] != value elif op == 'greater than': if not refsearch: return field > value else: return lambda row: row[field.name][format] > value elif op == 'less than': if not refsearch: return field < value else: return lambda row: row[field.name][format] < value elif op == 'starts with': if not refsearch: return field.like(value + '%') else: return lambda row: str(row[field.name][format]).startswith(value) elif op == 'ends with': if not refsearch: return field.like('%' + value) else: return lambda row: str(row[field.name][format]).endswith(value) elif op == 'contains': if not refsearch: return field.like('%' + value + '%') else: return lambda row: value in row[field.name][format] except: return None def search(self, *tables, **args): """ Creates a search form and its results for a table Example usage: form, results = crud.search(db.test, queries = ['equals', 'not equal', 'contains'], query_labels={'equals':'Equals', 'not equal':'Not equal'}, fields = ['id','children'], field_labels = { 'id':'ID','children':'Children'}, zero='Please choose', query = (db.test.id > 0)&(db.test.id != 3) ) """ table = tables[0] fields = args.get('fields', table.fields) request = current.request db = self.db if not (isinstance(table, db.Table) or table in db.tables): raise HTTP(404) attributes = {} for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'): if key in args: attributes[key] = args[key] tbl = TABLE() selected = [] refsearch = [] results = [] showall = args.get('showall', False) if showall: selected = fields chkall = args.get('chkall', False) if chkall: for f in fields: request.vars['chk%s' % f] = 'on' ops = args.get('queries', []) zero = args.get('zero', '') if not ops: ops = ['equals', 'not equal', 'greater than', 'less than', 'starts with', 'ends with', 'contains'] ops.insert(0, zero) query_labels = args.get('query_labels', {}) query = args.get('query', table.id > 0) field_labels = args.get('field_labels', {}) for field in fields: field = table[field] if not field.readable: continue fieldname = field.name chkval = request.vars.get('chk' + fieldname, None) txtval = request.vars.get('txt' + fieldname, None) opval = request.vars.get('op' + fieldname, None) row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname, _disabled=(field.type == 'id'), value=(field.type == 'id' or chkval == 'on'))), TD(field_labels.get(fieldname, field.label)), TD(SELECT([OPTION(query_labels.get(op, op), _value=op) for op in ops], _name="op" + fieldname, value=opval)), TD(INPUT(_type="text", _name="txt" + fieldname, _value=txtval, _id='txt' + fieldname, _class=str(field.type)))) tbl.append(row) if request.post_vars and (chkval or field.type == 'id'): if txtval and opval != '': if field.type[0:10] == 'reference ': 
refsearch.append(self.get_query(field, opval, txtval, refsearch=True)) else: value, error = field.validate(txtval) if not error: ### TODO deal with 'starts with', 'ends with', 'contains' on GAE query &= self.get_query(field, opval, value) else: row[3].append(DIV(error, _class='error')) selected.append(field) form = FORM(tbl, INPUT(_type="submit")) if selected: try: results = db(query).select(*selected, **attributes) for r in refsearch: results = results.find(r) except: # hmmm, we should do better here results = None return form, results urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor())) def fetch(url, data=None, headers=None, cookie=Cookie.SimpleCookie(), user_agent='Mozilla/5.0'): headers = headers or {} if not data is None: data = urllib.urlencode(data) if user_agent: headers['User-agent'] = user_agent headers['Cookie'] = ' '.join( ['%s=%s;' % (c.key, c.value) for c in cookie.values()]) try: from google.appengine.api import urlfetch except ImportError: req = urllib2.Request(url, data, headers) html = urllib2.urlopen(req).read() else: method = ((data is None) and urlfetch.GET) or urlfetch.POST while url is not None: response = urlfetch.fetch(url=url, payload=data, method=method, headers=headers, allow_truncated=False, follow_redirects=False, deadline=10) # next request will be a get, so no need to send the data again data = None method = urlfetch.GET # load cookies from the response cookie.load(response.headers.get('set-cookie', '')) url = response.headers.get('location') html = response.content return html regex_geocode = \ re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""") def geocode(address): try: a = urllib.quote(address) txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s' % a) item = regex_geocode.search(txt) (la, lo) = (float(item.group('la')), float(item.group('lo'))) return (la, lo) except: return (0.0, 0.0) def universal_caller(f, *a, **b): c = f.func_code.co_argcount n = f.func_code.co_varnames[:c] defaults = f.func_defaults or [] pos_args = n[0:-len(defaults)] named_args = n[-len(defaults):] arg_dict = {} # Fill the arg_dict with name and value for the submitted, positional values for pos_index, pos_val in enumerate(a[:c]): arg_dict[n[pos_index] ] = pos_val # n[pos_index] is the name of the argument # There might be pos_args left, that are sent as named_values. Gather them as well. # If a argument already is populated with values we simply replaces them. for arg_name in pos_args[len(arg_dict):]: if arg_name in b: arg_dict[arg_name] = b[arg_name] if len(arg_dict) >= len(pos_args): # All the positional arguments is found. The function may now be called. # However, we need to update the arg_dict with the values from the named arguments as well. for arg_name in named_args: if arg_name in b: arg_dict[arg_name] = b[arg_name] return f(**arg_dict) # Raise an error, the function cannot be called. 
raise HTTP(404, "Object does not exist") class Service(object): def __init__(self, environment=None): self.run_procedures = {} self.csv_procedures = {} self.xml_procedures = {} self.rss_procedures = {} self.json_procedures = {} self.jsonrpc_procedures = {} self.jsonrpc2_procedures = {} self.xmlrpc_procedures = {} self.amfrpc_procedures = {} self.amfrpc3_procedures = {} self.soap_procedures = {} def run(self, f): """ example: service = Service() @service.run def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/run/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def csv(self, f): """ example: service = Service() @service.csv def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/csv/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def xml(self, f): """ example: service = Service() @service.xml def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/xml/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def rss(self, f): """ example: service = Service() @service.rss def myfunction(): return dict(title=..., link=..., description=..., created_on=..., entries=[dict(title=..., link=..., description=..., created_on=...]) def call(): return service() Then call it with: wget http://..../app/default/call/rss/myfunction """ self.rss_procedures[f.__name__] = f return f def json(self, f): """ example: service = Service() @service.json def myfunction(a, b): return [{a: b}] def call(): return service() Then call it with: wget http://..../app/default/call/json/myfunction?a=hello&b=world """ self.json_procedures[f.__name__] = f return f def jsonrpc(self, f): """ example: service = Service() @service.jsonrpc def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world """ self.jsonrpc_procedures[f.__name__] = f return f def jsonrpc2(self, f): """ example: service = Service() @service.jsonrpc2 def myfunction(a, b): return a + b def call(): return service() Then call it with: wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2 """ self.jsonrpc2_procedures[f.__name__] = f return f def xmlrpc(self, f): """ example: service = Service() @service.xmlrpc def myfunction(a, b): return a + b def call(): return service() The call it with: wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world """ self.xmlrpc_procedures[f.__name__] = f return f def amfrpc(self, f): """ example: service = Service() @service.amfrpc def myfunction(a, b): return a + b def call(): return service() The call it with: wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world """ self.amfrpc_procedures[f.__name__] = f return f def amfrpc3(self, domain='default'): """ example: service = Service() @service.amfrpc3('domain') def myfunction(a, b): return a + b def call(): return service() The call it with: wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world """ if not isinstance(domain, str): raise SyntaxError("AMF3 requires a domain for function") def _amfrpc3(f): if domain: self.amfrpc3_procedures[domain + '.' 
+ f.__name__] = f else: self.amfrpc3_procedures[f.__name__] = f return f return _amfrpc3 def soap(self, name=None, returns=None, args=None, doc=None): """ example: service = Service() @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,}) def myfunction(a, b): return a + b def call(): return service() The call it with: from gluon.contrib.pysimplesoap.client import SoapClient client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL") response = client.MyFunction(a=1,b=2) return response['result'] Exposes online generated documentation and xml example messages at: - http://..../app/default/call/soap """ def _soap(f): self.soap_procedures[name or f.__name__] = f, returns, args, doc return f return _soap def serve_run(self, args=None): request = current.request if not args: args = request.args if args and args[0] in self.run_procedures: return str(universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars))) self.error() def serve_csv(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/x-csv' if not args: args = request.args def none_exception(value): if isinstance(value, unicode): return value.encode('utf8') if hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') if value is None: return '<NULL>' return value if args and args[0] in self.run_procedures: import types r = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) s = cStringIO.StringIO() if hasattr(r, 'export_to_csv_file'): r.export_to_csv_file(s) elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)): import csv writer = csv.writer(s) writer.writerow(r[0].keys()) for line in r: writer.writerow([none_exception(v) for v in line.values()]) else: import csv writer = csv.writer(s) for line in r: writer.writerow(line) return s.getvalue() self.error() def serve_xml(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/xml' if not args: args = request.args if args and args[0] in self.run_procedures: s = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) if hasattr(s, 'as_list'): s = s.as_list() return serializers.xml(s, quote=False) self.error() def serve_rss(self, args=None): request = current.request response = current.response if not args: args = request.args if args and args[0] in self.rss_procedures: feed = universal_caller(self.rss_procedures[args[0]], *args[1:], **dict(request.vars)) else: self.error() response.headers['Content-Type'] = 'application/rss+xml' return serializers.rss(feed) def serve_json(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' if not args: args = request.args d = dict(request.vars) if args and args[0] in self.json_procedures: s = universal_caller(self.json_procedures[args[0]], *args[1:], **d) if hasattr(s, 'as_list'): s = s.as_list() return response.json(s) self.error() class JsonRpcException(Exception): def __init__(self, code, info): jrpc_error = Service.jsonrpc_errors.get(code) if jrpc_error: self.message, self.description = jrpc_error self.code, self.info = code, info # jsonrpc 2.0 error types. records the following structure {code: (message,meaning)} jsonrpc_errors = { -32700: ("Parse error. 
Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."), -32600: ("Invalid Request", "The JSON sent is not a valid Request object."), -32601: ("Method not found", "The method does not exist / is not available."), -32602: ("Invalid params", "Invalid method parameter(s)."), -32603: ("Internal error", "Internal JSON-RPC error."), -32099: ("Server error", "Reserved for implementation-defined server-errors.")} def serve_jsonrpc(self): def return_response(id, result): return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None}) def return_error(id, code, message, data=None): error = {'name': 'JSONRPCError', 'code': code, 'message': message} if data is not None: error['data'] = data return serializers.json({'id': id, 'version': '1.1', 'error': error, }) request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' methods = self.jsonrpc_procedures data = json_parser.loads(request.body.read()) jsonrpc_2 = data.get('jsonrpc') if jsonrpc_2: #hand over to version 2 of the protocol return self.serve_jsonrpc2(data) id, method, params = data['id'], data['method'], data.get('params', '') if not method in methods: return return_error(id, 100, 'method "%s" does not exist' % method) try: if isinstance(params,dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() return return_response(id, s) except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except BaseException: etype, eval, etb = sys.exc_info() code = 100 message = '%s: %s' % (etype.__name__, eval) data = request.is_local and traceback.format_tb(etb) return return_error(id, code, message, data) except: etype, eval, etb = sys.exc_info() return return_error(id, 100, 'Exception %s: %s' % (etype, eval)) def serve_jsonrpc2(self, data=None, batch_element=False): def return_response(id, result): if not must_respond: return None return serializers.json({'jsonrpc': '2.0', 'id': id, 'result': result}) def return_error(id, code, message=None, data=None): error = {'code': code} if message is None: error['message'] = Service.jsonrpc_errors[code][0] else: error['message'] = message if data is None: error['data'] = Service.jsonrpc_errors[code][1] else: error['data'] = data return serializers.json({'jsonrpc': '2.0', 'id': id, 'error': error}) def validate(data): """ Validate request as defined in: http://www.jsonrpc.org/specification#request_object. :param data: The json object. :type name: str. :returns: - True -- if successful - False -- if no error should be reported (i.e. data is missing 'id' member) :raises: JsonRPCException """ iparms = set(data.keys()) mandatory_args = set(['jsonrpc', 'method']) missing_args = mandatory_args - iparms if missing_args: raise Service.JsonRpcException(-32600, 'Missing arguments %s.' 
% list(missing_args)) if data['jsonrpc'] != '2.0': raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc']) if 'id' not in iparms: return False return True request = current.request response = current.response if not data: response.headers['Content-Type'] = 'application/json; charset=utf-8' try: data = json_parser.loads(request.body.read()) except ValueError: # decoding error in json lib return return_error(None, -32700) except json_parser.JSONDecodeError: # decoding error in simplejson lib return return_error(None, -32700) # Batch handling if isinstance(data, list) and not batch_element: retlist = [] for c in data: retstr = self.serve_jsonrpc2(c, batch_element=True) if retstr: # do not add empty responses retlist.append(retstr) if len(retlist) == 0: # return nothing return '' else: return "[" + ','.join(retlist) + "]" methods = self.jsonrpc2_procedures methods.update(self.jsonrpc_procedures) try: must_respond = validate(data) except Service.JsonRpcException, e: return return_error(None, e.code, e.info) id, method, params = data.get('id'), data['method'], data.get('params', '') if not method in methods: return return_error(id, -32601, data='Method "%s" does not exist' % method) try: if isinstance(params,dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() if must_respond: return return_response(id, s) else: return '' except HTTP, e: raise e except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except BaseException: etype, eval, etb = sys.exc_info() code = -32099 data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb)) return return_error(id, code, data=data) except: etype, eval, etb = sys.exc_info() return return_error(id, -32099, data='Exception %s: %s' % (etype, eval)) def serve_xmlrpc(self): request = current.request response = current.response services = self.xmlrpc_procedures.values() return response.xmlrpc(request, services) def serve_amfrpc(self, version=0): try: import pyamf import pyamf.remoting.gateway except: return "pyamf not installed or not in Python sys.path" request = current.request response = current.response if version == 3: services = self.amfrpc3_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) pyamf_request = pyamf.remoting.decode(request.body) else: services = self.amfrpc_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) context = pyamf.get_context(pyamf.AMF0) pyamf_request = pyamf.remoting.decode(request.body, context) pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion) for name, message in pyamf_request: pyamf_response[name] = base_gateway.getProcessor(message)(message) response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE if version == 3: return pyamf.remoting.encode(pyamf_response).getvalue() else: return pyamf.remoting.encode(pyamf_response, context).getvalue() def serve_soap(self, version="1.1"): try: from contrib.pysimplesoap.server import SoapDispatcher except: return "pysimplesoap not installed in contrib" request = current.request response = current.response procedures = self.soap_procedures location = "%s://%s%s" % ( request.env.wsgi_url_scheme, request.env.http_host, URL(r=request, f="call/soap", vars={})) namespace = 'namespace' in response and response.namespace or location documentation = response.description or '' dispatcher = SoapDispatcher( name=response.title, location=location, action=location, # SOAPAction 
namespace=namespace, prefix='pys', documentation=documentation, ns=True) for method, (function, returns, args, doc) in procedures.iteritems(): dispatcher.register_function(method, function, returns, args, doc) if request.env.request_method == 'POST': # Process normal Soap Operation response.headers['Content-Type'] = 'text/xml' return dispatcher.dispatch(request.body.read()) elif 'WSDL' in request.vars: # Return Web Service Description response.headers['Content-Type'] = 'text/xml' return dispatcher.wsdl() elif 'op' in request.vars: # Return method help webpage response.headers['Content-Type'] = 'text/html' method = request.vars['op'] sample_req_xml, sample_res_xml, doc = dispatcher.help(method) body = [H1("Welcome to Web2Py SOAP webservice gateway"), A("See all webservice operations", _href=URL(r=request, f="call/soap", vars={})), H2(method), P(doc), UL(LI("Location: %s" % dispatcher.location), LI("Namespace: %s" % dispatcher.namespace), LI("SoapAction: %s" % dispatcher.action), ), H3("Sample SOAP XML Request Message:"), CODE(sample_req_xml, language="xml"), H3("Sample SOAP XML Response Message:"), CODE(sample_res_xml, language="xml"), ] return {'body': body} else: # Return general help and method list webpage response.headers['Content-Type'] = 'text/html' body = [H1("Welcome to Web2Py SOAP webservice gateway"), P(response.description), P("The following operations are available"), A("See WSDL for webservice description", _href=URL(r=request, f="call/soap", vars={"WSDL":None})), UL([LI(A("%s: %s" % (method, doc or ''), _href=URL(r=request, f="call/soap", vars={'op': method}))) for method, doc in dispatcher.list_methods()]), ] return {'body': body} def __call__(self): """ register services with: service = Service() @service.run @service.rss @service.json @service.jsonrpc @service.xmlrpc @service.amfrpc @service.amfrpc3('domain') @service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,}) expose services with def call(): return service() call services with http://..../app/default/call/run?[parameters] http://..../app/default/call/rss?[parameters] http://..../app/default/call/json?[parameters] http://..../app/default/call/jsonrpc http://..../app/default/call/xmlrpc http://..../app/default/call/amfrpc http://..../app/default/call/amfrpc3 http://..../app/default/call/soap """ request = current.request if len(request.args) < 1: raise HTTP(404, "Not Found") arg0 = request.args(0) if arg0 == 'run': return self.serve_run(request.args[1:]) elif arg0 == 'rss': return self.serve_rss(request.args[1:]) elif arg0 == 'csv': return self.serve_csv(request.args[1:]) elif arg0 == 'xml': return self.serve_xml(request.args[1:]) elif arg0 == 'json': return self.serve_json(request.args[1:]) elif arg0 == 'jsonrpc': return self.serve_jsonrpc() elif arg0 == 'jsonrpc2': return self.serve_jsonrpc2() elif arg0 == 'xmlrpc': return self.serve_xmlrpc() elif arg0 == 'amfrpc': return self.serve_amfrpc() elif arg0 == 'amfrpc3': return self.serve_amfrpc(3) elif arg0 == 'soap': return self.serve_soap() else: self.error() def error(self): raise HTTP(404, "Object does not exist") def completion(callback): """ Executes a task on completion of the called action. For example: from gluon.tools import completion @completion(lambda d: logging.info(repr(d))) def index(): return dict(message='hello') It logs the output of the function every time input is called. The argument of completion is executed in a new thread. 
""" def _completion(f): def __completion(*a, **b): d = None try: d = f(*a, **b) return d finally: thread.start_new_thread(callback, (d,)) return __completion return _completion def prettydate(d, T=lambda x: x): if isinstance(d, datetime.datetime): dt = datetime.datetime.now() - d elif isinstance(d, datetime.date): dt = datetime.date.today() - d elif not d: return '' else: return '[invalid date]' if dt.days < 0: suffix = ' from now' dt = -dt else: suffix = ' ago' if dt.days >= 2 * 365: return T('%d years' + suffix) % int(dt.days / 365) elif dt.days >= 365: return T('1 year' + suffix) elif dt.days >= 60: return T('%d months' + suffix) % int(dt.days / 30) elif dt.days > 21: return T('1 month' + suffix) elif dt.days >= 14: return T('%d weeks' + suffix) % int(dt.days / 7) elif dt.days >= 7: return T('1 week' + suffix) elif dt.days > 1: return T('%d days' + suffix) % dt.days elif dt.days == 1: return T('1 day' + suffix) elif dt.seconds >= 2 * 60 * 60: return T('%d hours' + suffix) % int(dt.seconds / 3600) elif dt.seconds >= 60 * 60: return T('1 hour' + suffix) elif dt.seconds >= 2 * 60: return T('%d minutes' + suffix) % int(dt.seconds / 60) elif dt.seconds >= 60: return T('1 minute' + suffix) elif dt.seconds > 1: return T('%d seconds' + suffix) % dt.seconds elif dt.seconds == 1: return T('1 second' + suffix) else: return T('now') def test_thread_separation(): def f(): c = PluginManager() lock1.acquire() lock2.acquire() c.x = 7 lock1.release() lock2.release() lock1 = thread.allocate_lock() lock2 = thread.allocate_lock() lock1.acquire() thread.start_new_thread(f, ()) a = PluginManager() a.x = 5 lock1.release() lock2.acquire() return a.x class PluginManager(object): """ Plugin Manager is similar to a storage object but it is a single level singleton this means that multiple instances within the same thread share the same attributes Its constructor is also special. The first argument is the name of the plugin you are defining. The named arguments are parameters needed by the plugin with default values. If the parameters were previous defined, the old values are used. 
For example: ### in some general configuration file: >>> plugins = PluginManager() >>> plugins.me.param1=3 ### within the plugin model >>> _ = PluginManager('me',param1=5,param2=6,param3=7) ### where the plugin is used >>> print plugins.me.param1 3 >>> print plugins.me.param2 6 >>> plugins.me.param3 = 8 >>> print plugins.me.param3 8 Here are some tests: >>> a=PluginManager() >>> a.x=6 >>> b=PluginManager('check') >>> print b.x 6 >>> b=PluginManager() # reset settings >>> print b.x <Storage {}> >>> b.x=7 >>> print a.x 7 >>> a.y.z=8 >>> print b.y.z 8 >>> test_thread_separation() 5 >>> plugins=PluginManager('me',db='mydb') >>> print plugins.me.db mydb >>> print 'me' in plugins True >>> print plugins.me.installed True """ instances = {} def __new__(cls, *a, **b): id = thread.get_ident() lock = thread.allocate_lock() try: lock.acquire() try: return cls.instances[id] except KeyError: instance = object.__new__(cls, *a, **b) cls.instances[id] = instance return instance finally: lock.release() def __init__(self, plugin=None, **defaults): if not plugin: self.__dict__.clear() settings = self.__getattr__(plugin) settings.installed = True settings.update( (k, v) for k, v in defaults.items() if not k in settings) def __getattr__(self, key): if not key in self.__dict__: self.__dict__[key] = Storage() return self.__dict__[key] def keys(self): return self.__dict__.keys() def __contains__(self, key): return key in self.__dict__ class Expose(object): def __init__(self, base=None, basename=None, extensions=None, allow_download=True): """ Usage: def static(): return dict(files=Expose()) or def static(): path = os.path.join(request.folder,'static','public') return dict(files=Expose(path,basename='public')) extensions: an optional list of file extensions for filtering displayed files: ['.py', '.jpg'] allow_download: whether to allow downloading selected files """ current.session.forget() base = base or os.path.join(current.request.folder, 'static') basename = basename or current.request.function self.basename = basename self.args = current.request.raw_args and \ [arg for arg in current.request.raw_args.split('/') if arg] or [] filename = os.path.join(base, *self.args) if not os.path.exists(filename): raise HTTP(404, "FILE NOT FOUND") if not os.path.normpath(filename).startswith(base): raise HTTP(401, "NOT AUTHORIZED") if allow_download and not os.path.isdir(filename): current.response.headers['Content-Type'] = contenttype(filename) raise HTTP(200, open(filename, 'rb'), **current.response.headers) self.path = path = os.path.join(filename, '*') self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path)) if os.path.isdir(f) and not self.isprivate(f)] self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path)) if not os.path.isdir(f) and not self.isprivate(f)] if 'README' in self.filenames: readme = open(os.path.join(filename,'README')).read() self.paragraph = MARKMIN(readme) else: self.paragraph = None if extensions: self.filenames = [f for f in self.filenames if os.path.splitext(f)[-1] in extensions] def breadcrumbs(self, basename): path = [] span = SPAN() span.append(A(basename, _href=URL())) for arg in self.args: span.append('/') path.append(arg) span.append(A(arg, _href=URL(args='/'.join(path)))) return span def table_folders(self): if self.folders: return SPAN(H3('Folders'), TABLE( *[TR(TD(A(folder, _href=URL(args=self.args + [folder])))) for folder in self.folders], **dict(_class="table"))) return '' @staticmethod def isprivate(f): return 'private' in f or f.startswith('.') or f.endswith('~') 
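    # --- Illustrative usage sketch (not part of the original source). ------
    # Expose (defined above) turns a directory into a browsable listing and,
    # when allow_download is left True, streams any file that is requested
    # directly. A minimal controller sketch, assuming the application has a
    # static/public sub-folder ('public' and the action name 'files' are
    # placeholders for this sketch only):
    #
    #     def files():
    #         import os
    #         path = os.path.join(request.folder, 'static', 'public')
    #         return dict(files=Expose(path, basename='public',
    #                                  extensions=['.png', '.pdf']))
    #
    # In the view, {{=files}} serializes the object through Expose.xml()
    # further below, which renders the breadcrumbs, the folder table and the
    # file table; the `extensions` argument restricts which files are listed.
    # ------------------------------------------------------------------------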
@staticmethod def isimage(f): return os.path.splitext(f)[-1].lower() in ( '.png', '.jpg', '.jpeg', '.gif', '.tiff') def table_files(self, width=160): if self.filenames: return SPAN(H3('Files'), TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))), TD(IMG(_src=URL(args=self.args + [f]), _style='max-width:%spx' % width) if width and self.isimage(f) else '')) for f in self.filenames], **dict(_class="table"))) return '' def xml(self): return DIV( H2(self.breadcrumbs(self.basename)), self.paragraph or '', self.table_folders(), self.table_files()).xml() class Wiki(object): everybody = 'everybody' rows_page = 25 def markmin_base(self,body): return MARKMIN(body, extra=self.settings.extra, url=True, environment=self.env, autolinks=lambda link: expand_one(link, {})).xml() def render_tags(self, tags): return DIV( _class='w2p_wiki_tags', *[A(t.strip(), _href=URL(args='_search', vars=dict(q=t))) for t in tags or [] if t.strip()]) def markmin_render(self, page): return self.markmin_base(page.body) + self.render_tags(page.tags).xml() def html_render(self, page): html = page.body # @///function -> http://..../function html = replace_at_urls(html, URL) # http://...jpg -> <img src="http://...jpg/> or embed html = replace_autolinks(html, lambda link: expand_one(link, {})) # @{component:name} -> <script>embed component name</script> html = replace_components(html, self.env) html = html + self.render_tags(page.tags).xml() return html @staticmethod def component(text): """ In wiki docs allows @{component:controller/function/args} which renders as a LOAD(..., ajax=True) """ items = text.split('/') controller, function, args = items[0], items[1], items[2:] return LOAD(controller, function, args=args, ajax=True).xml() def get_render(self): if isinstance(self.settings.render, basestring): r = getattr(self, "%s_render" % self.settings.render) elif callable(self.settings.render): r = self.settings.render else: raise ValueError("Invalid render type %s" % type(render)) return r def __init__(self, auth, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None): settings = self.settings = auth.settings.wiki # render: "markmin", "html", ..., <function> settings.render = render perms = settings.manage_permissions = manage_permissions settings.force_prefix = force_prefix settings.restrict_search = restrict_search settings.extra = extra or {} settings.menu_groups = menu_groups settings.templates = templates settings.controller = controller settings.function = function db = auth.db self.env = env or {} self.env['component'] = Wiki.component self.auth = auth self.wiki_menu_items = None if self.auth.user: self.settings.force_prefix = force_prefix % self.auth.user else: self.settings.force_prefix = force_prefix self.host = current.request.env.http_host table_definitions = [ ('wiki_page', { 'args':[ Field('slug', requires=[IS_SLUG(), IS_NOT_IN_DB(db, 'wiki_page.slug')], writable=False), Field('title', unique=True), Field('body', 'text', notnull=True), Field('tags', 'list:string'), Field('can_read', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('can_edit', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('changelog'), Field('html', 'text', compute=self.get_render(), readable=False, writable=False), auth.signature], 'vars':{'format':'%(title)s', 'migrate':migrate}}), ('wiki_tag', { 'args':[ Field('name'), Field('wiki_page', 'reference 
wiki_page'), auth.signature], 'vars':{'format':'%(title)s', 'migrate':migrate}}), ('wiki_media', { 'args':[ Field('wiki_page', 'reference wiki_page'), Field('title', required=True), Field('filename', 'upload', required=True), auth.signature], 'vars':{'format':'%(title)s', 'migrate':migrate}}), ] # define only non-existent tables for key, value in table_definitions: args = [] if not key in db.tables(): # look for wiki_ extra fields in auth.settings extra_fields = auth.settings.extra_fields if extra_fields: if key in extra_fields: if extra_fields[key]: for field in extra_fields[key]: args.append(field) args += value['args'] db.define_table(key, *args, **value['vars']) if self.settings.templates is None and not \ self.settings.manage_permissions: self.settings.templates = db.wiki_page.tags.contains('template')&\ db.wiki_page.can_read.contains('everybody') def update_tags_insert(page, id, db=db): for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=id) def update_tags_update(dbset, page, db=db): page = dbset.select().first() db(db.wiki_tag.wiki_page == page.id).delete() for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=page.id) db.wiki_page._after_insert.append(update_tags_insert) db.wiki_page._after_update.append(update_tags_update) if (auth.user and check_credentials(current.request, gae_login=False) and not 'wiki_editor' in auth.user_groups.values()): group = db.auth_group(role='wiki_editor') gid = group.id if group else db.auth_group.insert( role='wiki_editor') auth.add_membership(gid) settings.lock_keys = True # WIKI ACCESS POLICY def not_authorized(self, page=None): raise HTTP(401) def can_read(self, page): if 'everybody' in page.can_read or not \ self.settings.manage_permissions: return True elif self.auth.user: groups = self.auth.user_groups.values() if ('wiki_editor' in groups or set(groups).intersection(set(page.can_read + page.can_edit)) or page.created_by == self.auth.user.id): return True return False def can_edit(self, page=None): if not self.auth.user: redirect(self.auth.settings.login_url) groups = self.auth.user_groups.values() return ('wiki_editor' in groups or (page is None and 'wiki_author' in groups) or not page is None and ( set(groups).intersection(set(page.can_edit)) or page.created_by == self.auth.user.id)) def can_manage(self): if not self.auth.user: return False groups = self.auth.user_groups.values() return 'wiki_editor' in groups def can_search(self): return True def can_see_menu(self): if self.settings.menu_groups is None: return True if self.auth.user: groups = self.auth.user_groups.values() if any(t in self.settings.menu_groups for t in groups): return True return False ### END POLICY def automenu(self): """adds the menu if not present""" request = current.request if not self.wiki_menu_items and self.settings.controller and self.settings.function: self.wiki_menu_items = self.menu(self.settings.controller, self.settings.function) current.response.menu += self.wiki_menu_items def __call__(self): request = current.request settings = self.settings settings.controller = settings.controller or request.controller settings.function = settings.function or request.function self.automenu() zero = request.args(0) or 'index' if zero and zero.isdigit(): return self.media(int(zero)) elif not zero or not zero.startswith('_'): return self.read(zero) elif zero == '_edit': return self.edit(request.args(1) or 'index',request.args(2) or 0) elif zero == '_editmedia': return 
self.editmedia(request.args(1) or 'index') elif zero == '_create': return self.create() elif zero == '_pages': return self.pages() elif zero == '_search': return self.search() elif zero == '_recent': ipage = int(request.vars.page or 0) query = self.auth.db.wiki_page.created_by == request.args( 1, cast=int) return self.search(query=query, orderby=~self.auth.db.wiki_page.created_on, limitby=(ipage * self.rows_page, (ipage + 1) * self.rows_page), ) elif zero == '_cloud': return self.cloud() elif zero == '_preview': return self.preview(self.get_render()) def first_paragraph(self, page): if not self.can_read(page): mm = (page.body or '').replace('\r', '') ps = [p for p in mm.split('\n\n') if not p.startswith('#') and p.strip()] if ps: return ps[0] return '' def fix_hostname(self, body): return (body or '').replace('://HOSTNAME', '://%s' % self.host) def read(self, slug): if slug in '_cloud': return self.cloud() elif slug in '_search': return self.search() page = self.auth.db.wiki_page(slug=slug) if not page: redirect(URL(args=('_create', slug))) if not self.can_read(page): return self.not_authorized(page) if current.request.extension == 'html': if not page: url = URL(args=('_edit', slug)) return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn")) else: return dict(title=page.title, slug=page.slug, content=XML(self.fix_hostname(page.html)), tags=page.tags, created_on=page.created_on, modified_on=page.modified_on) elif current.request.extension == 'load': return self.fix_hostname(page.html) if page else '' else: if not page: raise HTTP(404) else: return dict(title=page.title, slug=page.slug, content=page.body, tags=page.tags, created_on=page.created_on, modified_on=page.modified_on) def check_editor(self, role='wiki_editor', act=False): if not self.auth.user: if not act: return False redirect(self.auth.settings.login_url) elif not self.auth.has_membership(role): if not act: return False raise HTTP(401, "Not Authorized") return True def edit(self,slug,from_template=0): auth = self.auth db = auth.db page = db.wiki_page(slug=slug) if not self.can_edit(page): return self.not_authorized(page) title_guess = ' '.join(c.capitalize() for c in slug.split('-')) if not page: if not (self.can_manage() or slug.startswith(self.settings.force_prefix)): current.session.flash = 'slug must have "%s" prefix' \ % self.settings.force_prefix redirect(URL(args=('_create'))) db.wiki_page.can_read.default = [Wiki.everybody] db.wiki_page.can_edit.default = [auth.user_group_role()] db.wiki_page.title.default = title_guess db.wiki_page.slug.default = slug if slug == 'wiki-menu': db.wiki_page.body.default = \ '- Menu Item > @////index\n- - Submenu > http://web2py.com' else: db.wiki_page.body.default = db(db.wiki_page.id==from_template).select(db.wiki_page.body)[0].body if int(from_template) > 0 else '## %s\n\npage content' % title_guess vars = current.request.post_vars if vars.body: vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME') form = SQLFORM(db.wiki_page, page, deletable=True, formstyle='table2cols', showid=False).process() if form.deleted: current.session.flash = 'page deleted' redirect(URL()) elif form.accepted: current.session.flash = 'page created' redirect(URL(args=slug)) script = """ jQuery(function() { if (!jQuery('#wiki_page_body').length) return; var pagecontent = jQuery('#wiki_page_body'); pagecontent.css('font-family', 'Monaco,Menlo,Consolas,"Courier New",monospace'); var prevbutton = jQuery('<button class="btn nopreview">Preview</button>'); var preview = jQuery('<div 
id="preview"></div>').hide(); var previewmedia = jQuery('<div id="previewmedia"></div>'); var form = pagecontent.closest('form'); preview.insertBefore(form); prevbutton.insertBefore(form); if(%(link_media)s) { var mediabutton = jQuery('<button class="btn nopreview">Media</button>'); mediabutton.insertBefore(form); previewmedia.insertBefore(form); mediabutton.toggle(function() { web2py_component('%(urlmedia)s', 'previewmedia'); }, function() { previewmedia.empty(); }); } prevbutton.click(function(e) { e.preventDefault(); if (prevbutton.hasClass('nopreview')) { prevbutton.addClass('preview').removeClass( 'nopreview').html('Edit Source'); web2py_ajax_page('post', '%(url)s', {body : jQuery('#wiki_page_body').val()}, 'preview'); form.fadeOut('fast', function() {preview.fadeIn()}); } else { prevbutton.addClass( 'nopreview').removeClass('preview').html('Preview'); preview.fadeOut('fast', function() {form.fadeIn()}); } }) }) """ % dict(url=URL(args=('_preview', slug)),link_media=('true' if page else 'false'), urlmedia=URL(extension='load', args=('_editmedia',slug), vars=dict(embedded=1))) return dict(content=TAG[''](form, SCRIPT(script))) def editmedia(self, slug): auth = self.auth db = auth.db page = db.wiki_page(slug=slug) if not (page and self.can_edit(page)): return self.not_authorized(page) self.auth.db.wiki_media.id.represent = lambda id, row: \ id if not row.filename else \ SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1])) self.auth.db.wiki_media.wiki_page.default = page.id self.auth.db.wiki_media.wiki_page.writable = False links = [] csv = True create = True if current.request.vars.embedded: script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;" fragment = self.auth.db.wiki_media.id.represent csv = False create = False links=[ lambda row: A('copy into source', _href='#', _onclick=script % (fragment(row.id, row))) ] content = SQLFORM.grid( self.auth.db.wiki_media.wiki_page == page.id, orderby=self.auth.db.wiki_media.title, links = links, csv = csv, create = create, args=['_editmedia', slug], user_signature=False) return dict(content=content) def create(self): if not self.can_edit(): return self.not_authorized() db = self.auth.db slugs=db(db.wiki_page.id>0).select(db.wiki_page.id,db.wiki_page.slug) options=[OPTION(row.slug,_value=row.id) for row in slugs] options.insert(0, OPTION('',_value='')) fields = [Field("slug", default=current.request.args(1) or self.settings.force_prefix, requires=(IS_SLUG(), IS_NOT_IN_DB(db,db.wiki_page.slug))),] if self.settings.templates: fields.append( Field("from_template", "reference wiki_page", requires=IS_EMPTY_OR( IS_IN_DB(db(self.settings.templates), db.wiki_page._id, '%(slug)s')), comment=current.T( "Choose Template or empty for new Page"))) form = SQLFORM.factory(*fields, **dict(_class="well")) form.element("[type=submit]").attributes["_value"] = \ current.T("Create Page from Slug") if form.process().accepted: form.vars.from_template = 0 if not form.vars.from_template \ else form.vars.from_template redirect(URL(args=('_edit', form.vars.slug,form.vars.from_template or 0))) # added param return dict(content=form) def pages(self): if not self.can_manage(): return self.not_authorized() self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN( '@////%s' % slug) self.auth.db.wiki_page.title.represent = lambda title, row: \ A(title, _href=URL(args=row.slug)) wiki_table = self.auth.db.wiki_page content = SQLFORM.grid( wiki_table, fields = [wiki_table.slug, 
wiki_table.title, wiki_table.tags, wiki_table.can_read, wiki_table.can_edit], links=[ lambda row: A('edit', _href=URL(args=('_edit', row.slug)),_class='btn'), lambda row: A('media', _href=URL(args=('_editmedia', row.slug)),_class='btn')], details=False, editable=False, deletable=False, create=False, orderby=self.auth.db.wiki_page.title, args=['_pages'], user_signature=False) return dict(content=content) def media(self, id): request, db = current.request, self.auth.db media = db.wiki_media(id) if media: if self.settings.manage_permissions: page = db.wiki_page(media.wiki_page) if not self.can_read(page): return self.not_authorized(page) request.args = [media.filename] return current.response.download(request, db) else: raise HTTP(404) def menu(self, controller='default', function='index'): db = self.auth.db request = current.request menu_page = db.wiki_page(slug='wiki-menu') menu = [] if menu_page: tree = {'': menu} regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)') for match in regex.finditer(self.fix_hostname(menu_page.body)): base = match.group('base').replace(' ', '') title = match.group('title') link = match.group('link') title_page = None if link.startswith('@'): items = link[2:].split('/') if len(items) > 3: title_page = items[3] link = URL(a=items[0] or None, c=items[1] or controller, f=items[2] or function, args=items[3:]) parent = tree.get(base[1:], tree['']) subtree = [] tree[base] = subtree parent.append((current.T(title), request.args(0) == title_page, link, subtree)) if self.can_see_menu(): submenu = [] menu.append((current.T('[Wiki]'), None, None, submenu)) if URL() == URL(controller, function): if not str(request.args(0)).startswith('_'): slug = request.args(0) or 'index' mode = 1 elif request.args(0) == '_edit': slug = request.args(1) or 'index' mode = 2 elif request.args(0) == '_editmedia': slug = request.args(1) or 'index' mode = 3 else: mode = 0 if mode in (2, 3): submenu.append((current.T('View Page'), None, URL(controller, function, args=slug))) if mode in (1, 3): submenu.append((current.T('Edit Page'), None, URL(controller, function, args=('_edit', slug)))) if mode in (1, 2): submenu.append((current.T('Edit Page Media'), None, URL(controller, function, args=('_editmedia', slug)))) submenu.append((current.T('Create New Page'), None, URL(controller, function, args=('_create')))) # Moved next if to inside self.auth.user check if self.can_manage(): submenu.append((current.T('Manage Pages'), None, URL(controller, function, args=('_pages')))) submenu.append((current.T('Edit Menu'), None, URL(controller, function, args=('_edit', 'wiki-menu')))) # Also moved inside self.auth.user check submenu.append((current.T('Search Pages'), None, URL(controller, function, args=('_search')))) return menu def search(self, tags=None, query=None, cloud=True, preview=True, limitby=(0, 100), orderby=None): if not self.can_search(): return self.not_authorized() request = current.request content = CAT() if tags is None and query is None: form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(), value=request.vars.q), INPUT(_type="submit", _value=current.T('Search')), _method='GET') content.append(DIV(form, _class='w2p_wiki_form')) if request.vars.q: tags = [v.strip() for v in request.vars.q.split(',')] tags = [v.lower() for v in tags if v] if tags or not query is None: db = self.auth.db count = db.wiki_tag.wiki_page.count() fields = [db.wiki_page.id, db.wiki_page.slug, db.wiki_page.title, db.wiki_page.tags, db.wiki_page.can_read] if preview: 
fields.append(db.wiki_page.body) if query is None: query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\ (db.wiki_tag.name.belongs(tags)) query = query | db.wiki_page.title.contains(request.vars.q) if self.settings.restrict_search and not self.manage(): query = query & (db.wiki_page.created_by == self.auth.user_id) pages = db(query).select(count, *fields, **dict(orderby=orderby or ~count, groupby=reduce(lambda a, b: a | b, fields), distinct=True, limitby=limitby)) if request.extension in ('html', 'load'): if not pages: content.append(DIV(current.T("No results"), _class='w2p_wiki_form')) def link(t): return A(t, _href=URL(args='_search', vars=dict(q=t))) items = [DIV(H3(A(p.wiki_page.title, _href=URL( args=p.wiki_page.slug))), MARKMIN(self.first_paragraph(p.wiki_page)) if preview else '', DIV(_class='w2p_wiki_tags', *[link(t.strip()) for t in p.wiki_page.tags or [] if t.strip()]), _class='w2p_wiki_search_item') for p in pages] content.append(DIV(_class='w2p_wiki_pages', *items)) else: cloud = False content = [p.wiki_page.as_dict() for p in pages] elif cloud: content.append(self.cloud()['content']) if request.extension == 'load': return content return dict(content=content) def cloud(self): db = self.auth.db count = db.wiki_tag.wiki_page.count(distinct=True) ids = db(db.wiki_tag).select( db.wiki_tag.name, count, distinct=True, groupby=db.wiki_tag.name, orderby=~count, limitby=(0, 20)) if ids: a, b = ids[0](count), ids[-1](count) def style(c): STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem' size = (1.5 * (c - b) / max(a - b, 1) + 1.3) return STYLE % (1.3, size) items = [] for item in ids: items.append(A(item.wiki_tag.name, _style=style(item(count)), _href=URL(args='_search', vars=dict(q=item.wiki_tag.name)))) items.append(' ') return dict(content=DIV(_class='w2p_cloud', *items)) def preview(self, render): request = current.request return render(request.post_vars) if __name__ == '__main__': import doctest doctest.testmod()
Python
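The Wiki.menu method in the code above builds its navigation tree by scanning the special 'wiki-menu' page body with a regular expression: each line of the form "- Title > link" becomes a menu entry, and the number of leading dashes sets the nesting depth. Below is a minimal sketch of that parsing step using the same pattern; the sample menu body and the printed output are invented for illustration, not taken from web2py.

import re

# Same pattern Wiki.menu compiles above: 'base' is the run of dashes (depth),
# 'title' is the label, 'link' is whatever follows the '>' separator.
regex = re.compile(
    r'[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')

menu_body = """
- Home > /init/default/index
- Docs > /init/default/docs
- - Tutorial > /init/default/docs/tutorial
"""

for match in regex.finditer(menu_body):
    depth = len(match.group('base').replace(' ', ''))  # '-' -> 1, '- -' -> 2
    print('%d %s -> %s' % (depth, match.group('title'), match.group('link')))
# 1 Home -> /init/default/index
# 1 Docs -> /init/default/docs
# 2 Tutorial -> /init/default/docs/tutorial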
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Functions required to execute app components
============================================

FOR INTERNAL USE ONLY
"""

from os import stat
import thread
import logging
from fileutils import read_file

cfs = {}  # for speed-up
cfs_lock = thread.allocate_lock()  # and thread safety


def getcfs(key, filename, filter=None):
    """
    Caches the *filtered* file `filename` with `key` until the file is
    modified.

    :param key: the cache key
    :param filename: the file to cache
    :param filter: is the function used for filtering. Normally `filename` is
        a .py file and `filter` is a function that bytecode compiles the file.
        In this way the bytecode compiled file is cached. (Default = None)

    This is used on Google App Engine since pyc files cannot be saved.
    """
    try:
        t = stat(filename).st_mtime
    except OSError:
        return filter() if callable(filter) else ''
    cfs_lock.acquire()
    item = cfs.get(key, None)
    cfs_lock.release()
    if item and item[0] == t:
        return item[1]
    if not callable(filter):
        data = read_file(filename)
    else:
        data = filter()
    cfs_lock.acquire()
    cfs[key] = (t, data)
    cfs_lock.release()
    return data
Python
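getcfs above is web2py's modification-time cache: the first call stores (mtime, filtered data) under a key, and later calls return the cached value until os.stat reports a newer st_mtime. Here is a usage sketch, assuming it runs alongside the module above so getcfs and read_file are in scope; the model path and compile_model helper are purely illustrative.

FILENAME = 'applications/welcome/models/db.py'  # hypothetical file to cache

def compile_model():
    # filter callable: turn the source into a code object worth caching
    return compile(read_file(FILENAME), FILENAME, 'exec')

code = getcfs(FILENAME, FILENAME, filter=compile_model)  # compiles and caches
code = getcfs(FILENAME, FILENAME, filter=compile_model)  # cache hit: same mtime
# Editing or touching the file bumps st_mtime, so the next call recompiles.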
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) CONTENT_TYPE dictionary created against freedesktop.org' shared mime info database version 1.1. Deviations from official standards: - '.md': 'application/x-genesis-rom' --> 'text/x-markdown' - '.png': 'image/x-apple-ios-png' --> 'image/png' Additions: - '.load': 'text/html' - '.json': 'application/json' - '.jsonp': 'application/jsonp' - '.pickle': 'application/python-pickle' - '.w2p': 'application/w2p' """ __all__ = ['contenttype'] CONTENT_TYPE = { '.123': 'application/vnd.lotus-1-2-3', '.3ds': 'image/x-3ds', '.3g2': 'video/3gpp2', '.3ga': 'video/3gpp', '.3gp': 'video/3gpp', '.3gp2': 'video/3gpp2', '.3gpp': 'video/3gpp', '.3gpp2': 'video/3gpp2', '.602': 'application/x-t602', '.669': 'audio/x-mod', '.7z': 'application/x-7z-compressed', '.a': 'application/x-archive', '.aac': 'audio/aac', '.abw': 'application/x-abiword', '.abw.crashed': 'application/x-abiword', '.abw.gz': 'application/x-abiword', '.ac3': 'audio/ac3', '.ace': 'application/x-ace', '.adb': 'text/x-adasrc', '.ads': 'text/x-adasrc', '.afm': 'application/x-font-afm', '.ag': 'image/x-applix-graphics', '.ai': 'application/illustrator', '.aif': 'audio/x-aiff', '.aifc': 'audio/x-aifc', '.aiff': 'audio/x-aiff', '.aiffc': 'audio/x-aifc', '.al': 'application/x-perl', '.alz': 'application/x-alz', '.amr': 'audio/amr', '.amz': 'audio/x-amzxml', '.ani': 'application/x-navi-animation', '.anim[1-9j]': 'video/x-anim', '.anx': 'application/annodex', '.ape': 'audio/x-ape', '.apk': 'application/vnd.android.package-archive', '.ar': 'application/x-archive', '.arj': 'application/x-arj', '.arw': 'image/x-sony-arw', '.as': 'application/x-applix-spreadsheet', '.asc': 'text/plain', '.asf': 'video/x-ms-asf', '.asp': 'application/x-asp', '.ass': 'text/x-ssa', '.asx': 'audio/x-ms-asx', '.atom': 'application/atom+xml', '.au': 'audio/basic', '.avf': 'video/x-msvideo', '.avi': 'video/x-msvideo', '.aw': 'application/x-applix-word', '.awb': 'audio/amr-wb', '.awk': 'application/x-awk', '.axa': 'audio/annodex', '.axv': 'video/annodex', '.bak': 'application/x-trash', '.bcpio': 'application/x-bcpio', '.bdf': 'application/x-font-bdf', '.bdm': 'video/mp2t', '.bdmv': 'video/mp2t', '.bib': 'text/x-bibtex', '.bin': 'application/octet-stream', '.blend': 'application/x-blender', '.blender': 'application/x-blender', '.bmp': 'image/bmp', '.bz': 'application/x-bzip', '.bz2': 'application/x-bzip', '.c': 'text/x-csrc', '.c++': 'text/x-c++src', '.cab': 'application/vnd.ms-cab-compressed', '.cap': 'application/vnd.tcpdump.pcap', '.cb7': 'application/x-cb7', '.cbl': 'text/x-cobol', '.cbr': 'application/x-cbr', '.cbt': 'application/x-cbt', '.cbz': 'application/x-cbz', '.cc': 'text/x-c++src', '.ccmx': 'application/x-ccmx', '.cdf': 'application/x-netcdf', '.cdr': 'application/vnd.corel-draw', '.cer': 'application/pkix-cert', '.cert': 'application/x-x509-ca-cert', '.cgm': 'image/cgm', '.chm': 'application/vnd.ms-htmlhelp', '.chrt': 'application/x-kchart', '.class': 'application/x-java', '.clpi': 'video/mp2t', '.cls': 'text/x-tex', '.cmake': 'text/x-cmake', '.cob': 'text/x-cobol', '.cpi': 'video/mp2t', '.cpio': 'application/x-cpio', '.cpio.gz': 'application/x-cpio-compressed', '.cpp': 'text/x-c++src', '.cr2': 'image/x-canon-cr2', '.crl': 'application/pkix-crl', '.crt': 'application/x-x509-ca-cert', '.crw': 'image/x-canon-crw', '.cs': 'text/x-csharp', '.csh': 'application/x-csh', 
'.css': 'text/css', '.cssl': 'text/css', '.csv': 'text/csv', '.cue': 'application/x-cue', '.cur': 'image/x-win-bitmap', '.cxx': 'text/x-c++src', '.d': 'text/x-dsrc', '.dar': 'application/x-dar', '.dbf': 'application/x-dbf', '.dc': 'application/x-dc-rom', '.dcl': 'text/x-dcl', '.dcm': 'application/dicom', '.dcr': 'image/x-kodak-dcr', '.dds': 'image/x-dds', '.deb': 'application/x-deb', '.der': 'application/x-x509-ca-cert', '.desktop': 'application/x-desktop', '.di': 'text/x-dsrc', '.dia': 'application/x-dia-diagram', '.diff': 'text/x-patch', '.divx': 'video/x-msvideo', '.djv': 'image/vnd.djvu', '.djvu': 'image/vnd.djvu', '.dmg': 'application/x-apple-diskimage', '.dmp': 'application/vnd.tcpdump.pcap', '.dng': 'image/x-adobe-dng', '.doc': 'application/msword', '.docbook': 'application/x-docbook+xml', '.docm': 'application/vnd.ms-word.document.macroenabled.12', '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', '.dot': 'text/vnd.graphviz', '.dotm': 'application/vnd.ms-word.template.macroenabled.12', '.dotx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.template', '.dsl': 'text/x-dsl', '.dtd': 'application/xml-dtd', '.dts': 'audio/vnd.dts', '.dtshd': 'audio/vnd.dts.hd', '.dtx': 'text/x-tex', '.dv': 'video/dv', '.dvi': 'application/x-dvi', '.dvi.bz2': 'application/x-bzdvi', '.dvi.gz': 'application/x-gzdvi', '.dwg': 'image/vnd.dwg', '.dxf': 'image/vnd.dxf', '.e': 'text/x-eiffel', '.egon': 'application/x-egon', '.eif': 'text/x-eiffel', '.el': 'text/x-emacs-lisp', '.emf': 'image/x-emf', '.eml': 'message/rfc822', '.emp': 'application/vnd.emusic-emusic_package', '.ent': 'application/xml-external-parsed-entity', '.eps': 'image/x-eps', '.eps.bz2': 'image/x-bzeps', '.eps.gz': 'image/x-gzeps', '.epsf': 'image/x-eps', '.epsf.bz2': 'image/x-bzeps', '.epsf.gz': 'image/x-gzeps', '.epsi': 'image/x-eps', '.epsi.bz2': 'image/x-bzeps', '.epsi.gz': 'image/x-gzeps', '.epub': 'application/epub+zip', '.erl': 'text/x-erlang', '.es': 'application/ecmascript', '.etheme': 'application/x-e-theme', '.etx': 'text/x-setext', '.exe': 'application/x-ms-dos-executable', '.exr': 'image/x-exr', '.ez': 'application/andrew-inset', '.f': 'text/x-fortran', '.f4a': 'audio/mp4', '.f4b': 'audio/x-m4b', '.f4v': 'video/mp4', '.f90': 'text/x-fortran', '.f95': 'text/x-fortran', '.fb2': 'application/x-fictionbook+xml', '.fig': 'image/x-xfig', '.fits': 'image/fits', '.fl': 'application/x-fluid', '.flac': 'audio/flac', '.flc': 'video/x-flic', '.fli': 'video/x-flic', '.flv': 'video/x-flv', '.flw': 'application/x-kivio', '.fo': 'text/x-xslfo', '.fodg': 'application/vnd.oasis.opendocument.graphics-flat-xml', '.fodp': 'application/vnd.oasis.opendocument.presentation-flat-xml', '.fods': 'application/vnd.oasis.opendocument.spreadsheet-flat-xml', '.fodt': 'application/vnd.oasis.opendocument.text-flat-xml', '.for': 'text/x-fortran', '.fxm': 'video/x-javafx', '.g3': 'image/fax-g3', '.gb': 'application/x-gameboy-rom', '.gba': 'application/x-gba-rom', '.gcrd': 'text/vcard', '.ged': 'application/x-gedcom', '.gedcom': 'application/x-gedcom', '.gem': 'application/x-tar', '.gen': 'application/x-genesis-rom', '.gf': 'application/x-tex-gf', '.gg': 'application/x-sms-rom', '.gif': 'image/gif', '.glade': 'application/x-glade', '.gml': 'application/gml+xml', '.gmo': 'application/x-gettext-translation', '.gnc': 'application/x-gnucash', '.gnd': 'application/gnunet-directory', '.gnucash': 'application/x-gnucash', '.gnumeric': 'application/x-gnumeric', '.gnuplot': 'application/x-gnuplot', '.go': 'text/x-go', '.gp': 
'application/x-gnuplot', '.gpg': 'application/pgp-encrypted', '.gplt': 'application/x-gnuplot', '.gra': 'application/x-graphite', '.gsf': 'application/x-font-type1', '.gsm': 'audio/x-gsm', '.gtar': 'application/x-tar', '.gv': 'text/vnd.graphviz', '.gvp': 'text/x-google-video-pointer', '.gz': 'application/gzip', '.h': 'text/x-chdr', '.h++': 'text/x-c++hdr', '.h4': 'application/x-hdf', '.h5': 'application/x-hdf', '.hdf': 'application/x-hdf', '.hdf4': 'application/x-hdf', '.hdf5': 'application/x-hdf', '.hh': 'text/x-c++hdr', '.hlp': 'application/winhlp', '.hp': 'text/x-c++hdr', '.hpgl': 'application/vnd.hp-hpgl', '.hpp': 'text/x-c++hdr', '.hs': 'text/x-haskell', '.htm': 'text/html', '.html': 'text/html', '.hwp': 'application/x-hwp', '.hwt': 'application/x-hwt', '.hxx': 'text/x-c++hdr', '.ica': 'application/x-ica', '.icb': 'image/x-tga', '.icc': 'application/vnd.iccprofile', '.icm': 'application/vnd.iccprofile', '.icns': 'image/x-icns', '.ico': 'image/vnd.microsoft.icon', '.ics': 'text/calendar', '.idl': 'text/x-idl', '.ief': 'image/ief', '.iff': 'image/x-ilbm', '.ilbm': 'image/x-ilbm', '.ime': 'text/x-imelody', '.imy': 'text/x-imelody', '.ins': 'text/x-tex', '.iptables': 'text/x-iptables', '.iso': 'application/x-cd-image', '.iso9660': 'application/x-cd-image', '.it': 'audio/x-it', '.it87': 'application/x-it87', '.j2k': 'image/jp2', '.jad': 'text/vnd.sun.j2me.app-descriptor', '.jar': 'application/x-java-archive', '.java': 'text/x-java', '.jceks': 'application/x-java-jce-keystore', '.jks': 'application/x-java-keystore', '.jng': 'image/x-jng', '.jnlp': 'application/x-java-jnlp-file', '.jp2': 'image/jp2', '.jpc': 'image/jp2', '.jpe': 'image/jpeg', '.jpeg': 'image/jpeg', '.jpf': 'image/jp2', '.jpg': 'image/jpeg', '.jpr': 'application/x-jbuilder-project', '.jpx': 'image/jp2', '.js': 'application/javascript', '.json': 'application/json', '.jsonp': 'application/jsonp', '.k25': 'image/x-kodak-k25', '.kar': 'audio/midi', '.karbon': 'application/x-karbon', '.kdc': 'image/x-kodak-kdc', '.kdelnk': 'application/x-desktop', '.kexi': 'application/x-kexiproject-sqlite3', '.kexic': 'application/x-kexi-connectiondata', '.kexis': 'application/x-kexiproject-shortcut', '.kfo': 'application/x-kformula', '.kil': 'application/x-killustrator', '.kino': 'application/smil', '.kml': 'application/vnd.google-earth.kml+xml', '.kmz': 'application/vnd.google-earth.kmz', '.kon': 'application/x-kontour', '.kpm': 'application/x-kpovmodeler', '.kpr': 'application/x-kpresenter', '.kpt': 'application/x-kpresenter', '.kra': 'application/x-krita', '.ks': 'application/x-java-keystore', '.ksp': 'application/x-kspread', '.kud': 'application/x-kugar', '.kwd': 'application/x-kword', '.kwt': 'application/x-kword', '.la': 'application/x-shared-library-la', '.latex': 'text/x-tex', '.lbm': 'image/x-ilbm', '.ldif': 'text/x-ldif', '.lha': 'application/x-lha', '.lhs': 'text/x-literate-haskell', '.lhz': 'application/x-lhz', '.load' : 'text/html', '.log': 'text/x-log', '.lrz': 'application/x-lrzip', '.ltx': 'text/x-tex', '.lua': 'text/x-lua', '.lwo': 'image/x-lwo', '.lwob': 'image/x-lwo', '.lwp': 'application/vnd.lotus-wordpro', '.lws': 'image/x-lws', '.ly': 'text/x-lilypond', '.lyx': 'application/x-lyx', '.lz': 'application/x-lzip', '.lzh': 'application/x-lha', '.lzma': 'application/x-lzma', '.lzo': 'application/x-lzop', '.m': 'text/x-matlab', '.m15': 'audio/x-mod', '.m1u': 'video/vnd.mpegurl', '.m2t': 'video/mp2t', '.m2ts': 'video/mp2t', '.m3u': 'application/vnd.apple.mpegurl', '.m3u8': 'application/vnd.apple.mpegurl', '.m4': 'application/x-m4', 
'.m4a': 'audio/mp4', '.m4b': 'audio/x-m4b', '.m4u': 'video/vnd.mpegurl', '.m4v': 'video/mp4', '.mab': 'application/x-markaby', '.mak': 'text/x-makefile', '.man': 'application/x-troff-man', '.manifest': 'text/cache-manifest', '.markdown': 'text/x-markdown', '.mbox': 'application/mbox', '.md': 'text/x-markdown', '.mdb': 'application/vnd.ms-access', '.mdi': 'image/vnd.ms-modi', '.me': 'text/x-troff-me', '.med': 'audio/x-mod', '.meta4': 'application/metalink4+xml', '.metalink': 'application/metalink+xml', '.mgp': 'application/x-magicpoint', '.mht': 'application/x-mimearchive', '.mhtml': 'application/x-mimearchive', '.mid': 'audio/midi', '.midi': 'audio/midi', '.mif': 'application/x-mif', '.minipsf': 'audio/x-minipsf', '.mk': 'text/x-makefile', '.mka': 'audio/x-matroska', '.mkd': 'text/x-markdown', '.mkv': 'video/x-matroska', '.ml': 'text/x-ocaml', '.mli': 'text/x-ocaml', '.mm': 'text/x-troff-mm', '.mmf': 'application/x-smaf', '.mml': 'application/mathml+xml', '.mng': 'video/x-mng', '.mo': 'text/x-modelica', '.mo3': 'audio/x-mo3', '.mobi': 'application/x-mobipocket-ebook', '.moc': 'text/x-moc', '.mod': 'audio/x-mod', '.mof': 'text/x-mof', '.moov': 'video/quicktime', '.mov': 'video/quicktime', '.movie': 'video/x-sgi-movie', '.mp+': 'audio/x-musepack', '.mp2': 'video/mpeg', '.mp3': 'audio/mpeg', '.mp4': 'video/mp4', '.mpc': 'audio/x-musepack', '.mpe': 'video/mpeg', '.mpeg': 'video/mpeg', '.mpg': 'video/mpeg', '.mpga': 'audio/mpeg', '.mpl': 'video/mp2t', '.mpls': 'video/mp2t', '.mpp': 'audio/x-musepack', '.mrl': 'text/x-mrml', '.mrml': 'text/x-mrml', '.mrw': 'image/x-minolta-mrw', '.ms': 'text/x-troff-ms', '.msi': 'application/x-msi', '.msod': 'image/x-msod', '.msx': 'application/x-msx-rom', '.mtm': 'audio/x-mod', '.mts': 'video/mp2t', '.mup': 'text/x-mup', '.mxf': 'application/mxf', '.mxu': 'video/vnd.mpegurl', '.n64': 'application/x-n64-rom', '.nb': 'application/mathematica', '.nc': 'application/x-netcdf', '.nds': 'application/x-nintendo-ds-rom', '.nef': 'image/x-nikon-nef', '.nes': 'application/x-nes-rom', '.nfo': 'text/x-nfo', '.not': 'text/x-mup', '.nsc': 'application/x-netshow-channel', '.nsv': 'video/x-nsv', '.nzb': 'application/x-nzb', '.o': 'application/x-object', '.obj': 'application/x-tgif', '.ocl': 'text/x-ocl', '.oda': 'application/oda', '.odb': 'application/vnd.oasis.opendocument.database', '.odc': 'application/vnd.oasis.opendocument.chart', '.odf': 'application/vnd.oasis.opendocument.formula', '.odg': 'application/vnd.oasis.opendocument.graphics', '.odi': 'application/vnd.oasis.opendocument.image', '.odm': 'application/vnd.oasis.opendocument.text-master', '.odp': 'application/vnd.oasis.opendocument.presentation', '.ods': 'application/vnd.oasis.opendocument.spreadsheet', '.odt': 'application/vnd.oasis.opendocument.text', '.oga': 'audio/ogg', '.ogg': 'application/ogg', '.ogm': 'video/x-ogm+ogg', '.ogv': 'video/ogg', '.ogx': 'application/ogg', '.old': 'application/x-trash', '.oleo': 'application/x-oleo', '.ooc': 'text/x-ooc', '.opml': 'text/x-opml+xml', '.oprc': 'application/vnd.palm', '.ora': 'image/openraster', '.orf': 'image/x-olympus-orf', '.otc': 'application/vnd.oasis.opendocument.chart-template', '.otf': 'application/x-font-otf', '.otg': 'application/vnd.oasis.opendocument.graphics-template', '.oth': 'application/vnd.oasis.opendocument.text-web', '.otp': 'application/vnd.oasis.opendocument.presentation-template', '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template', '.ott': 'application/vnd.oasis.opendocument.text-template', '.owl': 'application/rdf+xml', '.oxps': 
'application/oxps', '.oxt': 'application/vnd.openofficeorg.extension', '.p': 'text/x-pascal', '.p10': 'application/pkcs10', '.p12': 'application/x-pkcs12', '.p7b': 'application/x-pkcs7-certificates', '.p7c': 'application/pkcs7-mime', '.p7m': 'application/pkcs7-mime', '.p7s': 'application/pkcs7-signature', '.p8': 'application/pkcs8', '.pack': 'application/x-java-pack200', '.pak': 'application/x-pak', '.par2': 'application/x-par2', '.pas': 'text/x-pascal', '.patch': 'text/x-patch', '.pbm': 'image/x-portable-bitmap', '.pcap': 'application/vnd.tcpdump.pcap', '.pcd': 'image/x-photo-cd', '.pcf': 'application/x-cisco-vpn-settings', '.pcf.gz': 'application/x-font-pcf', '.pcf.z': 'application/x-font-pcf', '.pcl': 'application/vnd.hp-pcl', '.pct': 'image/x-pict', '.pcx': 'image/x-pcx', '.pdb': 'chemical/x-pdb', '.pdc': 'application/x-aportisdoc', '.pdf': 'application/pdf', '.pdf.bz2': 'application/x-bzpdf', '.pdf.gz': 'application/x-gzpdf', '.pdf.xz': 'application/x-xzpdf', '.pef': 'image/x-pentax-pef', '.pem': 'application/x-x509-ca-cert', '.perl': 'application/x-perl', '.pfa': 'application/x-font-type1', '.pfb': 'application/x-font-type1', '.pfx': 'application/x-pkcs12', '.pgm': 'image/x-portable-graymap', '.pgn': 'application/x-chess-pgn', '.pgp': 'application/pgp-encrypted', '.php': 'application/x-php', '.php3': 'application/x-php', '.php4': 'application/x-php', '.php5': 'application/x-php', '.phps': 'application/x-php', '.pict': 'image/x-pict', '.pict1': 'image/x-pict', '.pict2': 'image/x-pict', '.pk': 'application/x-tex-pk', '.pkipath': 'application/pkix-pkipath', '.pkr': 'application/pgp-keys', '.pl': 'application/x-perl', '.pla': 'audio/x-iriver-pla', '.pln': 'application/x-planperfect', '.pls': 'audio/x-scpls', '.pm': 'application/x-perl', '.png': 'image/png', '.pnm': 'image/x-portable-anymap', '.pntg': 'image/x-macpaint', '.po': 'text/x-gettext-translation', '.por': 'application/x-spss-por', '.pot': 'text/x-gettext-translation-template', '.potm': 'application/vnd.ms-powerpoint.template.macroenabled.12', '.potx': 'application/vnd.openxmlformats-officedocument.presentationml.template', '.ppam': 'application/vnd.ms-powerpoint.addin.macroenabled.12', '.ppm': 'image/x-portable-pixmap', '.pps': 'application/vnd.ms-powerpoint', '.ppsm': 'application/vnd.ms-powerpoint.slideshow.macroenabled.12', '.ppsx': 'application/vnd.openxmlformats-officedocument.presentationml.slideshow', '.ppt': 'application/vnd.ms-powerpoint', '.pptm': 'application/vnd.ms-powerpoint.presentation.macroenabled.12', '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', '.ppz': 'application/vnd.ms-powerpoint', '.pqa': 'application/vnd.palm', '.prc': 'application/vnd.palm', '.ps': 'application/postscript', '.ps.bz2': 'application/x-bzpostscript', '.ps.gz': 'application/x-gzpostscript', '.psd': 'image/vnd.adobe.photoshop', '.psf': 'audio/x-psf', '.psf.gz': 'application/x-gz-font-linux-psf', '.psflib': 'audio/x-psflib', '.psid': 'audio/prs.sid', '.psw': 'application/x-pocket-word', '.pw': 'application/x-pw', '.py': 'text/x-python', '.pyc': 'application/x-python-bytecode', '.pickle': 'application/python-pickle', '.pyo': 'application/x-python-bytecode', '.qif': 'image/x-quicktime', '.qml': 'text/x-qml', '.qt': 'video/quicktime', '.qti': 'application/x-qtiplot', '.qti.gz': 'application/x-qtiplot', '.qtif': 'image/x-quicktime', '.qtl': 'application/x-quicktime-media-link', '.qtvr': 'video/quicktime', '.ra': 'audio/vnd.rn-realaudio', '.raf': 'image/x-fuji-raf', '.ram': 'application/ram', '.rar': 
'application/x-rar', '.ras': 'image/x-cmu-raster', '.raw': 'image/x-panasonic-raw', '.rax': 'audio/vnd.rn-realaudio', '.rb': 'application/x-ruby', '.rdf': 'application/rdf+xml', '.rdfs': 'application/rdf+xml', '.reg': 'text/x-ms-regedit', '.rej': 'text/x-reject', '.rgb': 'image/x-rgb', '.rle': 'image/rle', '.rm': 'application/vnd.rn-realmedia', '.rmj': 'application/vnd.rn-realmedia', '.rmm': 'application/vnd.rn-realmedia', '.rms': 'application/vnd.rn-realmedia', '.rmvb': 'application/vnd.rn-realmedia', '.rmx': 'application/vnd.rn-realmedia', '.rnc': 'application/relax-ng-compact-syntax', '.rng': 'application/xml', '.roff': 'text/troff', '.rp': 'image/vnd.rn-realpix', '.rpm': 'application/x-rpm', '.rss': 'application/rss+xml', '.rt': 'text/vnd.rn-realtext', '.rtf': 'application/rtf', '.rtx': 'text/richtext', '.rv': 'video/vnd.rn-realvideo', '.rvx': 'video/vnd.rn-realvideo', '.rw2': 'image/x-panasonic-raw2', '.s3m': 'audio/x-s3m', '.sam': 'application/x-amipro', '.sami': 'application/x-sami', '.sav': 'application/x-spss-sav', '.scala': 'text/x-scala', '.scm': 'text/x-scheme', '.sda': 'application/vnd.stardivision.draw', '.sdc': 'application/vnd.stardivision.calc', '.sdd': 'application/vnd.stardivision.impress', '.sdp': 'application/sdp', '.sds': 'application/vnd.stardivision.chart', '.sdw': 'application/vnd.stardivision.writer', '.sgf': 'application/x-go-sgf', '.sgi': 'image/x-sgi', '.sgl': 'application/vnd.stardivision.writer', '.sgm': 'text/sgml', '.sgml': 'text/sgml', '.sh': 'application/x-shellscript', '.shape': 'application/x-dia-shape', '.shar': 'application/x-shar', '.shn': 'application/x-shorten', '.siag': 'application/x-siag', '.sid': 'audio/prs.sid', '.sik': 'application/x-trash', '.sis': 'application/vnd.symbian.install', '.sisx': 'x-epoc/x-sisx-app', '.sit': 'application/x-stuffit', '.siv': 'application/sieve', '.sk': 'image/x-skencil', '.sk1': 'image/x-skencil', '.skr': 'application/pgp-keys', '.sldm': 'application/vnd.ms-powerpoint.slide.macroenabled.12', '.sldx': 'application/vnd.openxmlformats-officedocument.presentationml.slide', '.slk': 'text/spreadsheet', '.smaf': 'application/x-smaf', '.smc': 'application/x-snes-rom', '.smd': 'application/vnd.stardivision.mail', '.smf': 'application/vnd.stardivision.math', '.smi': 'application/x-sami', '.smil': 'application/smil', '.sml': 'application/smil', '.sms': 'application/x-sms-rom', '.snd': 'audio/basic', '.so': 'application/x-sharedlib', '.spc': 'application/x-pkcs7-certificates', '.spd': 'application/x-font-speedo', '.spec': 'text/x-rpm-spec', '.spl': 'application/x-shockwave-flash', '.spm': 'application/x-source-rpm', '.spx': 'audio/x-speex', '.sql': 'text/x-sql', '.sr2': 'image/x-sony-sr2', '.src': 'application/x-wais-source', '.src.rpm': 'application/x-source-rpm', '.srf': 'image/x-sony-srf', '.srt': 'application/x-subrip', '.ss': 'text/x-scheme', '.ssa': 'text/x-ssa', '.stc': 'application/vnd.sun.xml.calc.template', '.std': 'application/vnd.sun.xml.draw.template', '.sti': 'application/vnd.sun.xml.impress.template', '.stm': 'audio/x-stm', '.stw': 'application/vnd.sun.xml.writer.template', '.sty': 'text/x-tex', '.sub': 'text/x-subviewer', '.sun': 'image/x-sun-raster', '.sv': 'text/x-svsrc', '.sv4cpio': 'application/x-sv4cpio', '.sv4crc': 'application/x-sv4crc', '.svg': 'image/svg+xml', '.svgz': 'image/svg+xml-compressed', '.svh': 'text/x-svhdr', '.swf': 'application/x-shockwave-flash', '.swm': 'application/x-ms-wim', '.sxc': 'application/vnd.sun.xml.calc', '.sxd': 'application/vnd.sun.xml.draw', '.sxg': 
'application/vnd.sun.xml.writer.global', '.sxi': 'application/vnd.sun.xml.impress', '.sxm': 'application/vnd.sun.xml.math', '.sxw': 'application/vnd.sun.xml.writer', '.sylk': 'text/spreadsheet', '.t': 'text/troff', '.t2t': 'text/x-txt2tags', '.tar': 'application/x-tar', '.tar.bz': 'application/x-bzip-compressed-tar', '.tar.bz2': 'application/x-bzip-compressed-tar', '.tar.gz': 'application/x-compressed-tar', '.tar.lrz': 'application/x-lrzip-compressed-tar', '.tar.lzma': 'application/x-lzma-compressed-tar', '.tar.lzo': 'application/x-tzo', '.tar.xz': 'application/x-xz-compressed-tar', '.tar.z': 'application/x-tarz', '.taz': 'application/x-tarz', '.tb2': 'application/x-bzip-compressed-tar', '.tbz': 'application/x-bzip-compressed-tar', '.tbz2': 'application/x-bzip-compressed-tar', '.tcl': 'text/x-tcl', '.tex': 'text/x-tex', '.texi': 'text/x-texinfo', '.texinfo': 'text/x-texinfo', '.tga': 'image/x-tga', '.tgz': 'application/x-compressed-tar', '.theme': 'application/x-theme', '.themepack': 'application/x-windows-themepack', '.tif': 'image/tiff', '.tiff': 'image/tiff', '.tk': 'text/x-tcl', '.tlrz': 'application/x-lrzip-compressed-tar', '.tlz': 'application/x-lzma-compressed-tar', '.tnef': 'application/vnd.ms-tnef', '.tnf': 'application/vnd.ms-tnef', '.toc': 'application/x-cdrdao-toc', '.torrent': 'application/x-bittorrent', '.tpic': 'image/x-tga', '.tr': 'text/troff', '.ts': 'video/mp2t', '.tsv': 'text/tab-separated-values', '.tta': 'audio/x-tta', '.ttc': 'application/x-font-ttf', '.ttf': 'application/x-font-ttf', '.ttx': 'application/x-font-ttx', '.txt': 'text/plain', '.txz': 'application/x-xz-compressed-tar', '.tzo': 'application/x-tzo', '.ufraw': 'application/x-ufraw', '.ui': 'application/x-gtk-builder', '.uil': 'text/x-uil', '.ult': 'audio/x-mod', '.uni': 'audio/x-mod', '.url': 'application/x-mswinurl', '.ustar': 'application/x-ustar', '.uue': 'text/x-uuencode', '.v': 'text/x-verilog', '.vala': 'text/x-vala', '.vapi': 'text/x-vala', '.vcard': 'text/vcard', '.vcf': 'text/vcard', '.vcs': 'text/calendar', '.vct': 'text/vcard', '.vda': 'image/x-tga', '.vhd': 'text/x-vhdl', '.vhdl': 'text/x-vhdl', '.viv': 'video/vivo', '.vivo': 'video/vivo', '.vlc': 'audio/x-mpegurl', '.vob': 'video/mpeg', '.voc': 'audio/x-voc', '.vor': 'application/vnd.stardivision.writer', '.vrm': 'model/vrml', '.vrml': 'model/vrml', '.vsd': 'application/vnd.visio', '.vss': 'application/vnd.visio', '.vst': 'image/x-tga', '.vsw': 'application/vnd.visio', '.vtt': 'text/vtt', '.w2p': 'application/w2p', '.wav': 'audio/x-wav', '.wax': 'audio/x-ms-asx', '.wb1': 'application/x-quattropro', '.wb2': 'application/x-quattropro', '.wb3': 'application/x-quattropro', '.wbmp': 'image/vnd.wap.wbmp', '.wcm': 'application/vnd.ms-works', '.wdb': 'application/vnd.ms-works', '.webm': 'video/webm', '.wim': 'application/x-ms-wim', '.wk1': 'application/vnd.lotus-1-2-3', '.wk3': 'application/vnd.lotus-1-2-3', '.wk4': 'application/vnd.lotus-1-2-3', '.wks': 'application/vnd.ms-works', '.wma': 'audio/x-ms-wma', '.wmf': 'image/x-wmf', '.wml': 'text/vnd.wap.wml', '.wmls': 'text/vnd.wap.wmlscript', '.wmv': 'video/x-ms-wmv', '.wmx': 'audio/x-ms-asx', '.woff': 'application/font-woff', '.wp': 'application/vnd.wordperfect', '.wp4': 'application/vnd.wordperfect', '.wp5': 'application/vnd.wordperfect', '.wp6': 'application/vnd.wordperfect', '.wpd': 'application/vnd.wordperfect', '.wpg': 'application/x-wpg', '.wpl': 'application/vnd.ms-wpl', '.wpp': 'application/vnd.wordperfect', '.wps': 'application/vnd.ms-works', '.wri': 'application/x-mswrite', '.wrl': 
'model/vrml', '.wsgi': 'text/x-python', '.wv': 'audio/x-wavpack', '.wvc': 'audio/x-wavpack-correction', '.wvp': 'audio/x-wavpack', '.wvx': 'audio/x-ms-asx', '.wwf': 'application/x-wwf', '.x3f': 'image/x-sigma-x3f', '.xac': 'application/x-gnucash', '.xbel': 'application/x-xbel', '.xbl': 'application/xml', '.xbm': 'image/x-xbitmap', '.xcf': 'image/x-xcf', '.xcf.bz2': 'image/x-compressed-xcf', '.xcf.gz': 'image/x-compressed-xcf', '.xhtml': 'application/xhtml+xml', '.xi': 'audio/x-xi', '.xla': 'application/vnd.ms-excel', '.xlam': 'application/vnd.ms-excel.addin.macroenabled.12', '.xlc': 'application/vnd.ms-excel', '.xld': 'application/vnd.ms-excel', '.xlf': 'application/x-xliff', '.xliff': 'application/x-xliff', '.xll': 'application/vnd.ms-excel', '.xlm': 'application/vnd.ms-excel', '.xlr': 'application/vnd.ms-works', '.xls': 'application/vnd.ms-excel', '.xlsb': 'application/vnd.ms-excel.sheet.binary.macroenabled.12', '.xlsm': 'application/vnd.ms-excel.sheet.macroenabled.12', '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', '.xlt': 'application/vnd.ms-excel', '.xltm': 'application/vnd.ms-excel.template.macroenabled.12', '.xltx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.template', '.xlw': 'application/vnd.ms-excel', '.xm': 'audio/x-xm', '.xmf': 'audio/x-xmf', '.xmi': 'text/x-xmi', '.xml': 'application/xml', '.xpi': 'application/x-xpinstall', '.xpm': 'image/x-xpixmap', '.xps': 'application/oxps', '.xsd': 'application/xml', '.xsl': 'application/xslt+xml', '.xslfo': 'text/x-xslfo', '.xslt': 'application/xslt+xml', '.xspf': 'application/xspf+xml', '.xul': 'application/vnd.mozilla.xul+xml', '.xwd': 'image/x-xwindowdump', '.xyz': 'chemical/x-pdb', '.xz': 'application/x-xz', '.yaml': 'application/x-yaml', '.yml': 'application/x-yaml', '.z': 'application/x-compress', '.zabw': 'application/x-abiword', '.zip': 'application/zip', '.zoo': 'application/x-zoo', } def contenttype(filename, default='text/plain'): """ Returns the Content-Type string matching extension of the given filename. """ i = filename.rfind('.') if i >= 0: default = CONTENT_TYPE.get(filename[i:].lower(), default) j = filename.rfind('.', 0, i) if j >= 0: default = CONTENT_TYPE.get(filename[j:].lower(), default) if default.startswith('text/'): default += '; charset=utf-8' return default
Python
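contenttype above performs a two-level suffix lookup: the last extension is tried first, then the last two extensions together (so '.tar.gz' overrides the plain '.gz' entry), and any 'text/*' result gets '; charset=utf-8' appended. A few illustrative calls and the values they should return given the table above, assuming the module is importable:

print(contenttype('photo.JPG'))      # 'image/jpeg' (extension is lowercased)
print(contenttype('notes.md'))       # 'text/x-markdown; charset=utf-8'
print(contenttype('backup.tar.gz'))  # 'application/x-compressed-tar', not gzip
print(contenttype('README'))         # 'text/plain; charset=utf-8' (the default)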
#!/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) gluon.rewrite parses incoming URLs and formats outgoing URLs for gluon.html.URL. In addition, it rewrites both incoming and outgoing URLs based on the (optional) user-supplied routes.py, which also allows for rewriting of certain error messages. routes.py supports two styles of URL rewriting, depending on whether 'routers' is defined. Refer to router.example.py and routes.example.py for additional documentation. """ import os import re import logging import traceback import threading import urllib from storage import Storage, List from http import HTTP from fileutils import abspath, read_file from settings import global_settings isdir = os.path.isdir isfile = os.path.isfile exists = os.path.exists pjoin = os.path.join logger = logging.getLogger('web2py.rewrite') THREAD_LOCAL = threading.local() # thread-local storage for routing params regex_at = re.compile(r'(?<!\\)\$[a-zA-Z]\w*') regex_anything = re.compile(r'(?<!\\)\$anything') regex_redirect = re.compile(r'(\d+)->(.*)') regex_full_url = re.compile( r'^(?P<scheme>http|https|HTTP|HTTPS)\://(?P<host>[^/]*)(?P<uri>.*)') regex_version = re.compile(r'^(_[\d]+\.[\d]+\.[\d]+)$') # pattern to replace spaces with underscore in URL # also the html escaped variants '+' and '%20' are covered regex_space = re.compile('(\+|\s|%20)+') # pattern to find valid paths in url /application/controller/... # this could be: # for static pages: # /<b:application>/static/<x:file> # for dynamic pages: # /<a:application>[/<c:controller>[/<f:function>[.<e:ext>][/<s:args>]]] # application, controller, function and ext may only contain [a-zA-Z0-9_] # file and args may also contain '-', '=', '.' and '/' # apps in routes_apps_raw must parse raw_args into args regex_url = re.compile('^/((?P<a>\w+)(/(?P<c>\w+)(/(?P<z>(?P<f>\w+)(\.(?P<e>[\w.]+))?(?P<s>.*)))?)?)?$') regex_args = re.compile('^[/\w@=-]*(\.[/\w@=-]+)*$') def _router_default(): "return new copy of default base router" router = Storage( default_application='init', applications='ALL', default_controller='default', controllers='DEFAULT', default_function='index', functions=dict(), default_language=None, languages=None, root_static=['favicon.ico', 'robots.txt'], map_static=None, domains=None, exclusive_domain=False, map_hyphen=False, acfe_match=r'\w+$', # legal app/ctlr/fcn/ext # # Implementation note: # The file_match & args_match patterns use look-behind to avoid # pathological backtracking from nested patterns. 
# file_match = r'([-+=@$%\w]|(?<=[-+=@$%\w])[./])*$', # legal static subpath args_match=r'([\w@ -]|(?<=[\w@ -])[.=])*$', # legal arg in args ) return router def _params_default(app=None): "return new copy of default parameters" p = Storage() p.name = app or "BASE" p.default_application = app or "init" p.default_controller = "default" p.default_function = "index" p.routes_app = [] p.routes_in = [] p.routes_out = [] p.routes_onerror = [] p.routes_apps_raw = [] p.error_handler = None p.error_message = '<html><body><h1>%s</h1></body></html>' p.error_message_ticket = \ '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body><!-- this is junk text else IE does not display the page: ' + ('x' * 512) + ' //--></html>' p.routers = None p.logging = 'off' return p params_apps = dict() params = _params_default(app=None) # regex rewrite parameters THREAD_LOCAL.routes = params # default to base regex rewrite parameters routers = None def log_rewrite(string): "Log rewrite activity under control of routes.py" if params.logging == 'debug': # catch common cases first logger.debug(string) elif params.logging == 'off' or not params.logging: pass elif params.logging == 'print': print string elif params.logging == 'info': logger.info(string) elif params.logging == 'warning': logger.warning(string) elif params.logging == 'error': logger.error(string) elif params.logging == 'critical': logger.critical(string) else: logger.debug(string) ROUTER_KEYS = set( ('default_application', 'applications', 'default_controller', 'controllers', 'default_function', 'functions', 'default_language', 'languages', 'domain', 'domains', 'root_static', 'path_prefix', 'exclusive_domain', 'map_hyphen', 'map_static', 'acfe_match', 'file_match', 'args_match')) ROUTER_BASE_KEYS = set( ('applications', 'default_application', 'domains', 'path_prefix')) # The external interface to rewrite consists of: # # load: load routing configuration file(s) # url_in: parse and rewrite incoming URL # url_out: assemble and rewrite outgoing URL # # THREAD_LOCAL.routes.default_application # THREAD_LOCAL.routes.error_message # THREAD_LOCAL.routes.error_message_ticket # THREAD_LOCAL.routes.try_redirect_on_error # THREAD_LOCAL.routes.error_handler # # filter_url: helper for doctest & unittest # filter_err: helper for doctest & unittest # regex_filter_out: doctest def fixup_missing_path_info(environ): eget = environ.get path_info = eget('PATH_INFO') request_uri = eget('REQUEST_URI') if not path_info and request_uri: # for fcgi, get path_info and # query_string from request_uri items = request_uri.split('?') path_info = environ['PATH_INFO'] = items[0] environ['QUERY_STRING'] = items[1] if len(items) > 1 else '' elif not request_uri: query_string = eget('QUERY_STRING') if query_string: environ['REQUEST_URI'] = '%s?%s' % (path_info, query_string) else: environ['REQUEST_URI'] = path_info if not eget('HTTP_HOST'): environ['HTTP_HOST'] = \ '%s:%s' % (eget('SERVER_NAME'), eget('SERVER_PORT')) def url_in(request, environ): "parse and rewrite incoming URL" if routers: return map_url_in(request, environ) return regex_url_in(request, environ) def url_out(request, environ, application, controller, function, args, other, scheme, host, port): "assemble and rewrite outgoing URL" if routers: acf = map_url_out(request, environ, application, controller, function, args, other, scheme, host, port) url = '%s%s' % (acf, other) else: url = '/%s/%s/%s%s' % (application, controller, function, other) url = 
regex_filter_out(url, environ) # # fill in scheme and host if absolute URL is requested # scheme can be a string, eg 'http', 'https', 'ws', 'wss' # if host is True or (host is None and (scheme or port is not None)): host = request.env.http_host if not scheme or scheme is True: scheme = request.env.get('wsgi_url_scheme', 'http').lower() \ if request else 'http' if host: host_port = host if not port else host.split(':', 1)[0] + ':%s' % port url = '%s://%s%s' % (scheme, host_port, url) return url def try_rewrite_on_error(http_response, request, environ, ticket=None): """ called from main.wsgibase to rewrite the http response. """ status = int(str(http_response.status).split()[0]) if status >= 399 and THREAD_LOCAL.routes.routes_onerror: keys = set(('%s/%s' % (request.application, status), '%s/*' % (request.application), '*/%s' % (status), '*/*')) for (key, uri) in THREAD_LOCAL.routes.routes_onerror: if key in keys: if uri == '!': # do nothing! return http_response, environ elif '?' in uri: path_info, query_string = uri.split('?', 1) query_string += '&' else: path_info, query_string = uri, '' query_string += \ 'code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ (status, ticket, urllib.quote_plus( request.env.request_uri), request.url) if uri.startswith('http://') or uri.startswith('https://'): # make up a response url = path_info + '?' + query_string message = 'You are being redirected <a href="%s">here</a>' return HTTP(303, message % url, Location=url), environ else: error_raising_path = environ['PATH_INFO'] # Rewrite routes_onerror path. path_info = '/' + path_info.lstrip( '/') # add leading '/' if missing environ['PATH_INFO'] = path_info error_handling_path = \ url_in(request, environ)[2]['PATH_INFO'] # Avoid infinite loop. if error_handling_path != error_raising_path: # wsgibase will be called recursively with the routes_onerror path. environ['PATH_INFO'] = path_info environ['QUERY_STRING'] = query_string environ['WEB2PY_STATUS_CODE'] = status return None, environ # do nothing! return http_response, environ def try_redirect_on_error(http_object, request, ticket=None): "called from main.wsgibase to rewrite the http response" status = int(str(http_object.status).split()[0]) if status > 399 and THREAD_LOCAL.routes.routes_onerror: keys = set(('%s/%s' % (request.application, status), '%s/*' % (request.application), '*/%s' % (status), '*/*')) for (key, redir) in THREAD_LOCAL.routes.routes_onerror: if key in keys: if redir == '!': break elif '?' in redir: url = '%s&code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ (redir, status, ticket, urllib.quote_plus(request.env.request_uri), request.url) else: url = '%s?code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ (redir, status, ticket, urllib.quote_plus(request.env.request_uri), request.url) return HTTP(303, 'You are being redirected <a href="%s">here</a>' % url, Location=url) return http_object def load(routes='routes.py', app=None, data=None, rdict=None): """ load: read (if file) and parse routes store results in params (called from main.py at web2py initialization time) If data is present, it's used instead of the routes.py contents. 
If rdict is present, it must be a dict to be used for routers (unit test) """ global params global routers if app is None: # reinitialize global params_apps params_apps = dict() params = _params_default(app=None) # regex rewrite parameters THREAD_LOCAL.routes = params # default to base regex rewrite parameters routers = None if isinstance(rdict, dict): symbols = dict(routers=rdict) path = 'rdict' else: if data is not None: path = 'routes' else: if app is None: path = abspath(routes) else: path = abspath('applications', app, routes) if not exists(path): return data = read_file(path).replace('\r\n', '\n') symbols = dict(app=app) try: exec (data + '\n') in symbols except SyntaxError, e: logger.error( '%s has a syntax error and will not be loaded\n' % path + traceback.format_exc()) raise e p = _params_default(app) for sym in ('routes_app', 'routes_in', 'routes_out'): if sym in symbols: for items in symbols[sym]: p[sym].append(compile_regex(*items)) for sym in ('routes_onerror', 'routes_apps_raw', 'error_handler', 'error_message', 'error_message_ticket', 'default_application', 'default_controller', 'default_function', 'logging'): if sym in symbols: p[sym] = symbols[sym] if 'routers' in symbols: p.routers = Storage(symbols['routers']) for key in p.routers: if isinstance(p.routers[key], dict): p.routers[key] = Storage(p.routers[key]) if app is None: params = p # install base rewrite parameters THREAD_LOCAL.routes = params # install default as current routes # # create the BASE router if routers in use # routers = params.routers # establish routers if present if isinstance(routers, dict): routers = Storage(routers) if routers is not None: router = _router_default() if routers.BASE: router.update(routers.BASE) routers.BASE = router # scan each app in applications/ # create a router, if routers are in use # parse the app-specific routes.py if present # all_apps = [] apppath = abspath('applications') for appname in os.listdir(apppath): if not appname.startswith('.') and \ isdir(abspath(apppath, appname)) and \ isdir(abspath(apppath, appname, 'controllers')): all_apps.append(appname) if routers: router = Storage(routers.BASE) # new copy if appname in routers: for key in routers[appname].keys(): if key in ROUTER_BASE_KEYS: raise SyntaxError("BASE-only key '%s' in router '%s'" % (key, appname)) router.update(routers[appname]) routers[appname] = router if exists(abspath('applications', appname, routes)): load(routes, appname) if routers: load_routers(all_apps) else: # app params_apps[app] = p if routers and p.routers: if app in p.routers: routers[app].update(p.routers[app]) log_rewrite('URL rewrite is on. configuration in %s' % path) def compile_regex(k, v, env=None): """ Preprocess and compile the regular expressions in routes_app/in/out The resulting regex will match a pattern of the form: [remote address]:[protocol]://[host]:[method] [path] We allow abbreviated regexes on input; here we try to complete them. 
""" k0 = k # original k for error reporting # bracket regex in ^...$ if not already done if not k[0] == '^': k = '^%s' % k if not k[-1] == '$': k = '%s$' % k # if there are no :-separated parts, prepend a catch-all for the IP address if k.find(':') < 0: # k = '^.*?:%s' % k[1:] k = '^.*?:https?://[^:/]+:[a-z]+ %s' % k[1:] # if there's no ://, provide a catch-all for the protocol, host & method if k.find('://') < 0: i = k.find(':/') if i < 0: raise SyntaxError("routes pattern syntax error: path needs leading '/' [%s]" % k0) k = r'%s:https?://[^:/]+:[a-z]+ %s' % (k[:i], k[i + 1:]) # $anything -> ?P<anything>.* for item in regex_anything.findall(k): k = k.replace(item, '(?P<anything>.*)') # $a (etc) -> ?P<a>\w+ for item in regex_at.findall(k): k = k.replace(item, r'(?P<%s>\w+)' % item[1:]) # same for replacement pattern, but with \g for item in regex_at.findall(v): v = v.replace(item, r'\g<%s>' % item[1:]) return (re.compile(k, re.DOTALL), v, env or {}) def load_routers(all_apps): "load-time post-processing of routers" for app in routers: # initialize apps with routers that aren't present, # on behalf of unit tests if app not in all_apps: all_apps.append(app) router = Storage(routers.BASE) # new copy if app != 'BASE': keys = set(routers[app]).intersection(ROUTER_BASE_KEYS) if keys: raise SyntaxError("BASE-only key(s) %s in router '%s'" % ( tuple(keys), app)) router.update(routers[app]) routers[app] = router router = routers[app] keys = set(router).difference(ROUTER_KEYS) if keys: raise SyntaxError("unknown key(s) %s in router '%s'" % ( tuple(keys), app)) if not router.controllers: router.controllers = set() elif not isinstance(router.controllers, str): router.controllers = set(router.controllers) if router.languages: router.languages = set(router.languages) else: router.languages = set() if router.functions: if isinstance(router.functions, (set, tuple, list)): functions = set(router.functions) if isinstance(router.default_function, str): functions.add( router.default_function) # legacy compatibility router.functions = {router.default_controller: functions} for controller in router.functions: router.functions[controller] = set( router.functions[controller]) else: router.functions = dict() if app != 'BASE': for base_only in ROUTER_BASE_KEYS: router.pop(base_only, None) if 'domain' in router: routers.BASE.domains[router.domain] = app if isinstance(router.controllers, str) and router.controllers == 'DEFAULT': router.controllers = set() if isdir(abspath('applications', app)): cpath = abspath('applications', app, 'controllers') for cname in os.listdir(cpath): if isfile(abspath(cpath, cname)) and cname.endswith('.py'): router.controllers.add(cname[:-3]) if router.controllers: router.controllers.add('static') router.controllers.add(router.default_controller) if isinstance(routers.BASE.applications, str) and routers.BASE.applications == 'ALL': routers.BASE.applications = list(all_apps) if routers.BASE.applications: routers.BASE.applications = set(routers.BASE.applications) else: routers.BASE.applications = set() for app in routers.keys(): # set router name router = routers[app] router.name = app # compile URL validation patterns router._acfe_match = re.compile(router.acfe_match) router._file_match = re.compile(router.file_match) if router.args_match: router._args_match = re.compile(router.args_match) # convert path_prefix to a list of path elements if router.path_prefix: if isinstance(router.path_prefix, str): router.path_prefix = router.path_prefix.strip('/').split('/') # rewrite BASE.domains as 
tuples # # key: 'domain[:port]' -> (domain, port) # value: 'application[/controller] -> (application, controller) # (port and controller may be None) # domains = dict() if routers.BASE.domains: for (d, a) in routers.BASE.domains.iteritems(): (domain, app) = (d.strip(':'), a.strip('/')) if ':' in domain: (domain, port) = domain.split(':') else: port = None if '/' in app: (app, ctlr) = app.split('/', 1) else: ctlr = None if ctlr and '/' in ctlr: (ctlr, fcn) = ctlr.split('/') else: fcn = None if app not in all_apps and app not in routers: raise SyntaxError("unknown app '%s' in domains" % app) domains[(domain, port)] = (app, ctlr, fcn) routers.BASE.domains = domains def regex_uri(e, regexes, tag, default=None): "filter incoming URI against a list of regexes" path = e['PATH_INFO'] host = e.get('HTTP_HOST', e.get('SERVER_NAME', 'localhost')).lower() i = host.find(':') if i > 0: host = host[:i] key = '%s:%s://%s:%s %s' % \ (e.get('REMOTE_ADDR', 'localhost'), e.get('wsgi.url_scheme', 'http').lower(), host, e.get('REQUEST_METHOD', 'get').lower(), path) for (regex, value, custom_env) in regexes: if regex.match(key): e.update(custom_env) rewritten = regex.sub(value, key) log_rewrite('%s: [%s] [%s] -> %s' % (tag, key, value, rewritten)) return rewritten log_rewrite('%s: [%s] -> %s (not rewritten)' % (tag, key, default)) return default def regex_select(env=None, app=None, request=None): """ select a set of regex rewrite params for the current request """ if app: THREAD_LOCAL.routes = params_apps.get(app, params) elif env and params.routes_app: if routers: map_url_in(request, env, app=True) else: app = regex_uri(env, params.routes_app, "routes_app") THREAD_LOCAL.routes = params_apps.get(app, params) else: THREAD_LOCAL.routes = params # default to base rewrite parameters log_rewrite("select routing parameters: %s" % THREAD_LOCAL.routes.name) return app # for doctest def regex_filter_in(e): "regex rewrite incoming URL" routes = THREAD_LOCAL.routes query = e.get('QUERY_STRING', None) e['WEB2PY_ORIGINAL_URI'] = e['PATH_INFO'] + (query and ('?' + query) or '') if routes.routes_in: path = regex_uri(e, routes.routes_in, "routes_in", e['PATH_INFO']) rmatch = regex_redirect.match(path) if rmatch: raise HTTP(int(rmatch.group(1)), location=rmatch.group(2)) items = path.split('?', 1) e['PATH_INFO'] = items[0] if len(items) > 1: if query: query = items[1] + '&' + query else: query = items[1] e['QUERY_STRING'] = query e['REQUEST_URI'] = e['PATH_INFO'] + (query and ('?' 
+ query) or '') return e def sluggify(key): return key.lower().replace('.', '_') def invalid_url(routes): raise HTTP(400, routes.error_message % 'invalid request', web2py_error='invalid path') def regex_url_in(request, environ): "rewrite and parse incoming URL" # ################################################## # select application # rewrite URL if routes_in is defined # update request.env # ################################################## regex_select(env=environ, request=request) routes = THREAD_LOCAL.routes if routes.routes_in: environ = regex_filter_in(environ) request.env.update( (k.lower().replace('.', '_'), v) for k, v in environ.iteritems()) # ################################################## # serve if a static file # ################################################## path = request.env.path_info.replace('\\', '/') or '/' path = regex_space.sub('_', path) if path.endswith('/') and len(path) > 1: path = path[:-1] match = regex_url.match(path) if not match: invalid_url(routes) request.raw_args = (match.group('s') or '') if request.raw_args.startswith('/'): request.raw_args = request.raw_args[1:] if match.group('c') == 'static': application = match.group('a') version, filename = None, match.group('z') items = filename.split('/', 1) if regex_version.match(items[0]): version, filename = items static_folder = pjoin(request.env.applications_parent, 'applications', application,'static') static_file = os.path.abspath(pjoin(static_folder,filename)) if not static_file.startswith(static_folder): invalid_url(routes) return (static_file, version, environ) else: # ################################################## # parse application, controller and function # ################################################## request.application = match.group('a') or routes.default_application request.controller = match.group('c') or routes.default_controller request.function = match.group('f') or routes.default_function request.raw_extension = match.group('e') request.extension = request.raw_extension or 'html' if request.application in routes.routes_apps_raw: # application is responsible for parsing args request.args = None elif not regex_args.match(request.raw_args): invalid_url(routes) elif request.raw_args: request.args = List(request.raw_args.split('/')) else: request.args = List([]) return (None, None, environ) def regex_filter_out(url, e=None): "regex rewrite outgoing URL" if not hasattr(THREAD_LOCAL, 'routes'): regex_select() # ensure routes is set (for application threads) routes = THREAD_LOCAL.routes if routers: return url # already filtered if routes.routes_out: items = url.split('?', 1) if e: host = e.get('http_host', 'localhost').lower() i = host.find(':') if i > 0: host = host[:i] items[0] = '%s:%s://%s:%s %s' % \ (e.get('remote_addr', ''), e.get('wsgi_url_scheme', 'http').lower(), host, e.get('request_method', 'get').lower(), items[0]) else: items[0] = ':http://localhost:get %s' % items[0] for (regex, value, tmp) in routes.routes_out: if regex.match(items[0]): rewritten = '?'.join([regex.sub(value, items[0])] + items[1:]) log_rewrite('routes_out: [%s] -> %s' % (url, rewritten)) return rewritten log_rewrite('routes_out: [%s] not rewritten' % url) return url def filter_url(url, method='get', remote='0.0.0.0', out=False, app=False, lang=None, domain=(None, None), env=False, scheme=None, host=None, port=None): """ doctest/unittest interface to regex_filter_in() and regex_filter_out() """ match = regex_full_url.match(url) urlscheme = match.group('scheme').lower() urlhost = 
match.group('host').lower() uri = match.group('uri') k = uri.find('?') if k < 0: k = len(uri) if isinstance(domain, str): domain = (domain, None) (path_info, query_string) = (uri[:k], uri[k + 1:]) path_info = urllib.unquote(path_info) # simulate server e = { 'REMOTE_ADDR': remote, 'REQUEST_METHOD': method, 'wsgi.url_scheme': urlscheme, 'HTTP_HOST': urlhost, 'REQUEST_URI': uri, 'PATH_INFO': path_info, 'QUERY_STRING': query_string, #for filter_out request.env use lowercase 'remote_addr': remote, 'request_method': method, 'wsgi_url_scheme': urlscheme, 'http_host': urlhost } request = Storage() e["applications_parent"] = global_settings.applications_parent request.env = Storage(e) request.uri_language = lang # determine application only # if app: if routers: return map_url_in(request, e, app=True) return regex_select(e) # rewrite outbound URL # if out: (request.env.domain_application, request.env.domain_controller) = domain items = path_info.lstrip('/').split('/') if items[-1] == '': items.pop() # adjust trailing empty args assert len(items) >= 3, "at least /a/c/f is required" a = items.pop(0) c = items.pop(0) f = items.pop(0) if not routers: return regex_filter_out(uri, e) acf = map_url_out( request, None, a, c, f, items, None, scheme, host, port) if items: url = '%s/%s' % (acf, '/'.join(items)) if items[-1] == '': url += '/' else: url = acf if query_string: url += '?' + query_string return url # rewrite inbound URL # (static, version, e) = url_in(request, e) if static: return static result = "/%s/%s/%s" % ( request.application, request.controller, request.function) if request.extension and request.extension != 'html': result += ".%s" % request.extension if request.args: result += " %s" % request.args if e['QUERY_STRING']: result += " ?%s" % e['QUERY_STRING'] if request.uri_language: result += " (%s)" % request.uri_language if env: return request.env return result def filter_err(status, application='app', ticket='tkt'): "doctest/unittest interface to routes_onerror" routes = THREAD_LOCAL.routes if status > 399 and routes.routes_onerror: keys = set(('%s/%s' % (application, status), '%s/*' % (application), '*/%s' % (status), '*/*')) for (key, redir) in routes.routes_onerror: if key in keys: if redir == '!': break elif '?' in redir: url = redir + '&' + 'code=%s&ticket=%s' % (status, ticket) else: url = redir + '?' + 'code=%s&ticket=%s' % (status, ticket) return url # redirection return status # no action # router support # class MapUrlIn(object): "logic for mapping incoming URLs" def __init__(self, request=None, env=None): "initialize a map-in object" self.request = request self.env = env self.router = None self.application = None self.language = None self.controller = None self.function = None self.extension = 'html' self.controllers = set() self.functions = dict() self.languages = set() self.default_language = None self.map_hyphen = False self.exclusive_domain = False path = self.env['PATH_INFO'] self.query = self.env.get('QUERY_STRING', None) path = path.lstrip('/') self.env['PATH_INFO'] = '/' + path self.env['WEB2PY_ORIGINAL_URI'] = self.env['PATH_INFO'] + ( self.query and ('?' 
+ self.query) or '') # to handle empty args, strip exactly one trailing slash, if present # .../arg1// represents one trailing empty arg # if path.endswith('/'): path = path[:-1] self.args = List(path and path.split('/') or []) # see http://www.python.org/dev/peps/pep-3333/#url-reconstruction for URL composition self.remote_addr = self.env.get('REMOTE_ADDR', 'localhost') self.scheme = self.env.get('wsgi.url_scheme', 'http').lower() self.method = self.env.get('REQUEST_METHOD', 'get').lower() (self.host, self.port) = (self.env.get('HTTP_HOST'), None) if not self.host: (self.host, self.port) = ( self.env.get('SERVER_NAME'), self.env.get('SERVER_PORT')) if not self.host: (self.host, self.port) = ('localhost', '80') if ':' in self.host: (self.host, self.port) = self.host.rsplit(':',1) # for ipv6 support if not self.port: self.port = '443' if self.scheme == 'https' else '80' def map_prefix(self): "strip path prefix, if present in its entirety" prefix = routers.BASE.path_prefix if prefix: prefixlen = len(prefix) if prefixlen > len(self.args): return for i in xrange(prefixlen): if prefix[i] != self.args[i]: return # prefix didn't match self.args = List(self.args[prefixlen:]) # strip the prefix def map_app(self): "determine application name" base = routers.BASE # base router self.domain_application = None self.domain_controller = None self.domain_function = None arg0 = self.harg0 if not base.exclusive_domain and base.applications and arg0 in base.applications: self.application = arg0 elif not base.exclusive_domain and arg0 and not base.applications: self.application = arg0 elif (self.host, self.port) in base.domains: (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, self.port)] self.env['domain_application'] = self.application self.env['domain_controller'] = self.domain_controller self.env['domain_function'] = self.domain_function elif (self.host, None) in base.domains: (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, None)] self.env['domain_application'] = self.application self.env['domain_controller'] = self.domain_controller self.env['domain_function'] = self.domain_function elif base.applications and arg0 in base.applications: self.application = arg0 elif arg0 and not base.applications: self.application = arg0 else: self.application = base.default_application or '' self.pop_arg_if(self.application == arg0) if not base._acfe_match.match(self.application): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error="invalid application: '%s'" % self.application) if self.application not in routers and \ (self.application != THREAD_LOCAL.routes.default_application or self.application == 'welcome'): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error="unknown application: '%s'" % self.application) # set the application router # log_rewrite("select application=%s" % self.application) self.request.application = self.application if self.application not in routers: self.router = routers.BASE # support gluon.main.wsgibase init->welcome else: self.router = routers[self.application] # application router self.controllers = self.router.controllers self.default_controller = self.domain_controller or self.router.default_controller self.functions = self.router.functions self.languages = self.router.languages self.default_language = self.router.default_language self.map_hyphen = self.router.map_hyphen self.exclusive_domain = self.router.exclusive_domain self._acfe_match = 
self.router._acfe_match self.file_match = self.router.file_match self._file_match = self.router._file_match self._args_match = self.router._args_match def map_root_static(self): ''' handle root-static files (no hyphen mapping) a root-static file is one whose incoming URL expects it to be at the root, typically robots.txt & favicon.ico ''' if len(self.args) == 1 and self.arg0 in self.router.root_static: self.controller = self.request.controller = 'static' root_static_file = pjoin(self.request.env.applications_parent, 'applications', self.application, self.controller, self.arg0) log_rewrite("route: root static=%s" % root_static_file) return root_static_file, None return None, None def map_language(self): "handle language (no hyphen mapping)" arg0 = self.arg0 # no hyphen mapping if arg0 and self.languages and arg0 in self.languages: self.language = arg0 else: self.language = self.default_language if self.language: log_rewrite("route: language=%s" % self.language) self.pop_arg_if(self.language == arg0) arg0 = self.arg0 def map_controller(self): "identify controller" # handle controller # arg0 = self.harg0 # map hyphens if not arg0 or (self.controllers and arg0 not in self.controllers): self.controller = self.default_controller or '' else: self.controller = arg0 self.pop_arg_if(arg0 == self.controller) log_rewrite("route: controller=%s" % self.controller) if not self.router._acfe_match.match(self.controller): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error='invalid controller') def map_static(self): ''' handle static files file_match but no hyphen mapping ''' if self.controller != 'static': return None, None version = regex_version.match(self.args(0)) if self.args and version: file = '/'.join(self.args[1:]) else: file = '/'.join(self.args) if len(self.args) == 0: bad_static = True # require a file name elif '/' in self.file_match: # match the path bad_static = not self.router._file_match.match(file) else: # match path elements bad_static = False for name in self.args: bad_static = bad_static or name in ( '', '.', '..') or not self.router._file_match.match(name) if bad_static: log_rewrite('bad static path=%s' % file) raise HTTP(400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error='invalid static file') # # support language-specific static subdirectories, # eg /appname/en/static/filename => applications/appname/static/en/filename # if language-specific file doesn't exist, try same file in static # if self.language: static_file = pjoin(self.request.env.applications_parent, 'applications', self.application, 'static', self.language, file) if not self.language or not isfile(static_file): static_file = pjoin(self.request.env.applications_parent, 'applications', self.application, 'static', file) self.extension = None log_rewrite("route: static=%s" % static_file) return static_file, version def map_function(self): "handle function.extension" arg0 = self.harg0 # map hyphens functions = self.functions.get(self.controller, set()) if isinstance(self.router.default_function, dict): default_function = self.router.default_function.get( self.controller, None) else: default_function = self.router.default_function # str or None default_function = self.domain_function or default_function if not arg0 or functions and arg0 not in functions: self.function = default_function or "" self.pop_arg_if(arg0 and self.function == arg0) else: func_ext = arg0.split('.') if len(func_ext) > 1: self.function = func_ext[0] self.extension = func_ext[-1] else: self.function = 
arg0 self.pop_arg_if(True) log_rewrite( "route: function.ext=%s.%s" % (self.function, self.extension)) if not self.router._acfe_match.match(self.function): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error='invalid function') if self.extension and not self.router._acfe_match.match(self.extension): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error='invalid extension') def validate_args(self): ''' check args against validation pattern ''' for arg in self.args: if not self.router._args_match.match(arg): raise HTTP( 400, THREAD_LOCAL.routes.error_message % 'invalid request', web2py_error='invalid arg <%s>' % arg) def sluggify(self): "" self.request.env.update( (k.lower().replace('.', '_'), v) for k, v in self.env.iteritems()) def update_request(self): ''' update request from self build env.request_uri make lower-case versions of http headers in env ''' self.request.application = self.application self.request.controller = self.controller self.request.function = self.function self.request.extension = self.extension self.request.args = self.args if self.language: self.request.uri_language = self.language uri = '/%s/%s' % (self.controller, self.function) app = self.application if self.map_hyphen: uri = uri.replace('_', '-') app = app.replace('_', '-') if self.extension and self.extension != 'html': uri += '.' + self.extension if self.language: uri = '/%s%s' % (self.language, uri) uri = '/%s%s%s%s' % ( app, uri, urllib.quote('/' + '/'.join( str(x) for x in self.args)) if self.args else '', ('?' + self.query) if self.query else '') self.env['REQUEST_URI'] = uri self.sluggify() @property def arg0(self): "return first arg" return self.args(0) @property def harg0(self): "return first arg with optional hyphen mapping" if self.map_hyphen and self.args(0): return self.args(0).replace('-', '_') return self.args(0) def pop_arg_if(self, dopop): "conditionally remove first arg and return new first arg" if dopop: self.args.pop(0) class MapUrlOut(object): "logic for mapping outgoing URLs" def __init__(self, request, env, application, controller, function, args, other, scheme, host, port): "initialize a map-out object" self.default_application = routers.BASE.default_application if application in routers: self.router = routers[application] else: self.router = routers.BASE self.request = request self.env = env self.application = application self.controller = controller self.is_static = ( controller == 'static' or controller.startswith('static/')) self.function = function self.args = args self.other = other self.scheme = scheme self.host = host self.port = port self.applications = routers.BASE.applications self.controllers = self.router.controllers self.functions = self.router.functions.get(self.controller, set()) self.languages = self.router.languages self.default_language = self.router.default_language self.exclusive_domain = self.router.exclusive_domain self.map_hyphen = self.router.map_hyphen self.map_static = self.router.map_static self.path_prefix = routers.BASE.path_prefix self.domain_application = request and self.request.env.domain_application self.domain_controller = request and self.request.env.domain_controller if isinstance(self.router.default_function, dict): self.default_function = self.router.default_function.get( self.controller, None) else: self.default_function = self.router.default_function if (self.router.exclusive_domain and self.domain_application and self.domain_application != self.application and not self.host): raise 
SyntaxError('cross-domain conflict: must specify host') lang = request and request.uri_language if lang and self.languages and lang in self.languages: self.language = lang else: self.language = None self.omit_application = False self.omit_language = False self.omit_controller = False self.omit_function = False def omit_lang(self): "omit language if possible" if not self.language or self.language == self.default_language: self.omit_language = True def omit_acf(self): "omit what we can of a/c/f" router = self.router # Handle the easy no-args case of tail-defaults: /a/c /a / # if not self.args and self.function == self.default_function: self.omit_function = True if self.controller == router.default_controller: self.omit_controller = True if self.application == self.default_application: self.omit_application = True # omit default application # (which might be the domain default application) # default_application = self.domain_application or self.default_application if self.application == default_application: self.omit_application = True # omit controller if default controller # default_controller = ((self.application == self.domain_application) and self.domain_controller) or router.default_controller or '' if self.controller == default_controller: self.omit_controller = True # omit function if possible # if self.functions and self.function in self.functions and self.function == self.default_function: self.omit_function = True # prohibit ambiguous cases # # because we presume the lang string to be unambiguous, its presence protects application omission # if self.exclusive_domain: applications = [self.domain_application] else: applications = self.applications if self.omit_language: if not applications or self.controller in applications: self.omit_application = False if self.omit_application: if not applications or self.function in applications: self.omit_controller = False if not self.controllers or self.function in self.controllers: self.omit_controller = False if self.args: if self.args[0] in self.functions or self.args[0] in self.controllers or self.args[0] in applications: self.omit_function = False if self.omit_controller: if self.function in self.controllers or self.function in applications: self.omit_controller = False if self.omit_application: if self.controller in applications: self.omit_application = False # handle static as a special case # (easier for external static handling) # if self.is_static: if not self.map_static: self.omit_application = False if self.language: self.omit_language = False self.omit_controller = False self.omit_function = False def build_acf(self): "build acf from components" acf = '' if self.map_hyphen: self.application = self.application.replace('_', '-') self.controller = self.controller.replace('_', '-') if self.controller != 'static' and not self.controller.startswith('static/'): self.function = self.function.replace('_', '-') if not self.omit_application: acf += '/' + self.application # handle case of flipping lang/static/file to static/lang/file for external rewrite if self.is_static and self.map_static is False and not self.omit_language: acf += '/' + self.controller + '/' + self.language else: if not self.omit_language: acf += '/' + self.language if not self.omit_controller: acf += '/' + self.controller if not self.omit_function: acf += '/' + self.function if self.path_prefix: acf = '/' + '/'.join(self.path_prefix) + acf if self.args: return acf return acf or '/' def acf(self): "convert components to /app/lang/controller/function" if not routers: return 
None # use regex filter self.omit_lang() # try to omit language self.omit_acf() # try to omit a/c/f return self.build_acf() # build and return the /a/lang/c/f string def map_url_in(request, env, app=False): "route incoming URL" # initialize router-url object # THREAD_LOCAL.routes = params # default to base routes map = MapUrlIn(request=request, env=env) map.sluggify() map.map_prefix() # strip prefix if present map.map_app() # determine application # configure THREAD_LOCAL.routes for error rewrite # if params.routes_app: THREAD_LOCAL.routes = params_apps.get(app, params) if app: return map.application root_static_file, version = map.map_root_static( ) # handle root-static files if root_static_file: map.update_request() return (root_static_file, version, map.env) # handle mapping of lang/static to static/lang in externally-rewritten URLs # in case we have to handle them ourselves if map.languages and map.map_static is False and map.arg0 == 'static' and map.args(1) in map.languages: map.map_controller() map.map_language() else: map.map_language() map.map_controller() static_file, version = map.map_static() if static_file: map.update_request() return (static_file, version, map.env) map.map_function() map.validate_args() map.update_request() return (None, None, map.env) def map_url_out(request, env, application, controller, function, args, other, scheme, host, port): ''' supply /a/c/f (or /a/lang/c/f) portion of outgoing url The basic rule is that we can only make transformations that map_url_in can reverse. Suppose that the incoming arguments are a,c,f,args,lang and that the router defaults are da, dc, df, dl. We can perform these transformations trivially if args=[] and lang=None or dl: /da/dc/df => / /a/dc/df => /a /a/c/df => /a/c We would also like to be able to strip the default application or application/controller from URLs with function/args present, thus: /da/c/f/args => /c/f/args /da/dc/f/args => /f/args We use [applications] and [controllers] and {functions} to suppress ambiguous omissions. We assume that language names do not collide with a/c/f names. ''' map = MapUrlOut(request, env, application, controller, function, args, other, scheme, host, port) return map.acf() def get_effective_router(appname): "return a private copy of the effective router for the specified application" if not routers or appname not in routers: return None return Storage(routers[appname]) # return a copy
Python
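The load() and compile_regex() functions above read an optional routes.py. Below is a minimal sketch of the regex-based form of that file; the application, controller and path names are hypothetical, and the comments describe how compile_regex() completes the abbreviated patterns.

# routes.py (sketch) -- regex-based rewriting, consumed by load() above
routes_in = (
    # '/welcome' is completed by compile_regex() to
    # '^.*?:https?://[^:/]+:[a-z]+ /welcome$'
    ('/welcome', '/myapp/default/index'),
    # '$anything' becomes the named group '(?P<anything>.*)' in the pattern
    # and '\g<anything>' in the replacement
    ('/blog/$anything', '/myapp/blog/index/$anything'),
)
routes_out = (
    ('/myapp/default/index', '/welcome'),
)
# keys of the form 'application/status' checked by filter_err()
routes_onerror = [
    ('myapp/404', '/myapp/default/not_found'),
    ('*/*', '/myapp/default/error'),
]
default_application = 'myapp'
default_controller = 'default'
default_function = 'index'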
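load_routers() and the MapUrlIn/MapUrlOut classes implement the alternative parameter-based router, configured through a routers dict in routes.py. A minimal sketch follows; the key names are taken from the code above, the application and domain names are made up, and a real deployment may need additional keys.

# routes.py (sketch) -- parameter-based router, processed by load_routers()
routers = dict(
    # BASE holds site-wide settings; 'domains' maps 'domain[:port]'
    # to 'application[/controller[/function]]'
    BASE=dict(
        default_application='myapp',
        domains={'example.com': 'myapp',
                 'api.example.com:8080': 'myapp/api'},
    ),
    # per-application settings
    myapp=dict(
        default_controller='default',
        default_function='index',
        languages=['en', 'it'],
        default_language='en',
        map_hyphen=True,
    ),
)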
from test_http import *
from test_cache import *
from test_dal import *
from test_html import *
from test_is_url import *
from test_languages import *
from test_router import *
from test_routes import *
from test_storage import *
from test_template import *
from test_utils import *
from test_contribs import *
from test_web import *

import sys
if sys.version[:3] == '2.7':
    from test_old_doctests import *
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

from SimpleXMLRPCServer import SimpleXMLRPCDispatcher


def handler(request, response, methods):
    response.session_id = None  # no sessions for xmlrpc
    dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
    for method in methods:
        dispatcher.register_function(method)
    dispatcher.register_introspection_functions()
    response.headers['Content-Type'] = 'text/xml'
    dispatch = getattr(dispatcher, '_dispatch', None)
    return dispatcher._marshaled_dispatch(request.body.read(), dispatch)
Python
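handler() above dispatches an XML-RPC request body against a list of plain functions. One plausible way to wire it up from a web2py controller is sketched below; the controller name, the function names, the example URL and the gluon.xmlrpc import path are assumptions, not taken from the source.

# controller sketch (request and response are the usual web2py globals)
def add(a, b):
    return a + b

def call():
    from gluon.xmlrpc import handler  # assumed module path
    return handler(request, response, [add])

# a client could then do something like (URL is a guess for a local install):
# import xmlrpclib
# print xmlrpclib.ServerProxy('http://127.0.0.1:8000/myapp/default/call').add(1, 2)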
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and Limodou <limodou@gmail.com>. License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) This makes uses of the pywin32 package (http://sourceforge.net/projects/pywin32/). You do not need to install this package to use web2py. """ import time import os import sys import traceback try: import win32serviceutil import win32service import win32event except: if os.name == 'nt': print "Warning, winservice is unable to install the Mark Hammond Win32 extensions" import servicemanager import _winreg from fileutils import up __all__ = ['web2py_windows_service_handler'] class Service(win32serviceutil.ServiceFramework): _svc_name_ = '_unNamed' _svc_display_name_ = '_Service Template' def __init__(self, *args): win32serviceutil.ServiceFramework.__init__(self, *args) self.stop_event = win32event.CreateEvent(None, 0, 0, None) def log(self, msg): servicemanager.LogInfoMsg(str(msg)) def SvcDoRun(self): self.ReportServiceStatus(win32service.SERVICE_START_PENDING) try: self.ReportServiceStatus(win32service.SERVICE_RUNNING) self.start() win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE) except: self.log(traceback.format_exc(sys.exc_info)) self.SvcStop() self.ReportServiceStatus(win32service.SERVICE_STOPPED) def SvcStop(self): self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) try: self.stop() except: self.log(traceback.format_exc(sys.exc_info)) win32event.SetEvent(self.stop_event) self.ReportServiceStatus(win32service.SERVICE_STOPPED) # to be overridden def start(self): pass # to be overridden def stop(self): pass class Web2pyService(Service): _svc_name_ = 'web2py' _svc_display_name_ = 'web2py Service' _exe_args_ = 'options' server = None def chdir(self): try: h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Services\%s' % self._svc_name_) try: cls = _winreg.QueryValue(h, 'PythonClass') finally: _winreg.CloseKey(h) dir = os.path.dirname(cls) os.chdir(dir) from gluon.settings import global_settings global_settings.gluon_parent = dir return True except: self.log("Can't change to web2py working path; server is stopped") return False def start(self): self.log('web2py server starting') if not self.chdir(): return if len(sys.argv) == 2: opt_mod = sys.argv[1] else: opt_mod = self._exe_args_ options = __import__(opt_mod, [], [], '') if True: # legacy support for old options files, which have only (deprecated) numthreads if hasattr(options, 'numthreads') and not hasattr(options, 'minthreads'): options.minthreads = options.numthreads if not hasattr(options, 'minthreads'): options.minthreads = None if not hasattr(options, 'maxthreads'): options.maxthreads = None import main self.server = main.HttpServer( ip=options.ip, port=options.port, password=options.password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_filename=options.profiler_filename, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=options.request_queue_size, timeout=options.timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder ) try: from rewrite import load load() self.server.start() except: # self.server.stop() self.server = None raise def stop(self): self.log('web2py server stopping') if not self.chdir(): return if self.server: self.server.stop() 
time.sleep(1) class Web2pyCronService(Web2pyService): _svc_name_ = 'web2py_cron' _svc_display_name_ = 'web2py Cron Service' _exe_args_ = 'options' def start(self): import newcron import global_settings self.log('web2py server starting') if not self.chdir(): return if len(sys.argv) == 2: opt_mod = sys.argv[1] else: opt_mod = self._exe_args_ options = __import__(opt_mod, [], [], '') global_settings.global_settings.web2py_crontype = 'external' if options.scheduler: # -K apps = [app.strip() for app in options.scheduler.split( ',') if check_existent_app(options, app.strip())] else: apps = None self.extcron = newcron.extcron(options.folder, apps=apps) try: self.extcron.start() except: # self.server.stop() self.extcron = None raise def stop(self): self.log('web2py cron stopping') if not self.chdir(): return if self.extcron: self.extcron.join() def register_service_handler(argv=None, opt_file='options', cls=Web2pyService): path = os.path.dirname(__file__) web2py_path = up(path) if web2py_path.endswith('.zip'): # in case bianry distro 'library.zip' web2py_path = os.path.dirname(web2py_path) os.chdir(web2py_path) classstring = os.path.normpath( os.path.join(web2py_path, 'gluon.winservice.'+cls.__name__)) if opt_file: cls._exe_args_ = opt_file win32serviceutil.HandleCommandLine( cls, serviceClassString=classstring, argv=['', 'install']) win32serviceutil.HandleCommandLine( cls, serviceClassString=classstring, argv=argv) if __name__ == '__main__': register_service_handler(cls=Web2pyService) register_service_handler(cls=Web2pyCronService)
Python
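Web2pyService.start() imports an options module by name and reads a fixed set of attributes from it. The sketch below lists those attributes (names taken from the service code above) with placeholder values; an actual install would adjust them.

# options.py (sketch) -- read by Web2pyService.start() / Web2pyCronService.start()
import os
import socket

ip = '0.0.0.0'
port = 8000
password = '<recycle>'        # placeholder admin password setting
pid_filename = 'httpserver.pid'
log_filename = 'httpserver.log'
profiler_filename = None
ssl_certificate = ''          # e.g. path to a certificate file
ssl_private_key = ''          # e.g. path to the matching private key
minthreads = None
maxthreads = None
server_name = socket.gethostname()
request_queue_size = 5
timeout = 30
shutdown_timeout = 5
folder = os.getcwd()
scheduler = None              # comma-separated app list for the cron service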
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) """ import cgi import os import re import copy import types import urllib import base64 import sanitizer import itertools import decoder import copy_reg import cPickle import marshal from HTMLParser import HTMLParser from htmlentitydefs import name2codepoint from storage import Storage from utils import web2py_uuid, simple_hash, compare from highlight import highlight regex_crlf = re.compile('\r|\n') join = ''.join # name2codepoint is incomplete respect to xhtml (and xml): 'apos' is missing. entitydefs = dict(map(lambda ( k, v): (k, unichr(v).encode('utf-8')), name2codepoint.iteritems())) entitydefs.setdefault('apos', u"'".encode('utf-8')) __all__ = [ 'A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'BUTTON', 'CENTER', 'CAT', 'CODE', 'COL', 'COLGROUP', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'I', 'IFRAME', 'IMG', 'INPUT', 'LABEL', 'LEGEND', 'LI', 'LINK', 'OL', 'UL', 'MARKMIN', 'MENU', 'META', 'OBJECT', 'ON', 'OPTION', 'P', 'PRE', 'SCRIPT', 'OPTGROUP', 'SELECT', 'SPAN', 'STRONG', 'STYLE', 'TABLE', 'TAG', 'TD', 'TEXTAREA', 'TH', 'THEAD', 'TBODY', 'TFOOT', 'TITLE', 'TR', 'TT', 'URL', 'XHTML', 'XML', 'xmlescape', 'embed64', ] def xmlescape(data, quote=True): """ returns an escaped string of the provided data :param data: the data to be escaped :param quote: optional (default False) """ # first try the xml function if hasattr(data, 'xml') and callable(data.xml): return data.xml() # otherwise, make it a string if not isinstance(data, (str, unicode)): data = str(data) elif isinstance(data, unicode): data = data.encode('utf8', 'xmlcharrefreplace') # ... and do the escaping data = cgi.escape(data, quote).replace("'", "&#x27;") return data def call_as_list(f,*a,**b): if not isinstance(f, (list,tuple)): f = [f] for item in f: item(*a,**b) def truncate_string(text, length, dots='...'): text = text.decode('utf-8') if len(text) > length: text = text[:length - len(dots)].encode('utf-8') + dots return text def URL( a=None, c=None, f=None, r=None, args=None, vars=None, anchor='', extension=None, env=None, hmac_key=None, hash_vars=True, salt=None, user_signature=None, scheme=None, host=None, port=None, encode_embedded_slash=False, url_encode=True ): """ generate a URL example:: >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':1, 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=1&q=2#1' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':(1,3), 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=1&p=3&q=2#1' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... vars={'p':(3,1), 'q':2}, anchor='1')) '/a/c/f/x/y/z?p=3&p=1&q=2#1' >>> str(URL(a='a', c='c', f='f', anchor='1+2')) '/a/c/f#1%2B2' >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], ... 
vars={'p':(1,3), 'q':2}, anchor='1', hmac_key='key')) '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1' >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'])) '/a/c/f/w/x/y/z' >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'], encode_embedded_slash=True)) '/a/c/f/w%2Fx/y%2Fz' >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=False)) '/a/c/f/%(id)d' >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=True)) '/a/c/f/%25%28id%29d' >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=False)) '/a/c/f?id=%(id)d' >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=True)) '/a/c/f?id=%25%28id%29d' >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=False)) '/a/c/f#%(id)d' >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=True)) '/a/c/f#%25%28id%29d' generates a url '/a/c/f' corresponding to application a, controller c and function f. If r=request is passed, a, c, f are set, respectively, to r.application, r.controller, r.function. The more typical usage is: URL(r=request, f='index') that generates a url for the index function within the present application and controller. :param a: application (default to current if r is given) :param c: controller (default to current if r is given) :param f: function (default to current if r is given) :param r: request (optional) :param args: any arguments (optional) :param vars: any variables (optional) :param anchor: anchorname, without # (optional) :param hmac_key: key to use when generating hmac signature (optional) :param hash_vars: which of the vars to include in our hmac signature True (default) - hash all vars, False - hash none of the vars, iterable - hash only the included vars ['key1','key2'] :param scheme: URI scheme (True, 'http' or 'https', etc); forces absolute URL (optional) :param host: string to force absolute URL with host (True means http_host) :param port: optional port number (forces absolute URL) :raises SyntaxError: when no application, controller or function is available :raises SyntaxError: when a CRLF is found in the generated url """ from rewrite import url_out # done here in case used not-in web2py if args in (None, []): args = [] vars = vars or {} application = None controller = None function = None if not isinstance(args, (list, tuple)): args = [args] if not r: if a and not c and not f: (f, a, c) = (a, c, f) elif a and c and not f: (c, f, a) = (a, c, f) from globals import current if hasattr(current, 'request'): r = current.request if r: application = r.application controller = r.controller function = r.function env = r.env if extension is None and r.extension != 'html': extension = r.extension if a: application = a if c: controller = c if f: if not isinstance(f, str): if hasattr(f, '__name__'): function = f.__name__ else: raise SyntaxError( 'when calling URL, function or function name required') elif '/' in f: if f.startswith("/"): f = f[1:] items = f.split('/') function = f = items[0] args = items[1:] + args else: function = f # if the url gets a static resource, don't force extention if controller == 'static': extension = None if '.' 
in function: function, extension = function.rsplit('.', 1) function2 = '%s.%s' % (function, extension or 'html') if not (application and controller and function): raise SyntaxError('not enough information to build the url (%s %s %s)' % (application, controller, function)) if args: if url_encode: if encode_embedded_slash: other = '/' + '/'.join([urllib.quote(str( x), '') for x in args]) else: other = args and urllib.quote( '/' + '/'.join([str(x) for x in args])) else: other = args and ('/' + '/'.join([str(x) for x in args])) else: other = '' if other.endswith('/'): other += '/' # add trailing slash to make last trailing empty arg explicit list_vars = [] for (key, vals) in sorted(vars.items()): if key == '_signature': continue if not isinstance(vals, (list, tuple)): vals = [vals] for val in vals: list_vars.append((key, val)) if user_signature: from globals import current if current.session.auth: hmac_key = current.session.auth.hmac_key if hmac_key: # generate an hmac signature of the vars & args so can later # verify the user hasn't messed with anything h_args = '/%s/%s/%s%s' % (application, controller, function2, other) # how many of the vars should we include in our hash? if hash_vars is True: # include them all h_vars = list_vars elif hash_vars is False: # include none of them h_vars = '' else: # include just those specified if hash_vars and not isinstance(hash_vars, (list, tuple)): hash_vars = [hash_vars] h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] # re-assembling the same way during hash authentication message = h_args + '?' + urllib.urlencode(sorted(h_vars)) sig = simple_hash( message, hmac_key or '', salt or '', digest_alg='sha1') # add the signature into vars list_vars.append(('_signature', sig)) if list_vars: if url_encode: other += '?%s' % urllib.urlencode(list_vars) else: other += '?%s' % '&'.join(['%s=%s' % var[:2] for var in list_vars]) if anchor: if url_encode: other += '#' + urllib.quote(str(anchor)) else: other += '#' + (str(anchor)) if extension: function += '.' + extension if regex_crlf.search(join([application, controller, function, other])): raise SyntaxError('CRLF Injection Detected') url = url_out(r, env, application, controller, function, args, other, scheme, host, port) return url def verifyURL(request, hmac_key=None, hash_vars=True, salt=None, user_signature=None): """ Verifies that a request's args & vars have not been tampered with by the user :param request: web2py's request object :param hmac_key: the key to authenticate with, must be the same one previously used when calling URL() :param hash_vars: which vars to include in our hashing. (Optional) Only uses the 1st value currently True (or undefined) means all, False none, an iterable just the specified keys do not call directly. Use instead: URL.verify(hmac_key='...') the key has to match the one used to generate the URL. 
>>> r = Storage() >>> gv = Storage(p=(1,3),q=2,_signature='a32530f0d0caa80964bb92aad2bedf8a4486a31f') >>> r.update(dict(application='a', controller='c', function='f', extension='html')) >>> r['args'] = ['x', 'y', 'z'] >>> r['get_vars'] = gv >>> verifyURL(r, 'key') True >>> verifyURL(r, 'kay') False >>> r.get_vars.p = (3, 1) >>> verifyURL(r, 'key') True >>> r.get_vars.p = (3, 2) >>> verifyURL(r, 'key') False """ if not '_signature' in request.get_vars: return False # no signature in the request URL # check if user_signature requires if user_signature: from globals import current if not current.session or not current.session.auth: return False hmac_key = current.session.auth.hmac_key if not hmac_key: return False # get our sig from request.get_vars for later comparison original_sig = request.get_vars._signature # now generate a new hmac for the remaining args & vars vars, args = request.get_vars, request.args # remove the signature var since it was not part of our signed message request.get_vars.pop('_signature') # join all the args & vars into one long string # always include all of the args other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or '' h_args = '/%s/%s/%s.%s%s' % (request.application, request.controller, request.function, request.extension, other) # but only include those vars specified (allows more flexibility for use with # forms or ajax) list_vars = [] for (key, vals) in sorted(vars.items()): if not isinstance(vals, (list, tuple)): vals = [vals] for val in vals: list_vars.append((key, val)) # which of the vars are to be included? if hash_vars is True: # include them all h_vars = list_vars elif hash_vars is False: # include none of them h_vars = '' else: # include just those specified # wrap in a try - if the desired vars have been removed it'll fail try: if hash_vars and not isinstance(hash_vars, (list, tuple)): hash_vars = [hash_vars] h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] except: # user has removed one of our vars! Immediate fail return False # build the full message string with both args & vars message = h_args + '?' + urllib.urlencode(sorted(h_vars)) # hash with the hmac_key provided sig = simple_hash(message, str(hmac_key), salt or '', digest_alg='sha1') # put _signature back in get_vars just in case a second call to URL.verify is performed # (otherwise it'll immediately return false) request.get_vars['_signature'] = original_sig # return whether or not the signature in the request matched the one we just generated # (I.E. 
was the message the same as the one we originally signed) return compare(original_sig, sig) URL.verify = verifyURL ON = True class XmlComponent(object): """ Abstract root for all Html components """ # TODO: move some DIV methods to here def xml(self): raise NotImplementedError def __mul__(self, n): return CAT(*[self for i in range(n)]) def __add__(self, other): if isinstance(self, CAT): components = self.components else: components = [self] if isinstance(other, CAT): components += other.components else: components += [other] return CAT(*components) def add_class(self, name): """ add a class to _class attribute """ c = self['_class'] classes = (set(c.split()) if c else set()) | set(name.split()) self['_class'] = ' '.join(classes) if classes else None return self def remove_class(self, name): """ remove a class from _class attribute """ c = self['_class'] classes = (set(c.split()) if c else set()) - set(name.split()) self['_class'] = ' '.join(classes) if classes else None return self class XML(XmlComponent): """ use it to wrap a string that contains XML/HTML so that it will not be escaped by the template example: >>> XML('<h1>Hello</h1>').xml() '<h1>Hello</h1>' """ def __init__( self, text, sanitize=False, permitted_tags=[ 'a', 'b', 'blockquote', 'br/', 'i', 'li', 'ol', 'ul', 'p', 'cite', 'code', 'pre', 'img/', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'table', 'tr', 'td', 'div', 'strong','span', ], allowed_attributes={ 'a': ['href', 'title', 'target'], 'img': ['src', 'alt'], 'blockquote': ['type'], 'td': ['colspan'], }, ): """ :param text: the XML text :param sanitize: sanitize text using the permitted tags and allowed attributes (default False) :param permitted_tags: list of permitted tags (default: simple list of tags) :param allowed_attributes: dictionary of allowed attributed (default for A, IMG and BlockQuote). The key is the tag; the value is a list of allowed attributes. """ if sanitize: text = sanitizer.sanitize(text, permitted_tags, allowed_attributes) if isinstance(text, unicode): text = text.encode('utf8', 'xmlcharrefreplace') elif not isinstance(text, str): text = str(text) self.text = text def xml(self): return self.text def __str__(self): return self.text def __add__(self, other): return '%s%s' % (self, other) def __radd__(self, other): return '%s%s' % (other, self) def __cmp__(self, other): return cmp(str(self), str(other)) def __hash__(self): return hash(str(self)) # why was this here? Break unpickling in sessions # def __getattr__(self, name): # return getattr(str(self), name) def __getitem__(self, i): return str(self)[i] def __getslice__(self, i, j): return str(self)[i:j] def __iter__(self): for c in str(self): yield c def __len__(self): return len(str(self)) def flatten(self, render=None): """ return the text stored by the XML object rendered by the render function """ if render: return render(self.text, None, {}) return self.text def elements(self, *args, **kargs): """ to be considered experimental since the behavior of this method is questionable another options could be TAG(self.text).elements(*args,**kargs) """ return [] ### important to allow safe session.flash=T(....) def XML_unpickle(data): return marshal.loads(data) def XML_pickle(data): return XML_unpickle, (marshal.dumps(str(data)),) copy_reg.pickle(XML, XML_pickle, XML_unpickle) class DIV(XmlComponent): """ HTML helper, for easy generating and manipulating a DOM structure. Little or no validation is done. Behaves like a dictionary regarding updating of attributes. 
Behaves like a list regarding inserting/appending components. example:: >>> DIV('hello', 'world', _style='color:red;').xml() '<div style=\"color:red;\">helloworld</div>' all other HTML helpers are derived from DIV. _something=\"value\" attributes are transparently translated into something=\"value\" HTML attributes """ # name of the tag, subclasses should update this # tags ending with a '/' denote classes that cannot # contain components tag = 'div' def __init__(self, *components, **attributes): """ :param *components: any components that should be nested in this element :param **attributes: any attributes you want to give to this element :raises SyntaxError: when a stand alone tag receives components """ if self.tag[-1:] == '/' and components: raise SyntaxError('<%s> tags cannot have components' % self.tag) if len(components) == 1 and isinstance(components[0], (list, tuple)): self.components = list(components[0]) else: self.components = list(components) self.attributes = attributes self._fixup() # converts special attributes in components attributes self.parent = None for c in self.components: self._setnode(c) self._postprocessing() def update(self, **kargs): """ dictionary like updating of the tag attributes """ for (key, value) in kargs.iteritems(): self[key] = value return self def append(self, value): """ list style appending of components >>> a=DIV() >>> a.append(SPAN('x')) >>> print a <div><span>x</span></div> """ self._setnode(value) ret = self.components.append(value) self._fixup() return ret def insert(self, i, value): """ list style inserting of components >>> a=DIV() >>> a.insert(0,SPAN('x')) >>> print a <div><span>x</span></div> """ self._setnode(value) ret = self.components.insert(i, value) self._fixup() return ret def __getitem__(self, i): """ gets attribute with name 'i' or component #i. If attribute 'i' is not found returns None :param i: index if i is a string: the name of the attribute otherwise references to number of the component """ if isinstance(i, str): try: return self.attributes[i] except KeyError: return None else: return self.components[i] def __setitem__(self, i, value): """ sets attribute with name 'i' or component #i. :param i: index if i is a string: the name of the attribute otherwise references to number of the component :param value: the new value """ self._setnode(value) if isinstance(i, (str, unicode)): self.attributes[i] = value else: self.components[i] = value def __delitem__(self, i): """ deletes attribute with name 'i' or component #i. :param i: index if i is a string: the name of the attribute otherwise references to number of the component """ if isinstance(i, str): del self.attributes[i] else: del self.components[i] def __len__(self): """ returns the number of included components """ return len(self.components) def __nonzero__(self): """ always return True """ return True def _fixup(self): """ Handling of provided components. Nothing to fixup yet. May be overridden by subclasses, eg for wrapping some components in another component or blocking them. """ return def _wrap_components(self, allowed_parents, wrap_parent=None, wrap_lambda=None): """ helper for _fixup. 
Checks if a component is in allowed_parents, otherwise wraps it in wrap_parent :param allowed_parents: (tuple) classes that the component should be an instance of :param wrap_parent: the class to wrap the component in, if needed :param wrap_lambda: lambda to use for wrapping, if needed """ components = [] for c in self.components: if isinstance(c, allowed_parents): pass elif wrap_lambda: c = wrap_lambda(c) else: c = wrap_parent(c) if isinstance(c, DIV): c.parent = self components.append(c) self.components = components def _postprocessing(self): """ Handling of attributes (normally the ones not prefixed with '_'). Nothing to postprocess yet. May be overridden by subclasses """ return def _traverse(self, status, hideerror=False): # TODO: docstring newstatus = status for c in self.components: if hasattr(c, '_traverse') and callable(c._traverse): c.vars = self.vars c.request_vars = self.request_vars c.errors = self.errors c.latest = self.latest c.session = self.session c.formname = self.formname c['hideerror'] = hideerror or \ self.attributes.get('hideerror', False) newstatus = c._traverse(status, hideerror) and newstatus # for input, textarea, select, option # deal with 'value' and 'validation' name = self['_name'] if newstatus: newstatus = self._validate() self._postprocessing() elif 'old_value' in self.attributes: self['value'] = self['old_value'] self._postprocessing() elif name and name in self.vars: self['value'] = self.vars[name] self._postprocessing() if name: self.latest[name] = self['value'] return newstatus def _validate(self): """ nothing to validate yet. May be overridden by subclasses """ return True def _setnode(self, value): if isinstance(value, DIV): value.parent = self def _xml(self): """ helper for xml generation. Returns separately: - the component attributes - the generated xml of the inner components Component attributes start with an underscore ('_') and do not have a False or None value. The underscore is removed. A value of True is replaced with the attribute name. :returns: tuple: (attributes, components) """ # get the attributes for this component # (they start with '_', others may have special meanings) attr = [] for key, value in self.attributes.iteritems(): if key[:1] != '_': continue name = key[1:] if value is True: value = name elif value is False or value is None: continue attr.append((name, value)) data = self.attributes.get('data',{}) for key, value in data.iteritems(): name = 'data-' + key value = data[key] attr.append((name,value)) attr.sort() fa = '' for name,value in attr: fa += ' %s="%s"' % (name, xmlescape(value, True)) # get the xml for the inner components co = join([xmlescape(component) for component in self.components]) return (fa, co) def xml(self): """ generates the xml for this component. 
""" (fa, co) = self._xml() if not self.tag: return co if self.tag[-1:] == '/': # <tag [attributes] /> return '<%s%s />' % (self.tag[:-1], fa) # else: <tag [attributes]> inner components xml </tag> return '<%s%s>%s</%s>' % (self.tag, fa, co, self.tag) def __str__(self): """ str(COMPONENT) returns equals COMPONENT.xml() """ return self.xml() def flatten(self, render=None): """ return the text stored by the DIV object rendered by the render function the render function must take text, tagname, and attributes render=None is equivalent to render=lambda text, tag, attr: text >>> markdown = lambda text,tag=None,attributes={}: \ {None: re.sub('\s+',' ',text), \ 'h1':'#'+text+'\\n\\n', \ 'p':text+'\\n'}.get(tag,text) >>> a=TAG('<h1>Header</h1><p>this is a test</p>') >>> a.flatten(markdown) '#Header\\n\\nthis is a test\\n' """ text = '' for c in self.components: if isinstance(c, XmlComponent): s = c.flatten(render) elif render: s = render(str(c)) else: s = str(c) text += s if render: text = render(text, self.tag, self.attributes) return text regex_tag = re.compile('^[\w\-\:]+') regex_id = re.compile('#([\w\-]+)') regex_class = re.compile('\.([\w\-]+)') regex_attr = re.compile('\[([\w\-\:]+)=(.*?)\]') def elements(self, *args, **kargs): """ find all component that match the supplied attribute dictionary, or None if nothing could be found All components of the components are searched. >>> a = DIV(DIV(SPAN('x'),3,DIV(SPAN('y')))) >>> for c in a.elements('span',first_only=True): c[0]='z' >>> print a <div><div><span>z</span>3<div><span>y</span></div></div></div> >>> for c in a.elements('span'): c[0]='z' >>> print a <div><div><span>z</span>3<div><span>z</span></div></div></div> It also supports a syntax compatible with jQuery >>> a=TAG('<div><span><a id="1-1" u:v=$>hello</a></span><p class="this is a test">world</p></div>') >>> for e in a.elements('div a#1-1, p.is'): print e.flatten() hello world >>> for e in a.elements('#1-1'): print e.flatten() hello >>> a.elements('a[u:v=$]')[0].xml() '<a id="1-1" u:v="$">hello</a>' >>> a=FORM( INPUT(_type='text'), SELECT(range(1)), TEXTAREA() ) >>> for c in a.elements('input, select, textarea'): c['_disabled'] = 'disabled' >>> a.xml() '<form action="#" enctype="multipart/form-data" method="post"><input disabled="disabled" type="text" /><select disabled="disabled"><option value="0">0</option></select><textarea cols="40" disabled="disabled" rows="10"></textarea></form>' Elements that are matched can also be replaced or removed by specifying a "replace" argument (note, a list of the original matching elements is still returned as usual). >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc')))) >>> b = a.elements('span.abc', replace=P('x', _class='xyz')) >>> print a <div><div><p class="xyz">x</p><div><p class="xyz">x</p><p class="xyz">x</p></div></div></div> "replace" can be a callable, which will be passed the original element and should return a new element to replace it. >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc')))) >>> b = a.elements('span.abc', replace=lambda el: P(el[0], _class='xyz')) >>> print a <div><div><p class="xyz">x</p><div><p class="xyz">y</p><p class="xyz">z</p></div></div></div> If replace=None, matching elements will be removed completely. 
>>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc')))) >>> b = a.elements('span', find='y', replace=None) >>> print a <div><div><span class="abc">x</span><div><span class="abc">z</span></div></div></div> If a "find_text" argument is specified, elements will be searched for text components that match find_text, and any matching text components will be replaced (find_text is ignored if "replace" is not also specified). Like the "find" argument, "find_text" can be a string or a compiled regex. >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc')))) >>> b = a.elements(find_text=re.compile('x|y|z'), replace='hello') >>> print a <div><div><span class="abc">hello</span><div><span class="abc">hello</span><span class="abc">hello</span></div></div></div> If other attributes are specified along with find_text, then only components that match the specified attributes will be searched for find_text. >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='efg'), SPAN('z', _class='abc')))) >>> b = a.elements('span.efg', find_text=re.compile('x|y|z'), replace='hello') >>> print a <div><div><span class="abc">x</span><div><span class="efg">hello</span><span class="abc">z</span></div></div></div> """ if len(args) == 1: args = [a.strip() for a in args[0].split(',')] if len(args) > 1: subset = [self.elements(a, **kargs) for a in args] return reduce(lambda a, b: a + b, subset, []) elif len(args) == 1: items = args[0].split() if len(items) > 1: subset = [a.elements(' '.join( items[1:]), **kargs) for a in self.elements(items[0])] return reduce(lambda a, b: a + b, subset, []) else: item = items[0] if '#' in item or '.' in item or '[' in item: match_tag = self.regex_tag.search(item) match_id = self.regex_id.search(item) match_class = self.regex_class.search(item) match_attr = self.regex_attr.finditer(item) args = [] if match_tag: args = [match_tag.group()] if match_id: kargs['_id'] = match_id.group(1) if match_class: kargs['_class'] = re.compile('(?<!\w)%s(?!\w)' % match_class.group(1).replace('-', '\\-').replace(':', '\\:')) for item in match_attr: kargs['_' + item.group(1)] = item.group(2) return self.elements(*args, **kargs) # make a copy of the components matches = [] # check if the component has an attribute with the same # value as provided check = True tag = getattr(self, 'tag').replace('/', '') if args and tag not in args: check = False for (key, value) in kargs.iteritems(): if key not in ['first_only', 'replace', 'find_text']: if isinstance(value, (str, int)): if self[key] != str(value): check = False elif key in self.attributes: if not value.search(str(self[key])): check = False else: check = False if 'find' in kargs: find = kargs['find'] is_regex = not isinstance(find, (str, int)) for c in self.components: if (isinstance(c, str) and ((is_regex and find.search(c)) or (str(find) in c))): check = True # if found, return the component if check: matches.append(self) first_only = kargs.get('first_only', False) replace = kargs.get('replace', False) find_text = replace is not False and kargs.get('find_text', False) is_regex = not isinstance(find_text, (str, int, bool)) find_components = not (check and first_only) def replace_component(i): if replace is None: del self[i] elif callable(replace): self[i] = replace(self[i]) else: self[i] = replace # loop the components if find_text or find_components: for i, c in enumerate(self.components): if check and find_text and isinstance(c, str) and \ ((is_regex and find_text.search(c)) 
or (str(find_text) in c)): replace_component(i) if find_components and isinstance(c, XmlComponent): child_matches = c.elements(*args, **kargs) if len(child_matches): if not find_text and replace is not False and child_matches[0] is c: replace_component(i) if first_only: return child_matches matches.extend(child_matches) return matches def element(self, *args, **kargs): """ find the first component that matches the supplied attribute dictionary, or None if nothing could be found Also the components of the components are searched. """ kargs['first_only'] = True elements = self.elements(*args, **kargs) if not elements: # we found nothing return None return elements[0] def siblings(self, *args, **kargs): """ find all sibling components that match the supplied argument list and attribute dictionary, or None if nothing could be found """ sibs = [s for s in self.parent.components if not s == self] matches = [] first_only = False if 'first_only' in kargs: first_only = kargs.pop('first_only') for c in sibs: try: check = True tag = getattr(c, 'tag').replace("/", "") if args and tag not in args: check = False for (key, value) in kargs.iteritems(): if c[key] != value: check = False if check: matches.append(c) if first_only: break except: pass return matches def sibling(self, *args, **kargs): """ find the first sibling component that match the supplied argument list and attribute dictionary, or None if nothing could be found """ kargs['first_only'] = True sibs = self.siblings(*args, **kargs) if not sibs: return None return sibs[0] class CAT(DIV): tag = '' def TAG_unpickler(data): return cPickle.loads(data) def TAG_pickler(data): d = DIV() d.__dict__ = data.__dict__ marshal_dump = cPickle.dumps(d) return (TAG_unpickler, (marshal_dump,)) class __TAG__(XmlComponent): """ TAG factory example:: >>> print TAG.first(TAG.second('test'), _key = 3) <first key=\"3\"><second>test</second></first> """ def __getitem__(self, name): return self.__getattr__(name) def __getattr__(self, name): if name[-1:] == '_': name = name[:-1] + '/' if isinstance(name, unicode): name = name.encode('utf-8') class __tag__(DIV): tag = name copy_reg.pickle(__tag__, TAG_pickler, TAG_unpickler) return lambda *a, **b: __tag__(*a, **b) def __call__(self, html): return web2pyHTMLParser(decoder.decoder(html)).tree TAG = __TAG__() class HTML(DIV): """ There are four predefined document type definitions. They can be specified in the 'doctype' parameter: -'strict' enables strict doctype -'transitional' enables transitional doctype (default) -'frameset' enables frameset doctype -'html5' enables HTML 5 doctype -any other string will be treated as user's own doctype 'lang' parameter specifies the language of the document. Defaults to 'en'. 
See also :class:`DIV` """ tag = 'html' strict = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n' transitional = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' frameset = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n' html5 = '<!DOCTYPE HTML>\n' def xml(self): lang = self['lang'] if not lang: lang = 'en' self.attributes['_lang'] = lang doctype = self['doctype'] if doctype is None: doctype = self.transitional elif doctype == 'strict': doctype = self.strict elif doctype == 'transitional': doctype = self.transitional elif doctype == 'frameset': doctype = self.frameset elif doctype == 'html5': doctype = self.html5 elif doctype == '': doctype = '' else: doctype = '%s\n' % doctype (fa, co) = self._xml() return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag) class XHTML(DIV): """ This is XHTML version of the HTML helper. There are three predefined document type definitions. They can be specified in the 'doctype' parameter: -'strict' enables strict doctype -'transitional' enables transitional doctype (default) -'frameset' enables frameset doctype -any other string will be treated as user's own doctype 'lang' parameter specifies the language of the document and the xml document. Defaults to 'en'. 'xmlns' parameter specifies the xml namespace. Defaults to 'http://www.w3.org/1999/xhtml'. See also :class:`DIV` """ tag = 'html' strict = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n' transitional = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n' frameset = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n' xmlns = 'http://www.w3.org/1999/xhtml' def xml(self): xmlns = self['xmlns'] if xmlns: self.attributes['_xmlns'] = xmlns else: self.attributes['_xmlns'] = self.xmlns lang = self['lang'] if not lang: lang = 'en' self.attributes['_lang'] = lang self.attributes['_xml:lang'] = lang doctype = self['doctype'] if doctype: if doctype == 'strict': doctype = self.strict elif doctype == 'transitional': doctype = self.transitional elif doctype == 'frameset': doctype = self.frameset else: doctype = '%s\n' % doctype else: doctype = self.transitional (fa, co) = self._xml() return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag) class HEAD(DIV): tag = 'head' class TITLE(DIV): tag = 'title' class META(DIV): tag = 'meta/' class LINK(DIV): tag = 'link/' class SCRIPT(DIV): tag = 'script' def xml(self): (fa, co) = self._xml() # no escaping of subcomponents co = '\n'.join([str(component) for component in self.components]) if co: # <script [attributes]><!--//--><![CDATA[//><!-- # script body # //--><!]]></script> # return '<%s%s><!--//--><![CDATA[//><!--\n%s\n//--><!]]></%s>' % (self.tag, fa, co, self.tag) return '<%s%s><!--\n%s\n//--></%s>' % (self.tag, fa, co, self.tag) else: return DIV.xml(self) class STYLE(DIV): tag = 'style' def xml(self): (fa, co) = self._xml() # no escaping of subcomponents co = '\n'.join([str(component) for component in self.components]) if co: # <style [attributes]><!--/*--><![CDATA[/*><!--*/ # style body # /*]]>*/--></style> return '<%s%s><!--/*--><![CDATA[/*><!--*/\n%s\n/*]]>*/--></%s>' % (self.tag, fa, co, self.tag) else: return DIV.xml(self) class IMG(DIV): tag = 'img/' class SPAN(DIV): tag = 'span' class 
BODY(DIV): tag = 'body' class H1(DIV): tag = 'h1' class H2(DIV): tag = 'h2' class H3(DIV): tag = 'h3' class H4(DIV): tag = 'h4' class H5(DIV): tag = 'h5' class H6(DIV): tag = 'h6' class P(DIV): """ Will replace ``\\n`` by ``<br />`` if the `cr2br` attribute is provided. see also :class:`DIV` """ tag = 'p' def xml(self): text = DIV.xml(self) if self['cr2br']: text = text.replace('\n', '<br />') return text class STRONG(DIV): tag = 'strong' class B(DIV): tag = 'b' class BR(DIV): tag = 'br/' class HR(DIV): tag = 'hr/' class A(DIV): tag = 'a' def xml(self): if not self.components and self['_href']: self.append(self['_href']) if self['delete']: d = "jQuery(this).closest('%s').remove();" % self['delete'] else: d = '' if self['component']: self['_onclick'] = "web2py_component('%s','%s');%sreturn false;" % \ (self['component'], self['target'] or '', d) self['_href'] = self['_href'] or '#null' elif self['callback']: returnfalse = "var e = arguments[0] || window.event; e.cancelBubble=true; if (e.stopPropagation) {e.stopPropagation(); e.stopImmediatePropagation(); e.preventDefault();}" if d and not self['noconfirm']: self['_onclick'] = "if(confirm(w2p_ajax_confirm_message||'Are you sure you want to delete this object?')){ajax('%s',[],'%s');%s};%s" % \ (self['callback'], self['target'] or '', d, returnfalse) else: self['_onclick'] = "ajax('%s',[],'%s');%sreturn false" % \ (self['callback'], self['target'] or '', d) self['_href'] = self['_href'] or '#null' elif self['cid']: pre = self['pre_call'] + ';' if self['pre_call'] else '' self['_onclick'] = '%sweb2py_component("%s","%s");%sreturn false;' % \ (pre,self['_href'], self['cid'], d) return DIV.xml(self) class BUTTON(DIV): tag = 'button' class EM(DIV): tag = 'em' class EMBED(DIV): tag = 'embed/' class TT(DIV): tag = 'tt' class PRE(DIV): tag = 'pre' class CENTER(DIV): tag = 'center' class CODE(DIV): """ displays code in HTML with syntax highlighting. :param attributes: optional attributes: - language: indicates the language, otherwise PYTHON is assumed - link: can provide a link - styles: for styles Example:: {{=CODE(\"print 'hello world'\", language='python', link=None, counter=1, styles={}, highlight_line=None)}} supported languages are \"python\", \"html_plain\", \"c\", \"cpp\", \"web2py\", \"html\". The \"html\" language interprets {{ and }} tags as \"web2py\" code, \"html_plain\" doesn't. if a link='/examples/global/vars/' is provided web2py keywords are linked to the online docs. the counter is used for line numbering, counter can be None or a prompt string. """ def xml(self): language = self['language'] or 'PYTHON' link = self['link'] counter = self.attributes.get('counter', 1) highlight_line = self.attributes.get('highlight_line', None) context_lines = self.attributes.get('context_lines', None) styles = self['styles'] or {} return highlight( join(self.components), language=language, link=link, counter=counter, styles=styles, attributes=self.attributes, highlight_line=highlight_line, context_lines=context_lines, ) class LABEL(DIV): tag = 'label' class LI(DIV): tag = 'li' class UL(DIV): """ UL Component. If subcomponents are not LI-components they will be wrapped in a LI see also :class:`DIV` """ tag = 'ul' def _fixup(self): self._wrap_components(LI, LI) class OL(UL): tag = 'ol' class TD(DIV): tag = 'td' class TH(DIV): tag = 'th' class TR(DIV): """ TR Component. 
If subcomponents are not TD/TH-components they will be wrapped in a TD see also :class:`DIV` """ tag = 'tr' def _fixup(self): self._wrap_components((TD, TH), TD) class THEAD(DIV): tag = 'thead' def _fixup(self): self._wrap_components(TR, TR) class TBODY(DIV): tag = 'tbody' def _fixup(self): self._wrap_components(TR, TR) class TFOOT(DIV): tag = 'tfoot' def _fixup(self): self._wrap_components(TR, TR) class COL(DIV): tag = 'col' class COLGROUP(DIV): tag = 'colgroup' class TABLE(DIV): """ TABLE Component. If subcomponents are not TR/TBODY/THEAD/TFOOT-components they will be wrapped in a TR see also :class:`DIV` """ tag = 'table' def _fixup(self): self._wrap_components((TR, TBODY, THEAD, TFOOT, COL, COLGROUP), TR) class I(DIV): tag = 'i' class IFRAME(DIV): tag = 'iframe' class INPUT(DIV): """ INPUT Component examples:: >>> INPUT(_type='text', _name='name', value='Max').xml() '<input name=\"name\" type=\"text\" value=\"Max\" />' >>> INPUT(_type='checkbox', _name='checkbox', value='on').xml() '<input checked=\"checked\" name=\"checkbox\" type=\"checkbox\" value=\"on\" />' >>> INPUT(_type='radio', _name='radio', _value='yes', value='yes').xml() '<input checked=\"checked\" name=\"radio\" type=\"radio\" value=\"yes\" />' >>> INPUT(_type='radio', _name='radio', _value='no', value='yes').xml() '<input name=\"radio\" type=\"radio\" value=\"no\" />' the input helper takes two special attributes value= and requires=. :param value: used to pass the initial value for the input field. value differs from _value because it works for checkboxes, radio, textarea and select/option too. - for a checkbox value should be '' or 'on'. - for a radio or select/option value should be the _value of the checked/selected item. :param requires: should be None, or a validator or a list of validators for the value of the field. 
""" tag = 'input/' def _validate(self): # # this only changes value, not _value name = self['_name'] if name is None or name == '': return True name = str(name) request_vars_get = self.request_vars.get if self['_type'] != 'checkbox': self['old_value'] = self['value'] or self['_value'] or '' value = request_vars_get(name, '') self['value'] = value if not hasattr(value,'file') else None else: self['old_value'] = self['value'] or False value = request_vars_get(name) if isinstance(value, (tuple, list)): self['value'] = self['_value'] in value else: self['value'] = self['_value'] == value requires = self['requires'] if requires: if not isinstance(requires, (list, tuple)): requires = [requires] for validator in requires: (value, errors) = validator(value) if not errors is None: self.vars[name] = value self.errors[name] = errors break if not name in self.errors: self.vars[name] = value return True return False def _postprocessing(self): t = self['_type'] if not t: t = self['_type'] = 'text' t = t.lower() value = self['value'] if self['_value'] is None or isinstance(self['_value'],cgi.FieldStorage): _value = None else: _value = str(self['_value']) if '_checked' in self.attributes and not 'value' in self.attributes: pass elif t == 'checkbox': if not _value: _value = self['_value'] = 'on' if not value: value = [] elif value is True: value = [_value] elif not isinstance(value, (list, tuple)): value = str(value).split('|') self['_checked'] = _value in value and 'checked' or None elif t == 'radio': if str(value) == str(_value): self['_checked'] = 'checked' else: self['_checked'] = None elif not t == 'submit': if value is None: self['value'] = _value elif not isinstance(value, list): self['_value'] = value def xml(self): name = self.attributes.get('_name', None) if name and hasattr(self, 'errors') \ and self.errors.get(name, None) \ and self['hideerror'] != True: self['_class'] = (self['_class'] and self['_class'] + ' ' or '') + 'invalidinput' return DIV.xml(self) + DIV( DIV( self.errors[name], _class='error', errors=None, _id='%s__error' % name), _class='error_wrapper').xml() else: if self['_class'] and self['_class'].endswith('invalidinput'): self['_class'] = self['_class'][:-12] if self['_class'] == '': self['_class'] = None return DIV.xml(self) class TEXTAREA(INPUT): """ example:: TEXTAREA(_name='sometext', value='blah '*100, requires=IS_NOT_EMPTY()) 'blah blah blah ...' will be the content of the textarea field. """ tag = 'textarea' def _postprocessing(self): if not '_rows' in self.attributes: self['_rows'] = 10 if not '_cols' in self.attributes: self['_cols'] = 40 if not self['value'] is None: self.components = [self['value']] elif self.components: self['value'] = self.components[0] class OPTION(DIV): tag = 'option' def _fixup(self): if not '_value' in self.attributes: self.attributes['_value'] = str(self.components[0]) class OBJECT(DIV): tag = 'object' class OPTGROUP(DIV): tag = 'optgroup' def _fixup(self): components = [] for c in self.components: if isinstance(c, OPTION): components.append(c) else: components.append(OPTION(c, _value=str(c))) self.components = components class SELECT(INPUT): """ example:: >>> from validators import IS_IN_SET >>> SELECT('yes', 'no', _name='selector', value='yes', ... 
requires=IS_IN_SET(['yes', 'no'])).xml() '<select name=\"selector\"><option selected=\"selected\" value=\"yes\">yes</option><option value=\"no\">no</option></select>' """ tag = 'select' def _fixup(self): components = [] for c in self.components: if isinstance(c, (OPTION, OPTGROUP)): components.append(c) else: components.append(OPTION(c, _value=str(c))) self.components = components def _postprocessing(self): component_list = [] for c in self.components: if isinstance(c, OPTGROUP): component_list.append(c.components) else: component_list.append([c]) options = itertools.chain(*component_list) value = self['value'] if not value is None: if not self['_multiple']: for c in options: # my patch if ((value is not None) and (str(c['_value']) == str(value))): c['_selected'] = 'selected' else: c['_selected'] = None else: if isinstance(value, (list, tuple)): values = [str(item) for item in value] else: values = [str(value)] for c in options: # my patch if ((value is not None) and (str(c['_value']) in values)): c['_selected'] = 'selected' else: c['_selected'] = None class FIELDSET(DIV): tag = 'fieldset' class LEGEND(DIV): tag = 'legend' class FORM(DIV): """ example:: >>> from validators import IS_NOT_EMPTY >>> form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY())) >>> form.xml() '<form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"test\" type=\"text\" /></form>' a FORM is container for INPUT, TEXTAREA, SELECT and other helpers form has one important method:: form.accepts(request.vars, session) if form is accepted (and all validators pass) form.vars contains the accepted vars, otherwise form.errors contains the errors. in case of errors the form is modified to present the errors to the user. """ tag = 'form' def __init__(self, *components, **attributes): DIV.__init__(self, *components, **attributes) self.vars = Storage() self.errors = Storage() self.latest = Storage() self.accepted = None # none for not submitted def assert_status(self, status, request_vars): return status def accepts( self, request_vars, session=None, formname='default', keepvalues=False, onvalidation=None, hideerror=False, **kwargs ): """ kwargs is not used but allows to specify the same interface for FORM and SQLFORM """ if request_vars.__class__.__name__ == 'Request': request_vars = request_vars.post_vars self.errors.clear() self.request_vars = Storage() self.request_vars.update(request_vars) self.session = session self.formname = formname self.keepvalues = keepvalues # if this tag is a form and we are in accepting mode (status=True) # check formname and formkey status = True changed = False request_vars = self.request_vars if session is not None: formkey = session.get('_formkey[%s]' % formname, None) # check if user tampering with form and void CSRF if not formkey or formkey != request_vars._formkey: status = False if formname != request_vars._formname: status = False if status and session: # check if editing a record that has been modified by the server if hasattr(self, 'record_hash') and self.record_hash != formkey: status = False self.record_changed = changed = True status = self._traverse(status, hideerror) status = self.assert_status(status, request_vars) if onvalidation: if isinstance(onvalidation, dict): onsuccess = onvalidation.get('onsuccess', None) onfailure = onvalidation.get('onfailure', None) onchange = onvalidation.get('onchange', None) if [k for k in onvalidation if not k in ( 'onsuccess','onfailure','onchange')]: raise RuntimeError('Invalid key in onvalidate dict') if onsuccess and 
status: call_as_list(onsuccess,self) if onfailure and request_vars and not status: call_as_list(onfailure,self) status = len(self.errors) == 0 if changed: if onchange and self.record_changed and \ self.detect_record_change: call_as_list(onchange,self) elif status: call_as_list(onvalidation, self) if self.errors: status = False if not session is None: if hasattr(self, 'record_hash'): formkey = self.record_hash else: formkey = web2py_uuid() self.formkey = session['_formkey[%s]' % formname] = formkey if status and not keepvalues: self._traverse(False, hideerror) self.accepted = status return status def _postprocessing(self): if not '_action' in self.attributes: self['_action'] = '#' if not '_method' in self.attributes: self['_method'] = 'post' if not '_enctype' in self.attributes: self['_enctype'] = 'multipart/form-data' def hidden_fields(self): c = [] attr = self.attributes.get('hidden', {}) if 'hidden' in self.attributes: c = [INPUT(_type='hidden', _name=key, _value=value) for (key, value) in attr.iteritems()] if hasattr(self, 'formkey') and self.formkey: c.append(INPUT(_type='hidden', _name='_formkey', _value=self.formkey)) if hasattr(self, 'formname') and self.formname: c.append(INPUT(_type='hidden', _name='_formname', _value=self.formname)) return DIV(c, _style="display:none;") def xml(self): newform = FORM(*self.components, **self.attributes) hidden_fields = self.hidden_fields() if hidden_fields.components: newform.append(hidden_fields) return DIV.xml(newform) def validate(self, **kwargs): """ This function validates the form, you can use it instead of directly form.accepts. Usage: In controller def action(): form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY())) form.validate() #you can pass some args here - see below return dict(form=form) This can receive a bunch of arguments onsuccess = 'flash' - will show message_onsuccess in response.flash None - will do nothing can be a function (lambda form: pass) onfailure = 'flash' - will show message_onfailure in response.flash None - will do nothing can be a function (lambda form: pass) onchange = 'flash' - will show message_onchange in response.flash None - will do nothing can be a function (lambda form: pass) message_onsuccess message_onfailure message_onchange next = where to redirect in case of success any other kwargs will be passed for form.accepts(...) """ from gluon import current, redirect kwargs['request_vars'] = kwargs.get( 'request_vars', current.request.post_vars) kwargs['session'] = kwargs.get('session', current.session) kwargs['dbio'] = kwargs.get('dbio', False) # necessary for SQLHTML forms onsuccess = kwargs.get('onsuccess', 'flash') onfailure = kwargs.get('onfailure', 'flash') onchange = kwargs.get('onchange', 'flash') message_onsuccess = kwargs.get('message_onsuccess', current.T("Success!")) message_onfailure = kwargs.get('message_onfailure', current.T("Errors in form, please check it out.")) message_onchange = kwargs.get('message_onchange', current.T("Form consecutive submissions not allowed. 
" + "Try re-submitting or refreshing the form page.")) next = kwargs.get('next', None) for key in ('message_onsuccess', 'message_onfailure', 'onsuccess', 'onfailure', 'next', 'message_onchange', 'onchange'): if key in kwargs: del kwargs[key] if self.accepts(**kwargs): if onsuccess == 'flash': if next: current.session.flash = message_onsuccess else: current.response.flash = message_onsuccess elif callable(onsuccess): onsuccess(self) if next: if self.vars: for key, value in self.vars.iteritems(): next = next.replace('[%s]' % key, urllib.quote(str(value))) if not next.startswith('/'): next = URL(next) redirect(next) return True elif self.errors: if onfailure == 'flash': current.response.flash = message_onfailure elif callable(onfailure): onfailure(self) return False elif hasattr(self, "record_changed"): if self.record_changed and self.detect_record_change: if onchange == 'flash': current.response.flash = message_onchange elif callable(onchange): onchange(self) return False def process(self, **kwargs): """ Perform the .validate() method but returns the form Usage in controllers: # directly on return def action(): #some code here return dict(form=FORM(...).process(...)) You can use it with FORM, SQLFORM or FORM based plugins Examples: #response.flash messages def action(): form = SQLFORM(db.table).process(message_onsuccess='Sucess!') retutn dict(form=form) # callback function # callback receives True or False as first arg, and a list of args. def my_callback(status, msg): response.flash = "Success! "+msg if status else "Errors occured" # after argument can be 'flash' to response.flash messages # or a function name to use as callback or None to do nothing. def action(): return dict(form=SQLFORM(db.table).process(onsuccess=my_callback) """ kwargs['dbio'] = kwargs.get('dbio', True) # necessary for SQLHTML forms self.validate(**kwargs) return self REDIRECT_JS = "window.location='%s';return false" def add_button(self, value, url, _class=None): submit = self.element('input[type=submit]') submit.parent.append( INPUT(_type="button", _value=value, _class=_class, _onclick=self.REDIRECT_JS % url)) @staticmethod def confirm(text='OK', buttons=None, hidden=None): if not buttons: buttons = {} if not hidden: hidden = {} inputs = [INPUT(_type='button', _value=name, _onclick=FORM.REDIRECT_JS % link) for name, link in buttons.iteritems()] inputs += [INPUT(_type='hidden', _name=name, _value=value) for name, value in hidden.iteritems()] form = FORM(INPUT(_type='submit', _value=text), *inputs) form.process() return form def as_dict(self, flat=False, sanitize=True): """EXPERIMENTAL Sanitize is naive. It should catch any unsafe value for client retrieval. 
""" SERIALIZABLE = (int, float, bool, basestring, long, set, list, dict, tuple, Storage, type(None)) UNSAFE = ("PASSWORD", "CRYPT") d = self.__dict__ def sanitizer(obj): if isinstance(obj, dict): for k in obj.keys(): if any([unsafe in str(k).upper() for unsafe in UNSAFE]): # erease unsafe pair obj.pop(k) else: # not implemented pass return obj def flatten(obj): if isinstance(obj, (dict, Storage)): newobj = obj.copy() else: newobj = obj if sanitize: newobj = sanitizer(newobj) if flat: if type(obj) in SERIALIZABLE: if isinstance(newobj, (dict, Storage)): for k in newobj: newk = flatten(k) newobj[newk] = flatten(newobj[k]) if k != newk: newobj.pop(k) return newobj elif isinstance(newobj, (list, tuple, set)): return [flatten(item) for item in newobj] else: return newobj else: return str(newobj) else: return newobj return flatten(d) def as_json(self, sanitize=True): d = self.as_dict(flat=True, sanitize=sanitize) from serializers import json return json(d) def as_yaml(self, sanitize=True): d = self.as_dict(flat=True, sanitize=sanitize) from serializers import yaml return yaml(d) def as_xml(self, sanitize=True): d = self.as_dict(flat=True, sanitize=sanitize) from serializers import xml return xml(d) class BEAUTIFY(DIV): """ example:: >>> BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml() '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;vertical-align:top">hello</td><td valign="top">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>' turns any list, dictionary, etc into decent looking html. Two special attributes are :sorted: a function that takes the dict and returned sorted keys :keyfilter: a funciton that takes a key and returns its representation or None if the key is to be skipped. By default key[:1]=='_' is skipped. 
""" tag = 'div' @staticmethod def no_underscore(key): if key[:1] == '_': return None return key def __init__(self, component, **attributes): self.components = [component] self.attributes = attributes sorter = attributes.get('sorted', sorted) keyfilter = attributes.get('keyfilter', BEAUTIFY.no_underscore) components = [] attributes = copy.copy(self.attributes) level = attributes['level'] = attributes.get('level', 6) - 1 if '_class' in attributes: attributes['_class'] += 'i' if level == 0: return for c in self.components: if hasattr(c, 'value') and not callable(c.value): if c.value: components.append(c.value) if hasattr(c, 'xml') and callable(c.xml): components.append(c) continue elif hasattr(c, 'keys') and callable(c.keys): rows = [] try: keys = (sorter and sorter(c)) or c for key in keys: if isinstance(key, (str, unicode)) and keyfilter: filtered_key = keyfilter(key) else: filtered_key = str(key) if filtered_key is None: continue value = c[key] if isinstance(value, types.LambdaType): continue rows.append( TR( TD(filtered_key, _style='font-weight:bold;vertical-align:top'), TD(':', _valign='top'), TD(BEAUTIFY(value, **attributes)))) components.append(TABLE(*rows, **attributes)) continue except: pass if isinstance(c, str): components.append(str(c)) elif isinstance(c, unicode): components.append(c.encode('utf8')) elif isinstance(c, (list, tuple)): items = [TR(TD(BEAUTIFY(item, **attributes))) for item in c] components.append(TABLE(*items, **attributes)) elif isinstance(c, cgi.FieldStorage): components.append('FieldStorage object') else: components.append(repr(c)) self.components = components class MENU(DIV): """ Used to build menus Optional arguments _class: defaults to 'web2py-menu web2py-menu-vertical' ul_class: defaults to 'web2py-menu-vertical' li_class: defaults to 'web2py-menu-expand' li_first: defaults to 'web2py-menu-first' li_last: defaults to 'web2py-menu-last' Example: menu = MENU([['name', False, URL(...), [submenu]], ...]) {{=menu}} """ tag = 'ul' def __init__(self, data, **args): self.data = data self.attributes = args self.components = [] if not '_class' in self.attributes: self['_class'] = 'web2py-menu web2py-menu-vertical' if not 'ul_class' in self.attributes: self['ul_class'] = 'web2py-menu-vertical' if not 'li_class' in self.attributes: self['li_class'] = 'web2py-menu-expand' if not 'li_first' in self.attributes: self['li_first'] = 'web2py-menu-first' if not 'li_last' in self.attributes: self['li_last'] = 'web2py-menu-last' if not 'li_active' in self.attributes: self['li_active'] = 'web2py-menu-active' if not 'mobile' in self.attributes: self['mobile'] = False def serialize(self, data, level=0): if level == 0: ul = UL(**self.attributes) else: ul = UL(_class=self['ul_class']) for item in data: if isinstance(item,LI): ul.append(item) else: (name, active, link) = item[:3] if isinstance(link, DIV): li = LI(link) elif 'no_link_url' in self.attributes and self['no_link_url'] == link: li = LI(DIV(name)) elif isinstance(link,dict): li = LI(A(name, **link)) elif link: li = LI(A(name, _href=link)) elif not link and isinstance(name, A): li = LI(name) else: li = LI(A(name, _href='#', _onclick='javascript:void(0);return false;')) if level == 0 and item == data[0]: li['_class'] = self['li_first'] elif level == 0 and item == data[-1]: li['_class'] = self['li_last'] if len(item) > 3 and item[3]: li['_class'] = self['li_class'] li.append(self.serialize(item[3], level + 1)) if active or ('active_url' in self.attributes and self['active_url'] == link): if li['_class']: li['_class'] = 
li['_class'] + ' ' + self['li_active'] else: li['_class'] = self['li_active'] if len(item) <= 4 or item[4] == True: ul.append(li) return ul def serialize_mobile(self, data, select=None, prefix=''): if not select: select = SELECT(**self.attributes) for item in data: if len(item) <= 4 or item[4] == True: select.append(OPTION(CAT(prefix, item[0]), _value=item[2], _selected=item[1])) if len(item) > 3 and len(item[3]): self.serialize_mobile( item[3], select, prefix=CAT(prefix, item[0], '/')) select['_onchange'] = 'window.location=this.value' return select def xml(self): if self['mobile']: return self.serialize_mobile(self.data, 0).xml() else: return self.serialize(self.data, 0).xml() def embed64( filename=None, file=None, data=None, extension='image/gif', ): """ helper to encode the provided (binary) data into base64. :param filename: if provided, opens and reads this file in 'rb' mode :param file: if provided, reads this file :param data: if provided, uses the provided data """ if filename and os.path.exists(file): fp = open(filename, 'rb') data = fp.read() fp.close() data = base64.b64encode(data) return 'data:%s;base64,%s' % (extension, data) def test(): """ Example: >>> from validators import * >>> print DIV(A('click me', _href=URL(a='a', c='b', f='c')), BR(), HR(), DIV(SPAN(\"World\"), _class='unknown')).xml() <div><a href=\"/a/b/c\">click me</a><br /><hr /><div class=\"unknown\"><span>World</span></div></div> >>> print DIV(UL(\"doc\",\"cat\",\"mouse\")).xml() <div><ul><li>doc</li><li>cat</li><li>mouse</li></ul></div> >>> print DIV(UL(\"doc\", LI(\"cat\", _class='feline'), 18)).xml() <div><ul><li>doc</li><li class=\"feline\">cat</li><li>18</li></ul></div> >>> print TABLE(['a', 'b', 'c'], TR('d', 'e', 'f'), TR(TD(1), TD(2), TD(3))).xml() <table><tr><td>a</td><td>b</td><td>c</td></tr><tr><td>d</td><td>e</td><td>f</td></tr><tr><td>1</td><td>2</td><td>3</td></tr></table> >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_EXPR('int(value)<10'))) >>> print form.xml() <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" /></form> >>> print form.accepts({'myvar':'34'}, formname=None) False >>> print form.xml() <form action="#" enctype="multipart/form-data" method="post"><input class="invalidinput" name="myvar" type="text" value="34" /><div class="error_wrapper"><div class="error" id="myvar__error">invalid expression</div></div></form> >>> print form.accepts({'myvar':'4'}, formname=None, keepvalues=True) True >>> print form.xml() <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"4\" /></form> >>> form=FORM(SELECT('cat', 'dog', _name='myvar')) >>> print form.accepts({'myvar':'dog'}, formname=None, keepvalues=True) True >>> print form.xml() <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><select name=\"myvar\"><option value=\"cat\">cat</option><option selected=\"selected\" value=\"dog\">dog</option></select></form> >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_MATCH('^\w+$', 'only alphanumeric!'))) >>> print form.accepts({'myvar':'as df'}, formname=None) False >>> print form.xml() <form action="#" enctype="multipart/form-data" method="post"><input class="invalidinput" name="myvar" type="text" value="as df" /><div class="error_wrapper"><div class="error" id="myvar__error">only alphanumeric!</div></div></form> >>> session={} >>> form=FORM(INPUT(value=\"Hello World\", _name=\"var\", requires=IS_MATCH('^\w+$'))) >>> isinstance(form.as_dict(), dict) True 
>>> form.as_dict(flat=True).has_key("vars") True >>> isinstance(form.as_json(), basestring) and len(form.as_json(sanitize=False)) > 0 True >>> if form.accepts({}, session,formname=None): print 'passed' >>> if form.accepts({'var':'test ', '_formkey': session['_formkey[None]']}, session, formname=None): print 'passed' """ pass class web2pyHTMLParser(HTMLParser): """ obj = web2pyHTMLParser(text) parses and html/xml text into web2py helpers. obj.tree contains the root of the tree, and tree can be manipulated >>> str(web2pyHTMLParser('hello<div a="b" c=3>wor&lt;ld<span>xxx</span>y<script/>yy</div>zzz').tree) 'hello<div a="b" c="3">wor&lt;ld<span>xxx</span>y<script></script>yy</div>zzz' >>> str(web2pyHTMLParser('<div>a<span>b</div>c').tree) '<div>a<span>b</span></div>c' >>> tree = web2pyHTMLParser('hello<div a="b">world</div>').tree >>> tree.element(_a='b')['_c']=5 >>> str(tree) 'hello<div a="b" c="5">world</div>' """ def __init__(self, text, closed=('input', 'link')): HTMLParser.__init__(self) self.tree = self.parent = TAG['']() self.closed = closed self.tags = [x for x in __all__ if isinstance(eval(x), DIV)] self.last = None self.feed(text) def handle_starttag(self, tagname, attrs): if tagname.upper() in self.tags: tag = eval(tagname.upper()) else: if tagname in self.closed: tagname += '/' tag = TAG[tagname]() for key, value in attrs: tag['_' + key] = value tag.parent = self.parent self.parent.append(tag) if not tag.tag.endswith('/'): self.parent = tag else: self.last = tag.tag[:-1] def handle_data(self, data): if not isinstance(data, unicode): try: data = data.decode('utf8') except: data = data.decode('latin1') self.parent.append(data.encode('utf8', 'xmlcharref')) def handle_charref(self, name): if name.startswith('x'): self.parent.append(unichr(int(name[1:], 16)).encode('utf8')) else: self.parent.append(unichr(int(name)).encode('utf8')) def handle_entityref(self, name): self.parent.append(entitydefs[name]) def handle_endtag(self, tagname): # this deals with unbalanced tags if tagname == self.last: return while True: try: parent_tagname = self.parent.tag self.parent = self.parent.parent except: raise RuntimeError("unable to balance tag %s" % tagname) if parent_tagname[:len(tagname)] == tagname: break def markdown_serializer(text, tag=None, attr=None): attr = attr or {} if tag is None: return re.sub('\s+', ' ', text) if tag == 'br': return '\n\n' if tag == 'h1': return '#' + text + '\n\n' if tag == 'h2': return '#' * 2 + text + '\n\n' if tag == 'h3': return '#' * 3 + text + '\n\n' if tag == 'h4': return '#' * 4 + text + '\n\n' if tag == 'p': return text + '\n\n' if tag == 'b' or tag == 'strong': return '**%s**' % text if tag == 'em' or tag == 'i': return '*%s*' % text if tag == 'tt' or tag == 'code': return '`%s`' % text if tag == 'a': return '[%s](%s)' % (text, attr.get('_href', '')) if tag == 'img': return '![%s](%s)' % (attr.get('_alt', ''), attr.get('_src', '')) return text def markmin_serializer(text, tag=None, attr=None): attr = attr or {} # if tag is None: return re.sub('\s+',' ',text) if tag == 'br': return '\n\n' if tag == 'h1': return '# ' + text + '\n\n' if tag == 'h2': return '#' * 2 + ' ' + text + '\n\n' if tag == 'h3': return '#' * 3 + ' ' + text + '\n\n' if tag == 'h4': return '#' * 4 + ' ' + text + '\n\n' if tag == 'p': return text + '\n\n' if tag == 'li': return '\n- ' + text.replace('\n', ' ') if tag == 'tr': return text[3:].replace('\n', ' ') + '\n' if tag in ['table', 'blockquote']: return '\n-----\n' + text + '\n------\n' if tag in ['td', 'th']: return ' | ' + text if tag 
in ['b', 'strong', 'label']: return '**%s**' % text if tag in ['em', 'i']: return "''%s''" % text if tag in ['tt']: return '``%s``' % text.strip() if tag in ['code']: return '``\n%s``' % text if tag == 'a': return '[[%s %s]]' % (text, attr.get('_href', '')) if tag == 'img': return '[[%s %s left]]' % (attr.get('_alt', 'no title'), attr.get('_src', '')) return text class MARKMIN(XmlComponent): """ For documentation: http://web2py.com/examples/static/markmin.html """ def __init__(self, text, extra=None, allowed=None, sep='p', url=None, environment=None, latex='google', autolinks='default', protolinks='default', class_prefix='', id_prefix='markmin_'): self.text = text self.extra = extra or {} self.allowed = allowed or {} self.sep = sep self.url = URL if url == True else url self.environment = environment self.latex = latex self.autolinks = autolinks self.protolinks = protolinks self.class_prefix = class_prefix self.id_prefix = id_prefix def xml(self): """ calls the gluon.contrib.markmin render function to convert the wiki syntax """ from contrib.markmin.markmin2html import render return render(self.text, extra=self.extra, allowed=self.allowed, sep=self.sep, latex=self.latex, URL=self.url, environment=self.environment, autolinks=self.autolinks, protolinks=self.protolinks, class_prefix=self.class_prefix, id_prefix=self.id_prefix) def __str__(self): return self.xml() def flatten(self, render=None): """ return the text stored by the MARKMIN object rendered by the render function """ return self.text def elements(self, *args, **kargs): """ to be considered experimental since the behavior of this method is questionable another options could be TAG(self.text).elements(*args,**kargs) """ return [self.text] if __name__ == '__main__': import doctest doctest.testmod()
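# Minimal usage sketch for the gluon/html.py helpers above (not part of the
# original file).  It assumes the module is importable as gluon.html, its
# usual location in a web2py checkout, and Python 2 like the rest of the code.
import re
from gluon.html import DIV, SPAN, UL, A, TAG

page = DIV(
    UL('dog', 'cat', 'mouse'),                      # plain items are wrapped in LI by UL._fixup
    DIV(SPAN('x', _class='abc'), SPAN('y', _class='abc')),
    A('home', _href='/init/default/index'),
)

# CSS-like selectors: tag, #id, .class and [attr=value] are all accepted
spans = page.elements('span.abc')                   # list of matching SPAN helpers
first_link = page.element('a')                      # first match only, or None

# replace text components matching a regex, as in the elements() doctests above
page.elements(find_text=re.compile('x|y'), replace='hello')

# the TAG factory parses raw markup back into a tree of helpers
tree = TAG('<div><b>bold</b></div>')
print page.xml()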
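# A second sketch for the html helpers above: the FORM/INPUT validation flow
# (not part of the original file).  IS_NOT_EMPTY comes from web2py's
# validators module (the doctests above import it as
# "from validators import IS_NOT_EMPTY"); passing session=None and
# formname=None skips the CSRF formkey check, exactly as the doctests do,
# so the form can be exercised outside a real request.
from gluon.html import FORM, INPUT
from gluon.validators import IS_NOT_EMPTY

form = FORM(INPUT(_name='title', requires=IS_NOT_EMPTY()))

if form.accepts({'title': 'hello'}, session=None, formname=None):
    print form.vars.title            # the validated value, here 'hello'
else:
    print form.errors                # Storage mapping field name -> error message

# inside a controller the same flow is usually spelled:
#     form = FORM(...).process()     # wraps validate()/accepts() with request.post_vars
#     return dict(form=form)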
""" Extract client information from http user agent The module does not try to detect all capabilities of browser in current form (it can easily be extended though). Aim is * fast * very easy to extend * reliable enough for practical purposes * and assist python web apps to detect clients. Taken from http://pypi.python.org/pypi/httpagentparser (MIT license) Modified my Ross Peoples for web2py to better support iPhone and iPad. Modified by Angelo Compagnucci <angelo.compagnucci@gmail.com> to better support a wide ringe of mobile devices. Now it supports: tablet device (is_tablet), BlackBerry, BlackBerry PlayBook, Android Tablets, Windows Mobile, Symbian. """ import sys class DetectorsHub(dict): _known_types = ['os', 'dist', 'flavor', 'browser'] def __init__(self, *args, **kw): dict.__init__(self, *args, **kw) for typ in self._known_types: self.setdefault(typ, []) self.registerDetectors() def register(self, detector): if detector.info_type not in self._known_types: self[detector.info_type] = [detector] self._known_types.insert(detector.order, detector.info_type) else: self[detector.info_type].append(detector) def reorderByPrefs(self, detectors, prefs): if prefs is None: return [] elif prefs == []: return detectors else: prefs.insert(0, '') def key_name(d): return d.name in prefs and prefs.index(d.name) or sys.maxint return sorted(detectors, key=key_name) def __iter__(self): return iter(self._known_types) def registerDetectors(self): detectors = [v() for v in globals().values() if DetectorBase in getattr(v, '__mro__', [])] for d in detectors: if d.can_register: self.register(d) class DetectorBase(object): name = "" # "to perform match in DetectorsHub object" info_type = '' # override me result_key = '' # override me order = 10 # 0 is highest look_for = [] # list of words to look for skip_if_found = [] # strings if present stop processin can_register = False is_mobile = False is_tablet = False prefs = dict() # dict(info_type = [name1, name2], ..) 
version_splitters = ["/", " "] _suggested_detectors = None def __init__(self): if not self.name: self.name = self.__class__.__name__ self.can_register = (self.__class__.__dict__.get('can_register', True)) def detect(self, agent, result): if agent and self.checkWords(agent): result[self.info_type] = dict(name=self.name) is_mobile = self.is_mobile is_tablet = self.is_tablet if is_mobile: result['is_mobile'] = is_mobile if is_tablet: result['is_tablet'] = is_tablet version = self.getVersion(agent) if version: result[self.info_type]['version'] = version return True return False def checkWords(self, agent): for w in self.skip_if_found: if w in agent: return False for w in self.look_for: if not w in agent: return False return True # This works only for the first element of look_for # If you want a different behaviour, you have to # override this method def getVersion(self, agent): # -> version string /None vs = self.version_splitters return agent.partition(self.look_for[0] + vs[0])[2].partition(vs[1])[0].strip() class OS(DetectorBase): info_type = "os" can_register = False version_splitters = [";", " "] class Dist(DetectorBase): info_type = "dist" can_register = False class Flavor(DetectorBase): info_type = "flavor" can_register = False class Browser(DetectorBase): info_type = "browser" can_register = False class Macintosh(OS): look_for = ['Macintosh'] prefs = dict(dist=None) def getVersion(self, agent): pass class Firefox(Browser): look_for = ["Firefox"] class Konqueror(Browser): look_for = ["Konqueror"] version_splitters = ["/", ";"] class Opera(Browser): look_for = ["Opera"] def getVersion(self, agent): return agent.partition(self.look_for[0])[2][1:].partition(' ')[0] class Netscape(Browser): look_for = ["Netscape"] class MSIE(Browser): look_for = ["MSIE"] skip_if_found = ["Opera"] name = "Microsoft Internet Explorer" version_splitters = [" ", ";"] class Galeon(Browser): look_for = ["Galeon"] class Safari(Browser): look_for = ["Safari"] skip_if_found = ["Chrome", "OmniWeb", "Mobile", "iPad", 'Android'] def getVersion(self, agent): if "Version/" in agent: return agent.partition('Version/')[2].partition(' ')[0].strip() class SafariTablet(Browser): name = "Safari" look_for = ['Safari', 'Android'] skip_if_found = ["Chrome", "OmniWeb", "Mobile", "iPad"] is_mobile = True is_tablet = True def getVersion(self, agent): if "Version/" in agent: return agent.partition('Version/')[2].partition(' ')[0].strip() class SafariMobile(Browser): name = "Safari" look_for = ["Safari", "Mobile"] is_mobile = True def getVersion(self, agent): if "Version/" in agent: return agent.partition('Version/')[2].partition(' ')[0].strip() class SafariNokia(Browser): name = "Safari" look_for = ["Safari", "SymbianOS"] is_mobile = True def getVersion(self, agent): pass class SafariiPad(Browser): name = "Safari" look_for = ["Safari", "iPad"] skip_if_found = ["Chrome", "OmniWeb"] is_mobile = True is_tablet = True def getVersion(self, agent): if "Version/" in agent: return agent.partition('Version/')[2].partition(' ')[0].strip() class Linux(OS): look_for = ["Linux"] prefs = dict(dist=["Ubuntu", "Android", "Debian"], flavor=None) def getVersion(self, agent): pass class BlackBerry(OS): look_for = ['BlackBerry'] prefs = dict(flavor=['PlayBook']) is_mobile = True # Manual check for tablet def checkWords(self, agent): if 'BlackBerry' in agent or 'PlayBook' in agent: return True return False def getVersion(self, agent): pass class PlayBook(Flavor): look_for = ['PlayBook'] is_mobile = True is_tablet = True def getVersion(self, agent): 
return agent.partition('Tablet OS')[2].partition(';')[0].strip() class Macintosh(OS): look_for = ['Macintosh'] prefs = dict(dist=None, flavor=['MacOS']) def getVersion(self, agent): pass class MacOS(Flavor): look_for = ['Mac OS'] prefs = dict(browser=['Safari', 'SafariMobile', 'SafariIpad', 'Firefox', 'Opera', "Microsoft Internet Explorer"]) def getVersion(self, agent): version_end_chars = [';', ')'] part = agent.partition('Mac OS')[2].strip() for c in version_end_chars: if c in part: version = part.partition(c)[0] break return version.replace('_', '.') class Windows(OS): look_for = ['Windows', 'NT'] prefs = dict(browser=["Microsoft Internet Explorer", 'Firefox'], dist=['WindowsMobile'], flavor=None) def getVersion(self, agent): v = agent.partition('NT') return v[1] + ' ' + v[2].replace(')', ';').partition(';')[0].strip() class WindowsMobile(Dist): name = 'Phone' look_for = ['Windows', 'Phone'] is_mobile = True def getVersion(self, agent): return agent.partition('Windows Phone')[2].replace(')', '').partition(';')[0].strip() class Ubuntu(Dist): look_for = ['Ubuntu'] version_splitters = ["/", " "] prefs = dict(browser=['Firefox']) class Debian(Dist): look_for = ['Debian'] version_splitters = ["/", " "] prefs = dict(browser=['Firefox']) class Chrome(Browser): look_for = ['Chrome'] version_splitters = ["/", " "] class ChromeOS(OS): look_for = ['CrOS'] version_splitters = [" ", ")"] prefs = dict(browser=['Chrome']) def getVersion(self, agent): vs = self.version_splitters return agent.partition(self.look_for[0] + vs[0])[2].partition(vs[1])[0].partition(" ")[2].strip() class Android(Dist): look_for = ['Android'] prefs = dict(browser=['SafariTablet', 'SafariMobile']) is_mobile = True def getVersion(self, agent): return agent.partition('Android')[2].partition(';')[0].strip() class SymbianOS(OS): look_for = ['SymbianOS'] prefs = dict(dist=['Series'], browser=['Safari', 'Opera']) is_mobile = True version_splitters = ['/', '; '] class Series(Flavor): look_for = ['SymbianOS', 'Series'] version_splitters = ['/', ';'] def getVersion(self, agent): return agent.partition('Series')[2].partition(' ')[0].replace('/', ' ') class BrowserNG(Browser): look_for = ['BrowserNG'] version_splitters = ['/', ';'] class iPhone(Dist): look_for = ['iPhone'] is_mobile = True prefs = dict(browser=['SafariMobile']) def getVersion(self, agent): version_end_chars = ['like', ';', ')'] if (not 'CPU iPhone OS' in agent) and (not 'CPU OS' in agent): return 'X' part = agent.partition('OS')[2].strip() for c in version_end_chars: if c in part: version = 'iOS ' + part.partition(c)[0].strip() break return version.replace('_', '.') class iPad(Dist): look_for = ['iPad'] is_mobile = True is_tablet = True def getVersion(self, agent): version_end_chars = ['like', ';', ')'] if not 'OS' in agent: return '' part = agent.partition('OS')[2].strip() for c in version_end_chars: if c in part: version = 'iOS ' + part.partition(c)[0].strip() break return version.replace('_', '.') detectorshub = DetectorsHub() def detect(agent): result = dict() prefs = dict() result['is_mobile'] = False result['is_tablet'] = False for info_type in detectorshub: detectors = detectorshub[info_type] _d_prefs = prefs.get(info_type, []) detectors = detectorshub.reorderByPrefs(detectors, _d_prefs) try: for detector in detectors: if detector.detect(agent, result): prefs = detector.prefs break except Exception, ex: result['exception'] = ex return result class Result(dict): def __missing__(self, k): return "" def simple_detect(agent): """ -> (os, browser, is_mobile) # tuple of 
strings """ result = detect(agent) os_list = [] if 'flavor' in result: os_list.append(result['flavor']['name']) if 'dist' in result: os_list.append(result['dist']['name']) if 'os' in result: os_list.append(result['os']['name']) os = os_list and " ".join(os_list) or "Unknown OS" os_version = os_list and ('flavor' in result and result['flavor'] and result['flavor'].get( 'version')) or ('dist' in result and result['dist'] and result['dist'].get('version')) \ or ('os' in result and result['os'] and result['os'].get('version')) or "" browser = 'browser' in result and result['browser'][ 'name'] or 'Unknown Browser' browser_version = 'browser' in result \ and result['browser'].get('version') or "" if browser_version: browser = " ".join((browser, browser_version)) if os_version: os = " ".join((os, os_version)) #is_mobile = ('dist' in result and result.dist.is_mobile) or ('os' in result and result.os.is_mobile) or False return os, browser, result['is_mobile'] if __name__ == '__main__': import time import unittest data = ( ( 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.83; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413', ('Series SymbianOS 60 3.1', 'Safari', True), {'is_mobile': True, 'is_tablet': False, 'flavor': {'name': 'Series', 'version': '60 3.1'}, 'os': {'name': 'SymbianOS', 'version': '9.2'}, 'browser': {'name': 'Safari'}},), ( 'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124', ('Series SymbianOS 60 5.0', 'BrowserNG 7.1.18124', True), {'is_mobile': True, 'is_tablet': False, 'flavor': {'name': 'Series', 'version': '60 5.0'}, 'os': {'name': 'SymbianOS', 'version': '9.4'}, 'browser': {'name': 'BrowserNG', 'version': '7.1.18124'}},), ( 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Windows Phone 6.5.3.5)', ('Phone Windows 6.5.3.5', 'Microsoft Internet Explorer 6.0', True), {'is_mobile': True, 'is_tablet': False, 'dist': {'name': 'Phone', 'version': '6.5.3.5'}, 'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}},), ( 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/0.0.1 Safari/534.8+', ('PlayBook BlackBerry 1.0.0', 'Safari 0.0.1', True), {'is_mobile': True, 'is_tablet': True, 'flavor': {'name': 'PlayBook', 'version': '1.0.0'}, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '0.0.1'}},), ( 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-US) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.246 Mobile Safari/534.1+', ('BlackBerry', 'Safari 6.0.0.246', True), {'is_mobile': True, 'is_tablet': False, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '6.0.0.246'}},), ( 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/6.0.0.600 Mobile Safari/534.8+', ('BlackBerry', 'Safari 6.0.0.600', True), {'is_mobile': True, 'is_tablet': False, 'os': {'name': 'BlackBerry'}, 'browser': {'name': 'Safari', 'version': '6.0.0.600'}},), ( 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5', ('MacOS iPad X', 'Safari 5.0.2', True), {'is_mobile': True, 'is_tablet': True, 'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'iOS 4.2.1', 'name': 'iPad'}, 'browser': {'name': 'Safari', 'version': '5.0.2'}},), ( 'Mozilla/5.0 (Windows; U; Windows 
NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1', ('Windows NT 5.1', 'Netscape 8.1', False), {'is_mobile': False, 'is_tablet': False, 'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},), ( 'Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13', ('Android Linux 3.0.1', 'Safari 4.0', True), {'is_mobile': True, 'is_tablet': True, 'dist': {'version': '3.0.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), ( 'Mozilla/5.0 (Linux; U; Android 2.3.7; it-it; Dream/Sapphire Build/FRG83) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1', ('Android Linux 2.3.7', 'Safari 4.0', True), {'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.3.7', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), ( 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10', ('MacOS Macintosh X 10.5', 'Firefox 3.0.10', False), {'is_mobile': False, 'is_tablet': False, 'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},), ( 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)', ('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3', False), {'is_mobile': False, 'is_tablet': False, 'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},), ( 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1', ('Ubuntu Linux 10.04', 'Firefox 3.6', False), {'is_mobile': False, 'is_tablet': False, 'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},), ( 'Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1', ('Android Linux 2.2.1', 'Safari 4.0', True), {'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), ( 'Mozilla/5.0 (Linux; U; Android 2.3.4; it-it; LG-P990 Build/GRJ22) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 MMS/LG-Android-MMS-V1.0/1.2', ('Android Linux 2.3.4', 'Safari 4.0', True), {'is_mobile': True, 'is_tablet': False, 'dist': {'version': '2.3.4', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), ( 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3', ('MacOS iPhone X', 'Safari 3.0', True), {'is_mobile': True, 'is_tablet': False, 'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'X', 'name': 'iPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},), ( 'Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)', ('ChromeOS 0.0.0', 'Chrome 11.0.696.27', False), {'is_mobile': False, 'is_tablet': False, 'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},), ( 'Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]', ('Windows NT 5.1', 'Opera 7.02', False), {'is_mobile': False, 'is_tablet': False, 'os': {'name': 
'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Opera', 'version': '7.02'}},), ('Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50', ('Linux', 'Opera 9.80', False), {'is_mobile': False, 'is_tablet': False, 'os': {'name': 'Linux'}, 'browser': {'name': 'Opera', 'version': '9.80'}},), ) class TestHAP(unittest.TestCase): def setUp(self): self.harass_repeat = 100 self.data = data def test_simple_detect(self): for agent, simple_res, res in data: self.assertEqual(simple_detect(agent), simple_res) def test_detect(self): for agent, simple_res, res in data: self.assertEqual(detect(agent), res) def test_harass(self): then = time.time() for agent, simple_res, res in data * self.harass_repeat: detect(agent) time_taken = time.time() - then no_of_tests = len(self.data) * self.harass_repeat print "\nTime taken for %s detecttions: %s" \ % (no_of_tests, time_taken) print "Time taken for single detecttion: ", \ time_taken / (len(self.data) * self.harass_repeat) unittest.main() class mobilize(object): def __init__(self, func): self.func = func def __call__(self): from gluon import current user_agent = current.request.user_agent() if user_agent.is_mobile: items = current.response.view.split('.') items.insert(-1, 'mobile') current.response.view = '.'.join(items) return self.func()
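# Minimal usage sketch for the detectors defined above (not part of the
# original file), reusing one of the user-agent strings from the module's
# own test data; simple_detect() and detect() are the functions defined above.
ua = ('Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; en-us) '
      'AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 '
      'Mobile/8C148 Safari/6533.18.5')

os_name, browser, is_mobile = simple_detect(ua)
print os_name, '|', browser, '| mobile:', is_mobile    # MacOS iPad X | Safari 5.0.2 | mobile: True

info = detect(ua)                                       # full result dictionary
print info['is_tablet']                                 # True for this iPad agent
print info['browser']['name'], info['browser'].get('version')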
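# A second sketch for the user-agent module above: the mobilize decorator
# applied to a web2py controller action (not part of the original file).
# When request.user_agent() reports a mobile client the response view
# "default/index.html" is swapped for "default/index.mobile.html", so that
# view file must exist in the application.  The import path below is the
# module's usual location in a web2py tree.
from gluon.contrib.user_agent_parser import mobilize

@mobilize
def index():
    return dict(message='hello from a mobile-aware action')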
# -*- coding: utf-8 -*- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. "Pythonic simple JSON RPC Client implementation" __author__ = "Mariano Reingart (reingart@gmail.com)" __copyright__ = "Copyright (C) 2011 Mariano Reingart" __license__ = "LGPL 3.0" __version__ = "0.05" import urllib from xmlrpclib import Transport, SafeTransport from cStringIO import StringIO import random import sys try: import gluon.contrib.simplejson as json # try web2py json serializer except ImportError: try: import json # try stdlib (py2.6) except: import simplejson as json # try external module class JSONRPCError(RuntimeError): "Error object for remote procedure call fail" def __init__(self, code, message, data=None): value = "%s: %s\n%s" % (code, message, '\n'.join(data)) RuntimeError.__init__(self, value) self.code = code self.message = message self.data = data class JSONDummyParser: "json wrapper for xmlrpclib parser interfase" def __init__(self): self.buf = StringIO() def feed(self, data): self.buf.write(data) def close(self): return self.buf.getvalue() class JSONTransportMixin: "json wrapper for xmlrpclib transport interfase" def send_content(self, connection, request_body): connection.putheader("Content-Type", "application/json") connection.putheader("Content-Length", str(len(request_body))) connection.endheaders() if request_body: connection.send(request_body) # todo: add gzip compression def getparser(self): # get parser and unmarshaller parser = JSONDummyParser() return parser, parser class JSONTransport(JSONTransportMixin, Transport): pass class JSONSafeTransport(JSONTransportMixin, SafeTransport): pass class ServerProxy(object): "JSON RPC Simple Client Service Proxy" def __init__(self, uri, transport=None, encoding=None, verbose=0): self.location = uri # server location (url) self.trace = verbose # show debug messages self.exceptions = True # raise errors? 
(JSONRPCError) self.timeout = None self.json_request = self.json_response = '' type, uri = urllib.splittype(uri) if type not in ("http", "https"): raise IOError("unsupported JSON-RPC protocol") self.__host, self.__handler = urllib.splithost(uri) if transport is None: if type == "https": transport = JSONSafeTransport() else: transport = JSONTransport() self.__transport = transport self.__encoding = encoding self.__verbose = verbose def __getattr__(self, attr): "pseudo method that can be called" return lambda *args: self.call(attr, *args) def call(self, method, *args): "JSON RPC communication (method invocation)" # build data sent to the service request_id = random.randint(0, sys.maxint) data = {'id': request_id, 'method': method, 'params': args, } request = json.dumps(data) # make HTTP request (retry if connection is lost) response = self.__transport.request( self.__host, self.__handler, request, verbose=self.__verbose ) # store plain request and response for further debugging self.json_request = request self.json_response = response # parse json data coming from service # {'version': '1.1', 'id': id, 'result': result, 'error': None} response = json.loads(response) if response['id'] != request_id: raise JSONRPCError(0, "JSON Request ID != Response ID") self.error = response.get('error', {}) if self.error and self.exceptions: raise JSONRPCError(self.error.get('code', 0), self.error.get('message', ''), self.error.get('data', None)) return response.get('result') ServiceProxy = ServerProxy if __name__ == "__main__": # basic tests: location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc" client = ServerProxy(location, verbose='--verbose' in sys.argv,) print client.add(1, 2)
Python
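A small sketch of the JSON payloads that ServerProxy.call() builds and parses, assuming a remote add(a, b) method; the id and result values here are illustrative only.

import json, random, sys

# call() builds a request of this shape and POSTs it via the transport
request_id = random.randint(0, sys.maxint)
request = json.dumps({'id': request_id, 'method': 'add', 'params': (1, 2)})

# the service is expected to reply with a matching id (illustrative reply)
reply = '{"version": "1.1", "id": %d, "result": 3, "error": null}' % request_id
response = json.loads(reply)
assert response['id'] == request_id   # call() raises JSONRPCError on mismatch
print response['result']              # 3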
#!/usr/bin/env python """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Attention: Requires Chrome or Safari. For IE of Firefox you need https://github.com/gimite/web-socket-js 1) install tornado (requires Tornado 2.1) easy_install tornado 2) start this app: python gluon/contrib/websocket_messaging.py -k mykey -p 8888 3) from any web2py app you can post messages with from gluon.contrib.websocket_messaging import websocket_send websocket_send('http://127.0.0.1:8888','Hello World','mykey','mygroup') 4) from any template you can receive them with <script> $(document).ready(function(){ if(!web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup',function(e){alert(e.data)})) alert("html5 websocket not supported by your browser, try Google Chrome"); }); </script> When the server posts a message, all clients connected to the page will popup an alert message Or if you want to send json messages and store evaluated json in a var called data: <script> $(document).ready(function(){ var data; web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup',function(e){data=eval('('+e.data+')')}); }); </script> - All communications between web2py and websocket_messaging will be digitally signed with hmac. - All validation is handled on the web2py side and there is no need to modify websocket_messaging.py - Multiple web2py instances can talk with one or more websocket_messaging servers. - "ws://127.0.0.1:8888/realtime/" must be contain the IP of the websocket_messaging server. - Via group='mygroup' name you can support multiple groups of clients (think of many chat-rooms) Here is a complete sample web2py action: def index(): form=LOAD('default','ajax_form',ajax=True) script=SCRIPT(''' jQuery(document).ready(function(){ var callback=function(e){alert(e.data)}; if(!web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup',callback)) alert("html5 websocket not supported by your browser, try Google Chrome"); }); ''') return dict(form=form, script=script) def ajax_form(): form=SQLFORM.factory(Field('message')) if form.accepts(request,session): from gluon.contrib.websocket_messaging import websocket_send websocket_send( 'http://127.0.0.1:8888',form.vars.message,'mykey','mygroup') return form Acknowledgements: Tornado code inspired by http://thomas.pelletier.im/2010/08/websocket-tornado-redis/ """ import tornado.httpserver import tornado.websocket import tornado.ioloop import tornado.web import hmac import sys import optparse import urllib import time listeners = {} names = {} tokens = {} def websocket_send(url, message, hmac_key=None, group='default'): sig = hmac_key and hmac.new(hmac_key, message).hexdigest() or '' params = urllib.urlencode( {'message': message, 'signature': sig, 'group': group}) f = urllib.urlopen(url, params) data = f.read() f.close() return data class PostHandler(tornado.web.RequestHandler): """ only authorized parties can post messages """ def post(self): if hmac_key and not 'signature' in self.request.arguments: return 'false' if 'message' in self.request.arguments: message = self.request.arguments['message'][0] group = self.request.arguments.get('group', ['default'])[0] print '%s:MESSAGE to %s:%s' % (time.time(), group, message) if hmac_key: signature = self.request.arguments['signature'][0] if not hmac.new(hmac_key, message).hexdigest() == signature: return 'false' for client in listeners.get(group, []): client.write_message(message) return 'true' return 'false' class 
TokenHandler(tornado.web.RequestHandler): """ if running with -t post a token to allow a client to join using the token the message here is the token (any uuid) allows only authorized parties to joins, for example, a chat """ def post(self): if hmac_key and not 'message' in self.request.arguments: return 'false' if 'message' in self.request.arguments: message = self.request.arguments['message'][0] if hmac_key: signature = self.request.arguments['signature'][0] if not hmac.new(hmac_key, message).hexdigest() == signature: return 'false' tokens[message] = None return 'true' return 'false' class DistributeHandler(tornado.websocket.WebSocketHandler): def open(self, params): group, token, name = params.split('/') + [None, None] self.group = group or 'default' self.token = token or 'none' self.name = name or 'anonymous' # only authorized parties can join if DistributeHandler.tokens: if not self.token in tokens or not token[self.token] is None: self.close() else: tokens[self.token] = self if not self.group in listeners: listeners[self.group] = [] # notify clients that a member has joined the groups for client in listeners.get(self.group, []): client.write_message('+' + self.name) listeners[self.group].append(self) names[self] = self.name print '%s:CONNECT to %s' % (time.time(), self.group) def on_message(self, message): pass def on_close(self): if self.group in listeners: listeners[self.group].remove(self) del names[self] # notify clients that a member has left the groups for client in listeners.get(self.group, []): client.write_message('-' + self.name) print '%s:DISCONNECT from %s' % (time.time(), self.group) if __name__ == "__main__": usage = __doc__ version = "" parser = optparse.OptionParser(usage, None, optparse.Option, version) parser.add_option('-p', '--port', default='8888', dest='port', help='socket') parser.add_option('-l', '--listen', default='0.0.0.0', dest='address', help='listener address') parser.add_option('-k', '--hmac_key', default='', dest='hmac_key', help='hmac_key') parser.add_option('-t', '--tokens', action='store_true', default=False, dest='tokens', help='require tockens to join') (options, args) = parser.parse_args() hmac_key = options.hmac_key DistributeHandler.tokens = options.tokens urls = [ (r'/', PostHandler), (r'/token', TokenHandler), (r'/realtime/(.*)', DistributeHandler)] application = tornado.web.Application(urls, auto_reload=True) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(int(options.port), address=options.address) tornado.ioloop.IOLoop.instance().start()
Python
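A minimal sketch of the hmac signing that websocket_send() performs and PostHandler verifies, assuming the server was started with -k mykey; the message text is illustrative.

import hmac, urllib

hmac_key, message, group = 'mykey', 'Hello World', 'mygroup'
signature = hmac.new(hmac_key, message).hexdigest()
params = urllib.urlencode(
    {'message': message, 'signature': signature, 'group': group})
# urllib.urlopen('http://127.0.0.1:8888', params) would POST this to PostHandler,
# which recomputes hmac.new(hmac_key, message).hexdigest() and, only if the
# digests match, broadcasts the message to every client joined to `group`.
print params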
"""Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit https://code.google.com/p/feedparser/ for the latest version Visit http://packages.python.org/feedparser/ for the latest documentation Required: Python 2.4 or later Recommended: iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "5.1.2" __license__ = """ Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org> Copyright (c) 2002-2008 Mark Pilgrim All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>", "Sam Ruby <http://intertwingly.net/>", "Ade Oshineye <http://blog.oshineye.com/>", "Martin Pool <http://sourcefrog.net/>", "Kurt McKee <http://kurtmckee.org/>"] # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] # If you want feedparser to automatically resolve all relative URIs, set this # to 1. 
RESOLVE_RELATIVE_URIS = 1 # If you want feedparser to automatically sanitize all potentially unsafe # HTML content, set this to 1. SANITIZE_HTML = 1 # If you want feedparser to automatically parse microformat content embedded # in entry contents, set this to 1 PARSE_MICROFORMATS = 1 # ---------- Python 3 modules (make it work if possible) ---------- try: import rfc822 except ImportError: from email import _parseaddr as rfc822 try: # Python 3.1 introduces bytes.maketrans and simultaneously # deprecates string.maketrans; use bytes.maketrans if possible _maketrans = bytes.maketrans except (NameError, AttributeError): import string _maketrans = string.maketrans # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii except ImportError: base64 = binascii = None else: # Python 3.1 deprecates decodestring in favor of decodebytes _base64decode = getattr(base64, 'decodebytes', base64.decodestring) # _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3 # _l2bytes: convert a list of ints to bytes if the interpreter is Python 3 try: if bytes is str: # In Python 2.5 and below, bytes doesn't exist (NameError) # In Python 2.6 and above, bytes and str are the same type raise NameError except NameError: # Python 2 def _s2bytes(s): return s def _l2bytes(l): return ''.join(map(chr, l)) else: # Python 3 def _s2bytes(s): return bytes(s, 'utf8') def _l2bytes(l): return bytes(l) # If you want feedparser to allow all URL schemes, set this to () # List culled from Python's urlparse documentation at: # http://docs.python.org/library/urlparse.html # as well as from "URI scheme" at Wikipedia: # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme # Many more will likely need to be added! ACCEPTABLE_URI_SCHEMES = ( 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet', 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', # Additional common-but-unofficial schemes 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', ) #ACCEPTABLE_URI_SCHEMES = () # ---------- required modules (should come with any Python distribution) ---------- import cgi import codecs import copy import datetime import re import struct import time import types import urllib import urllib2 import urlparse import warnings from htmlentitydefs import name2codepoint, codepoint2name, entitydefs try: from io import BytesIO as _StringIO except ImportError: try: from cStringIO import StringIO as _StringIO except ImportError: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except ImportError: gzip = None try: import zlib except ImportError: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
try: import xml.sax from xml.sax.saxutils import escape as _xmlescape except ImportError: _XML_AVAILABLE = 0 def _xmlescape(data,entities={}): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') for char, entity in entities: data = data.replace(char, entity) return data else: try: xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers except xml.sax.SAXReaderNotAvailable: _XML_AVAILABLE = 0 else: _XML_AVAILABLE = 1 # sgmllib is not available by default in Python 3; if the end user doesn't have # it available then we'll lose illformed XML parsing, content santizing, and # microformat support (at least while feedparser depends on BeautifulSoup). try: import sgmllib except ImportError: # This is probably Python 3, which doesn't include sgmllib anymore _SGML_AVAILABLE = 0 # Mock sgmllib enough to allow subclassing later on class sgmllib(object): class SGMLParser(object): def goahead(self, i): pass def parse_starttag(self, i): pass else: _SGML_AVAILABLE = 1 # sgmllib defines a number of module-level regular expressions that are # insufficient for the XML parsing feedparser needs. Rather than modify # the variables directly in sgmllib, they're defined here using the same # names, and the compiled code objects of several sgmllib.SGMLParser # methods are copied into _BaseHTMLProcessor so that they execute in # feedparser's scope instead of sgmllib's scope. charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') attrfind = re.compile( r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*' r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?' ) # Unfortunately, these must be copied over to prevent NameError exceptions entityref = sgmllib.entityref incomplete = sgmllib.incomplete interesting = sgmllib.interesting shorttag = sgmllib.shorttag shorttagopen = sgmllib.shorttagopen starttagopen = sgmllib.starttagopen class _EndBracketRegEx: def __init__(self): # Overriding the built-in sgmllib.endbracket regex allows the # parser to find angle brackets embedded in element attributes. self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') def search(self, target, index=0): match = self.endbracket.match(target, index) if match is not None: # Returning a new object in the calling thread's context # resolves a thread-safety. return EndBracketMatch(match) return None class EndBracketMatch: def __init__(self, match): self.match = match def start(self, n): return self.match.end(n) endbracket = _EndBracketRegEx() # iconv_codec provides support for more character encodings. 
# It's available from http://cjkpython.i18n.org/ try: import iconv_codec except ImportError: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet except ImportError: chardet = None # BeautifulSoup is used to extract microformat content from HTML # feedparser is tested using BeautifulSoup 3.2.0 # http://www.crummy.com/software/BeautifulSoup/ try: import BeautifulSoup except ImportError: BeautifulSoup = None PARSE_MICROFORMATS = False # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass SUPPORTED_VERSIONS = {'': u'unknown', 'rss090': u'RSS 0.90', 'rss091n': u'RSS 0.91 (Netscape)', 'rss091u': u'RSS 0.91 (Userland)', 'rss092': u'RSS 0.92', 'rss093': u'RSS 0.93', 'rss094': u'RSS 0.94', 'rss20': u'RSS 2.0', 'rss10': u'RSS 1.0', 'rss': u'RSS (unknown version)', 'atom01': u'Atom 0.1', 'atom02': u'Atom 0.2', 'atom03': u'Atom 0.3', 'atom10': u'Atom 1.0', 'atom': u'Atom (unknown version)', 'cdf': u'CDF', } class FeedParserDict(dict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['summary', 'subtitle'], 'description_detail': ['summary_detail', 'subtitle_detail'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': try: return dict.__getitem__(self, 'tags')[0]['term'] except IndexError: raise KeyError, "object doesn't have key 'category'" elif key == 'enclosures': norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel']) return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure'] elif key == 'license': for link in dict.__getitem__(self, 'links'): if link['rel']==u'license' and 'href' in link: return link['href'] elif key == 'updated': # Temporarily help developers out by keeping the old # broken behavior that was reported in issue 310. # This fix was proposed in issue 328. if not dict.__contains__(self, 'updated') and \ dict.__contains__(self, 'published'): warnings.warn("To avoid breaking existing software while " "fixing issue 310, a temporary mapping has been created " "from `updated` to `published` if `updated` doesn't " "exist. This fallback will be removed in a future version " "of feedparser.", DeprecationWarning) return dict.__getitem__(self, 'published') return dict.__getitem__(self, 'updated') elif key == 'updated_parsed': if not dict.__contains__(self, 'updated_parsed') and \ dict.__contains__(self, 'published_parsed'): warnings.warn("To avoid breaking existing software while " "fixing issue 310, a temporary mapping has been created " "from `updated_parsed` to `published_parsed` if " "`updated_parsed` doesn't exist. 
This fallback will be " "removed in a future version of feedparser.", DeprecationWarning) return dict.__getitem__(self, 'published_parsed') return dict.__getitem__(self, 'updated_parsed') else: realkey = self.keymap.get(key, key) if isinstance(realkey, list): for k in realkey: if dict.__contains__(self, k): return dict.__getitem__(self, k) elif dict.__contains__(self, realkey): return dict.__getitem__(self, realkey) return dict.__getitem__(self, key) def __contains__(self, key): if key in ('updated', 'updated_parsed'): # Temporarily help developers out by keeping the old # broken behavior that was reported in issue 310. # This fix was proposed in issue 328. return dict.__contains__(self, key) try: self.__getitem__(key) except KeyError: return False else: return True has_key = __contains__ def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __setitem__(self, key, value): key = self.keymap.get(key, key) if isinstance(key, list): key = key[0] return dict.__setitem__(self, key, value) def setdefault(self, key, value): if key not in self: self[key] = value return value return self[key] def __getattr__(self, key): # __getattribute__() is called first; this will be called # only if an attribute was not already found try: return self.__getitem__(key) except KeyError: raise AttributeError, "object has no attribute '%s'" % key def __hash__(self): return id(self) _cp1252 = { 128: unichr(8364), # euro sign 130: unichr(8218), # single low-9 quotation mark 131: unichr( 402), # latin small letter f with hook 132: unichr(8222), # double low-9 quotation mark 133: unichr(8230), # horizontal ellipsis 134: unichr(8224), # dagger 135: unichr(8225), # double dagger 136: unichr( 710), # modifier letter circumflex accent 137: unichr(8240), # per mille sign 138: unichr( 352), # latin capital letter s with caron 139: unichr(8249), # single left-pointing angle quotation mark 140: unichr( 338), # latin capital ligature oe 142: unichr( 381), # latin capital letter z with caron 145: unichr(8216), # left single quotation mark 146: unichr(8217), # right single quotation mark 147: unichr(8220), # left double quotation mark 148: unichr(8221), # right double quotation mark 149: unichr(8226), # bullet 150: unichr(8211), # en dash 151: unichr(8212), # em dash 152: unichr( 732), # small tilde 153: unichr(8482), # trade mark sign 154: unichr( 353), # latin small letter s with caron 155: unichr(8250), # single right-pointing angle quotation mark 156: unichr( 339), # latin small ligature oe 158: unichr( 382), # latin small letter z with caron 159: unichr( 376), # latin capital letter y with diaeresis } _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) #try: if not isinstance(uri, unicode): uri = uri.decode('utf-8', 'ignore') uri = urlparse.urljoin(base, uri) if not isinstance(uri, unicode): return uri.decode('utf-8', 'ignore') return uri #except: # uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)]) # return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = { '': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 
'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://postneo.com/icbm/': 'icbm', 'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media', # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace 'http://search.yahoo.com/mrss/': 'media', 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', 'http://purl.org/rss/1.0/modules/reference/': 'ref', 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', 'http://purl.org/rss/1.0/modules/search/': 'search', 'http://purl.org/rss/1.0/modules/slash/': 'slash', 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', 'http://hacks.benhammersley.com/rss/streaming/': 'str', 'http://purl.org/rss/1.0/modules/subscription/': 'sub', 'http://purl.org/rss/1.0/modules/syndication/': 'sy', 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', 'http://purl.org/rss/1.0/modules/threading/': 'thr', 'http://purl.org/rss/1.0/modules/textinput/': 'ti', 'http://madskills.com/public/xml/rss/module/trackback/': 'trackback', 'http://wellformedweb.org/commentAPI/': 'wfw', 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', 'http://www.w3.org/1999/xhtml': 'xhtml', 'http://www.w3.org/1999/xlink': 'xlink', 'http://www.w3.org/XML/1998/namespace': 'xml', } _matchnamespaces = {} can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']) can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) html_types = [u'text/html', u'application/xhtml+xml'] def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'): if not self._matchnamespaces: for k, v in self.namespaces.items(): self._matchnamespaces[k.lower()] = v self.feeddata = FeedParserDict() # feed-level data self.encoding = encoding # character encoding self.entries = [] # list of entry-level data self.version = u'' # feed type/version, see SUPPORTED_VERSIONS self.namespacesInUse = {} # dictionary of namespaces defined by the feed # the following are used internally to track state; # this is 
really out of control and should be refactored self.infeed = 0 self.inentry = 0 self.incontent = 0 self.intextinput = 0 self.inimage = 0 self.inauthor = 0 self.incontributor = 0 self.inpublisher = 0 self.insource = 0 self.sourcedata = FeedParserDict() self.contentparams = FeedParserDict() self._summaryKey = None self.namespacemap = {} self.elementstack = [] self.basestack = [] self.langstack = [] self.baseuri = baseuri or u'' self.lang = baselang or None self.svgOK = 0 self.title_depth = -1 self.depth = 0 if baselang: self.feeddata['language'] = baselang.replace('_','-') # A map of the following form: # { # object_that_value_is_set_on: { # property_name: depth_of_node_property_was_extracted_from, # other_property: depth_of_node_property_was_extracted_from, # }, # } self.property_depth_map = {} def _normalize_attributes(self, kv): k = kv[0].lower() v = k in ('rel', 'type') and kv[1].lower() or kv[1] # the sgml parser doesn't handle entities in attributes, nor # does it pass the attribute values through as unicode, while # strict xml parsers do -- account for this difference if isinstance(self, _LooseFeedParser): v = v.replace('&amp;', '&') if not isinstance(v, unicode): v = v.decode('utf-8') return (k, v) def unknown_starttag(self, tag, attrs): # increment depth counter self.depth += 1 # normalize attrs attrs = map(self._normalize_attributes, attrs) # track xml:base and xml:lang attrsD = dict(attrs) baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri if not isinstance(baseuri, unicode): baseuri = baseuri.decode(self.encoding, 'ignore') # ensure that self.baseuri is always an absolute URI that # uses a whitelisted URI scheme (e.g. not `javscript:`) if self.baseuri: self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri else: self.baseuri = _urljoin(self.baseuri, baseuri) lang = attrsD.get('xml:lang', attrsD.get('lang')) if lang == '': # xml:lang could be explicitly set to '', we need to capture that lang = None elif lang is None: # if no xml:lang is specified, use parent lang lang = self.lang if lang: if tag in ('feed', 'rss', 'rdf:RDF'): self.feeddata['language'] = lang.replace('_','-') self.lang = lang self.basestack.append(self.baseuri) self.langstack.append(lang) # track namespaces for prefix, uri in attrs: if prefix.startswith('xmlns:'): self.trackNamespace(prefix[6:], uri) elif prefix == 'xmlns': self.trackNamespace(None, uri) # track inline content if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'): if tag in ('xhtml:div', 'div'): return # typepad does this 10/2007 # element declared itself as escaped markup, but it isn't really self.contentparams['type'] = u'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml': if tag.find(':') <> -1: prefix, tag = tag.split(':', 1) namespace = self.namespacesInUse.get(prefix, '') if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': attrs.append(('xmlns',namespace)) if tag=='svg' and namespace=='http://www.w3.org/2000/svg': attrs.append(('xmlns',namespace)) if tag == 'svg': self.svgOK += 1 return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' # special hack for better tracking of empty textinput/image elements in illformed feeds if (not prefix) and tag not in ('title', 'link', 'description', 'name'): 
self.intextinput = 0 if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): self.inimage = 0 # call special handler (if defined) or default handler methodname = '_start_' + prefix + suffix try: method = getattr(self, methodname) return method(attrsD) except AttributeError: # Since there's no handler or something has gone wrong we explicitly add the element and its attributes unknown_tag = prefix + suffix if len(attrsD) == 0: # No attributes so merge it into the encosing dictionary return self.push(unknown_tag, 1) else: # Has attributes so create it in its own dictionary context = self._getContext() context[unknown_tag] = attrsD def unknown_endtag(self, tag): # match namespaces if tag.find(':') <> -1: prefix, suffix = tag.split(':', 1) else: prefix, suffix = '', tag prefix = self.namespacemap.get(prefix, prefix) if prefix: prefix = prefix + '_' if suffix == 'svg' and self.svgOK: self.svgOK -= 1 # call special handler (if defined) or default handler methodname = '_end_' + prefix + suffix try: if self.svgOK: raise AttributeError() method = getattr(self, methodname) method() except AttributeError: self.pop(prefix + suffix) # track inline content if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'): # element declared itself as escaped markup, but it isn't really if tag in ('xhtml:div', 'div'): return # typepad does this 10/2007 self.contentparams['type'] = u'application/xhtml+xml' if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml': tag = tag.split(':')[-1] self.handle_data('</%s>' % tag, escape=0) # track xml:base and xml:lang going out of scope if self.basestack: self.basestack.pop() if self.basestack and self.basestack[-1]: self.baseuri = self.basestack[-1] if self.langstack: self.langstack.pop() if self.langstack: # and (self.langstack[-1] is not None): self.lang = self.langstack[-1] self.depth -= 1 def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' if not self.elementstack: return ref = ref.lower() if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): text = '&#%s;' % ref else: if ref[0] == 'x': c = int(ref[1:], 16) else: c = int(ref) text = unichr(c).encode('utf-8') self.elementstack[-1][2].append(text) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' if not self.elementstack: return if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): text = '&%s;' % ref elif ref in self.entities: text = self.entities[ref] if text.startswith('&#') and text.endswith(';'): return self.handle_entityref(text) else: try: name2codepoint[ref] except KeyError: text = '&%s;' % ref else: text = unichr(name2codepoint[ref]).encode('utf-8') self.elementstack[-1][2].append(text) def handle_data(self, text, escape=1): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references if not self.elementstack: return if escape and self.contentparams.get('type') == u'application/xhtml+xml': text = _xmlescape(text) self.elementstack[-1][2].append(text) def handle_comment(self, text): # called for each comment, e.g. <!-- insert message here --> pass def handle_pi(self, text): # called for each processing instruction, e.g. 
<?instruction> pass def handle_decl(self, text): pass def parse_declaration(self, i): # override internal declaration handler to handle CDATA blocks if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: # CDATA block began but didn't finish k = len(self.rawdata) return k self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) return k+3 else: k = self.rawdata.find('>', i) if k >= 0: return k+1 else: # We have an incomplete CDATA block. return k def mapContentType(self, contentType): contentType = contentType.lower() if contentType == 'text' or contentType == 'plain': contentType = u'text/plain' elif contentType == 'html': contentType = u'text/html' elif contentType == 'xhtml': contentType = u'application/xhtml+xml' return contentType def trackNamespace(self, prefix, uri): loweruri = uri.lower() if not self.version: if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'): self.version = u'rss090' elif loweruri == 'http://purl.org/rss/1.0/': self.version = u'rss10' elif loweruri == 'http://www.w3.org/2005/atom': self.version = u'atom10' if loweruri.find(u'backend.userland.com/rss') <> -1: # match any backend.userland.com namespace uri = u'http://backend.userland.com/rss' loweruri = uri if loweruri in self._matchnamespaces: self.namespacemap[prefix] = self._matchnamespaces[loweruri] self.namespacesInUse[self._matchnamespaces[loweruri]] = uri else: self.namespacesInUse[prefix or ''] = uri def resolveURI(self, uri): return _urljoin(self.baseuri or u'', uri) def decodeEntities(self, element, data): return data def strattrs(self, attrs): return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs]) def push(self, element, expectingText): self.elementstack.append([element, expectingText, []]) def pop(self, element, stripWhitespace=1): if not self.elementstack: return if self.elementstack[-1][0] != element: return element, expectingText, pieces = self.elementstack.pop() if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml': # remove enclosing child element, but only if it is a <div> and # only if all the remaining content is nested underneath it. 
# This means that the divs would be retained in the following: # <div>foo</div><div>bar</div> while pieces and len(pieces)>1 and not pieces[-1].strip(): del pieces[-1] while pieces and len(pieces)>1 and not pieces[0].strip(): del pieces[0] if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>': depth = 0 for piece in pieces[:-1]: if piece.startswith('</'): depth -= 1 if depth == 0: break elif piece.startswith('<') and not piece.endswith('/>'): depth += 1 else: pieces = pieces[1:-1] # Ensure each piece is a str for Python 3 for (i, v) in enumerate(pieces): if not isinstance(v, unicode): pieces[i] = v.decode('utf-8') output = u''.join(pieces) if stripWhitespace: output = output.strip() if not expectingText: return output # decode base64 content if base64 and self.contentparams.get('base64', 0): try: output = _base64decode(output) except binascii.Error: pass except binascii.Incomplete: pass except TypeError: # In Python 3, base64 takes and outputs bytes, not str # This may not be the most correct way to accomplish this output = _base64decode(output.encode('utf-8')).decode('utf-8') # resolve relative URIs if (element in self.can_be_relative_uri) and output: output = self.resolveURI(output) # decode entities within embedded markup if not self.contentparams.get('base64', 0): output = self.decodeEntities(element, output) # some feed formats require consumers to guess # whether the content is html or plain text if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain': if self.lookslikehtml(output): self.contentparams['type'] = u'text/html' # remove temporary cruft from contentparams try: del self.contentparams['mode'] except KeyError: pass try: del self.contentparams['base64'] except KeyError: pass is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types # resolve relative URIs within embedded markup if is_htmlish and RESOLVE_RELATIVE_URIS: if element in self.can_contain_relative_uris: output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html')) # parse microformats # (must do this before sanitizing because some microformats # rely on elements that we sanitize) if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']: mfresults = _parseMicroformats(output, self.baseuri, self.encoding) if mfresults: for tag in mfresults.get('tags', []): self._addTag(tag['term'], tag['scheme'], tag['label']) for enclosure in mfresults.get('enclosures', []): self._start_enclosure(enclosure) for xfn in mfresults.get('xfn', []): self._addXFN(xfn['relationships'], xfn['href'], xfn['name']) vcard = mfresults.get('vcard') if vcard: self._getContext()['vcard'] = vcard # sanitize embedded markup if is_htmlish and SANITIZE_HTML: if element in self.can_contain_dangerous_markup: output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html')) if self.encoding and not isinstance(output, unicode): output = output.decode(self.encoding, 'ignore') # address common error where people take data that is already # utf-8, presume that it is iso-8859-1, and re-encode it. 
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode): try: output = output.encode('iso-8859-1').decode('utf-8') except (UnicodeEncodeError, UnicodeDecodeError): pass # map win-1252 extensions to the proper code points if isinstance(output, unicode): output = output.translate(_cp1252) # categories/tags/keywords/whatever are handled in _end_category if element == 'category': return output if element == 'title' and -1 < self.title_depth <= self.depth: return output # store output in appropriate place(s) if self.inentry and not self.insource: if element == 'content': self.entries[-1].setdefault(element, []) contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element].append(contentparams) elif element == 'link': if not self.inimage: # query variables in urls in link elements are improperly # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're # unhandled character references. fix this special case. output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) self.entries[-1][element] = output if output: self.entries[-1]['links'][-1]['href'] = output else: if element == 'description': element = 'summary' old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element) if old_value_depth is None or self.depth <= old_value_depth: self.property_depth_map[self.entries[-1]][element] = self.depth self.entries[-1][element] = output if self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output self.entries[-1][element + '_detail'] = contentparams elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage): context = self._getContext() if element == 'description': element = 'subtitle' context[element] = output if element == 'link': # fix query variables; see above for the explanation output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) context[element] = output context['links'][-1]['href'] = output elif self.incontent: contentparams = copy.deepcopy(self.contentparams) contentparams['value'] = output context[element + '_detail'] = contentparams return output def pushContent(self, tag, attrsD, defaultContentType, expectingText): self.incontent += 1 if self.lang: self.lang=self.lang.replace('_','-') self.contentparams = FeedParserDict({ 'type': self.mapContentType(attrsD.get('type', defaultContentType)), 'language': self.lang, 'base': self.baseuri}) self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) self.push(tag, expectingText) def popContent(self, tag): value = self.pop(tag) self.incontent -= 1 self.contentparams.clear() return value # a number of elements in a number of RSS variants are nominally plain # text, but this is routinely ignored. This is an attempt to detect # the most common cases. As false positives often result in silent # data loss, this function errs on the conservative side. 
@staticmethod def lookslikehtml(s): # must have a close tag or an entity reference to qualify if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return # all tags must be in a restricted subset of valid HTML tags if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements, re.findall(r'</?(\w+)',s)): return # all entities must have been defined as valid HTML entities if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)): return return 1 def _mapToStandardPrefix(self, name): colonpos = name.find(':') if colonpos <> -1: prefix = name[:colonpos] suffix = name[colonpos+1:] prefix = self.namespacemap.get(prefix, prefix) name = prefix + ':' + suffix return name def _getAttribute(self, attrsD, name): return attrsD.get(self._mapToStandardPrefix(name)) def _isBase64(self, attrsD, contentparams): if attrsD.get('mode', '') == 'base64': return 1 if self.contentparams['type'].startswith(u'text/'): return 0 if self.contentparams['type'].endswith(u'+xml'): return 0 if self.contentparams['type'].endswith(u'/xml'): return 0 return 1 def _itsAnHrefDamnIt(self, attrsD): href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) if href: try: del attrsD['url'] except KeyError: pass try: del attrsD['uri'] except KeyError: pass attrsD['href'] = href return attrsD def _save(self, key, value, overwrite=False): context = self._getContext() if overwrite: context[key] = value else: context.setdefault(key, value) def _start_rss(self, attrsD): versionmap = {'0.91': u'rss091u', '0.92': u'rss092', '0.93': u'rss093', '0.94': u'rss094'} #If we're here then this is an RSS feed. #If we don't have a version or have a version that starts with something #other than RSS then there's been a mistake. Correct it. if not self.version or not self.version.startswith(u'rss'): attr_version = attrsD.get('version', '') version = versionmap.get(attr_version) if version: self.version = version elif attr_version.startswith('2.'): self.version = u'rss20' else: self.version = u'rss' def _start_channel(self, attrsD): self.infeed = 1 self._cdf_common(attrsD) def _cdf_common(self, attrsD): if 'lastmod' in attrsD: self._start_modified({}) self.elementstack[-1][-1] = attrsD['lastmod'] self._end_modified() if 'href' in attrsD: self._start_link({}) self.elementstack[-1][-1] = attrsD['href'] self._end_link() def _start_feed(self, attrsD): self.infeed = 1 versionmap = {'0.1': u'atom01', '0.2': u'atom02', '0.3': u'atom03'} if not self.version: attr_version = attrsD.get('version') version = versionmap.get(attr_version) if version: self.version = version else: self.version = u'atom' def _end_channel(self): self.infeed = 0 _end_feed = _end_channel def _start_image(self, attrsD): context = self._getContext() if not self.inentry: context.setdefault('image', FeedParserDict()) self.inimage = 1 self.title_depth = -1 self.push('image', 0) def _end_image(self): self.pop('image') self.inimage = 0 def _start_textinput(self, attrsD): context = self._getContext() context.setdefault('textinput', FeedParserDict()) self.intextinput = 1 self.title_depth = -1 self.push('textinput', 0) _start_textInput = _start_textinput def _end_textinput(self): self.pop('textinput') self.intextinput = 0 _end_textInput = _end_textinput def _start_author(self, attrsD): self.inauthor = 1 self.push('author', 1) # Append a new FeedParserDict when expecting an author context = self._getContext() context.setdefault('authors', []) context['authors'].append(FeedParserDict()) _start_managingeditor = _start_author _start_dc_author = 
_start_author _start_dc_creator = _start_author _start_itunes_author = _start_author def _end_author(self): self.pop('author') self.inauthor = 0 self._sync_author_detail() _end_managingeditor = _end_author _end_dc_author = _end_author _end_dc_creator = _end_author _end_itunes_author = _end_author def _start_itunes_owner(self, attrsD): self.inpublisher = 1 self.push('publisher', 0) def _end_itunes_owner(self): self.pop('publisher') self.inpublisher = 0 self._sync_author_detail('publisher') def _start_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('contributor', 0) def _end_contributor(self): self.pop('contributor') self.incontributor = 0 def _start_dc_contributor(self, attrsD): self.incontributor = 1 context = self._getContext() context.setdefault('contributors', []) context['contributors'].append(FeedParserDict()) self.push('name', 0) def _end_dc_contributor(self): self._end_name() self.incontributor = 0 def _start_name(self, attrsD): self.push('name', 0) _start_itunes_name = _start_name def _end_name(self): value = self.pop('name') if self.inpublisher: self._save_author('name', value, 'publisher') elif self.inauthor: self._save_author('name', value) elif self.incontributor: self._save_contributor('name', value) elif self.intextinput: context = self._getContext() context['name'] = value _end_itunes_name = _end_name def _start_width(self, attrsD): self.push('width', 0) def _end_width(self): value = self.pop('width') try: value = int(value) except ValueError: value = 0 if self.inimage: context = self._getContext() context['width'] = value def _start_height(self, attrsD): self.push('height', 0) def _end_height(self): value = self.pop('height') try: value = int(value) except ValueError: value = 0 if self.inimage: context = self._getContext() context['height'] = value def _start_url(self, attrsD): self.push('href', 1) _start_homepage = _start_url _start_uri = _start_url def _end_url(self): value = self.pop('href') if self.inauthor: self._save_author('href', value) elif self.incontributor: self._save_contributor('href', value) _end_homepage = _end_url _end_uri = _end_url def _start_email(self, attrsD): self.push('email', 0) _start_itunes_email = _start_email def _end_email(self): value = self.pop('email') if self.inpublisher: self._save_author('email', value, 'publisher') elif self.inauthor: self._save_author('email', value) elif self.incontributor: self._save_contributor('email', value) _end_itunes_email = _end_email def _getContext(self): if self.insource: context = self.sourcedata elif self.inimage and 'image' in self.feeddata: context = self.feeddata['image'] elif self.intextinput: context = self.feeddata['textinput'] elif self.inentry: context = self.entries[-1] else: context = self.feeddata return context def _save_author(self, key, value, prefix='author'): context = self._getContext() context.setdefault(prefix + '_detail', FeedParserDict()) context[prefix + '_detail'][key] = value self._sync_author_detail() context.setdefault('authors', [FeedParserDict()]) context['authors'][-1][key] = value def _save_contributor(self, key, value): context = self._getContext() context.setdefault('contributors', [FeedParserDict()]) context['contributors'][-1][key] = value def _sync_author_detail(self, key='author'): context = self._getContext() detail = context.get('%s_detail' % key) if detail: name = detail.get('name') email = detail.get('email') if name and email: context[key] 
= u'%s (%s)' % (name, email) elif name: context[key] = name elif email: context[key] = email else: author, email = context.get(key), None if not author: return emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author) if emailmatch: email = emailmatch.group(0) # probably a better way to do the following, but it passes all the tests author = author.replace(email, u'') author = author.replace(u'()', u'') author = author.replace(u'<>', u'') author = author.replace(u'&lt;&gt;', u'') author = author.strip() if author and (author[0] == u'('): author = author[1:] if author and (author[-1] == u')'): author = author[:-1] author = author.strip() if author or email: context.setdefault('%s_detail' % key, FeedParserDict()) if author: context['%s_detail' % key]['name'] = author if email: context['%s_detail' % key]['email'] = email def _start_subtitle(self, attrsD): self.pushContent('subtitle', attrsD, u'text/plain', 1) _start_tagline = _start_subtitle _start_itunes_subtitle = _start_subtitle def _end_subtitle(self): self.popContent('subtitle') _end_tagline = _end_subtitle _end_itunes_subtitle = _end_subtitle def _start_rights(self, attrsD): self.pushContent('rights', attrsD, u'text/plain', 1) _start_dc_rights = _start_rights _start_copyright = _start_rights def _end_rights(self): self.popContent('rights') _end_dc_rights = _end_rights _end_copyright = _end_rights def _start_item(self, attrsD): self.entries.append(FeedParserDict()) self.push('item', 0) self.inentry = 1 self.guidislink = 0 self.title_depth = -1 id = self._getAttribute(attrsD, 'rdf:about') if id: context = self._getContext() context['id'] = id self._cdf_common(attrsD) _start_entry = _start_item def _end_item(self): self.pop('item') self.inentry = 0 _end_entry = _end_item def _start_dc_language(self, attrsD): self.push('language', 1) _start_language = _start_dc_language def _end_dc_language(self): self.lang = self.pop('language') _end_language = _end_dc_language def _start_dc_publisher(self, attrsD): self.push('publisher', 1) _start_webmaster = _start_dc_publisher def _end_dc_publisher(self): self.pop('publisher') self._sync_author_detail('publisher') _end_webmaster = _end_dc_publisher def _start_published(self, attrsD): self.push('published', 1) _start_dcterms_issued = _start_published _start_issued = _start_published _start_pubdate = _start_published def _end_published(self): value = self.pop('published') self._save('published_parsed', _parse_date(value), overwrite=True) _end_dcterms_issued = _end_published _end_issued = _end_published _end_pubdate = _end_published def _start_updated(self, attrsD): self.push('updated', 1) _start_modified = _start_updated _start_dcterms_modified = _start_updated _start_dc_date = _start_updated _start_lastbuilddate = _start_updated def _end_updated(self): value = self.pop('updated') parsed_value = _parse_date(value) self._save('updated_parsed', parsed_value, overwrite=True) _end_modified = _end_updated _end_dcterms_modified = _end_updated _end_dc_date = _end_updated _end_lastbuilddate = _end_updated def _start_created(self, attrsD): self.push('created', 1) _start_dcterms_created = _start_created def _end_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value), overwrite=True) _end_dcterms_created = _end_created def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): self._save('expired_parsed', 
_parse_date(self.pop('expired')), overwrite=True) def _start_cc_license(self, attrsD): context = self._getContext() value = self._getAttribute(attrsD, 'rdf:resource') attrsD = FeedParserDict() attrsD['rel'] = u'license' if value: attrsD['href']=value context.setdefault('links', []).append(attrsD) def _start_creativecommons_license(self, attrsD): self.push('license', 1) _start_creativeCommons_license = _start_creativecommons_license def _end_creativecommons_license(self): value = self.pop('license') context = self._getContext() attrsD = FeedParserDict() attrsD['rel'] = u'license' if value: attrsD['href'] = value context.setdefault('links', []).append(attrsD) del context['license'] _end_creativeCommons_license = _end_creativecommons_license def _addXFN(self, relationships, href, name): context = self._getContext() xfn = context.setdefault('xfn', []) value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name}) if value not in xfn: xfn.append(value) def _addTag(self, term, scheme, label): context = self._getContext() tags = context.setdefault('tags', []) if (not term) and (not scheme) and (not label): return value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) if value not in tags: tags.append(value) def _start_category(self, attrsD): term = attrsD.get('term') scheme = attrsD.get('scheme', attrsD.get('domain')) label = attrsD.get('label') self._addTag(term, scheme, label) self.push('category', 1) _start_dc_subject = _start_category _start_keywords = _start_category def _start_media_category(self, attrsD): attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema') self._start_category(attrsD) def _end_itunes_keywords(self): for term in self.pop('itunes_keywords').split(','): if term.strip(): self._addTag(term.strip(), u'http://www.itunes.com/', None) def _start_itunes_category(self, attrsD): self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None) self.push('category', 1) def _end_category(self): value = self.pop('category') if not value: return context = self._getContext() tags = context['tags'] if value and len(tags) and not tags[-1]['term']: tags[-1]['term'] = value else: self._addTag(value, None, None) _end_dc_subject = _end_category _end_keywords = _end_category _end_itunes_category = _end_category _end_media_category = _end_category def _start_cloud(self, attrsD): self._getContext()['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', u'alternate') if attrsD['rel'] == u'self': attrsD.setdefault('type', u'application/atom+xml') else: attrsD.setdefault('type', u'text/html') context = self._getContext() attrsD = self._itsAnHrefDamnIt(attrsD) if 'href' in attrsD: attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry or self.insource context.setdefault('links', []) if not (self.inentry and self.inimage): context['links'].append(FeedParserDict(attrsD)) if 'href' in attrsD: expectingText = 0 if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): context['link'] = attrsD['href'] else: self.push('link', expectingText) def _end_link(self): value = self.pop('link') def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) _start_id = _start_guid def _end_guid(self): value = self.pop('id') self._save('guidislink', self.guidislink and 'link' not in self._getContext()) if self.guidislink: # guid acts as link, but only if 'ispermalink' is not present or 
is 'true', # and only if the item doesn't already have a link element self._save('link', value) _end_id = _end_guid def _start_title(self, attrsD): if self.svgOK: return self.unknown_starttag('title', attrsD.items()) self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource) _start_dc_title = _start_title _start_media_title = _start_title def _end_title(self): if self.svgOK: return value = self.popContent('title') if not value: return self.title_depth = self.depth _end_dc_title = _end_title def _end_media_title(self): title_depth = self.title_depth self._end_title() self.title_depth = title_depth def _start_description(self, attrsD): context = self._getContext() if 'summary' in context: self._summaryKey = 'content' self._start_content(attrsD) else: self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource) _start_dc_description = _start_description def _start_abstract(self, attrsD): self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource) def _end_description(self): if self._summaryKey == 'content': self._end_content() else: value = self.popContent('description') self._summaryKey = None _end_abstract = _end_description _end_dc_description = _end_description def _start_info(self, attrsD): self.pushContent('info', attrsD, u'text/plain', 1) _start_feedburner_browserfriendly = _start_info def _end_info(self): self.popContent('info') _end_feedburner_browserfriendly = _end_info def _start_generator(self, attrsD): if attrsD: attrsD = self._itsAnHrefDamnIt(attrsD) if 'href' in attrsD: attrsD['href'] = self.resolveURI(attrsD['href']) self._getContext()['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') context = self._getContext() if 'generator_detail' in context: context['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self._getContext()['generator_detail'] = FeedParserDict({'href': value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): context = self._getContext() if 'summary' in context: self._summaryKey = 'content' self._start_content(attrsD) else: self._summaryKey = 'summary' self.pushContent(self._summaryKey, attrsD, u'text/plain', 1) _start_itunes_summary = _start_summary def _end_summary(self): if self._summaryKey == 'content': self._end_content() else: self.popContent(self._summaryKey or 'summary') self._summaryKey = None _end_itunes_summary = _end_summary def _start_enclosure(self, attrsD): attrsD = self._itsAnHrefDamnIt(attrsD) context = self._getContext() attrsD['rel'] = u'enclosure' context.setdefault('links', []).append(FeedParserDict(attrsD)) def _start_source(self, attrsD): if 'url' in attrsD: # This means that we're processing a source element from an RSS 2.0 feed self.sourcedata['href'] = attrsD[u'url'] self.push('source', 1) self.insource = 1 self.title_depth = -1 def _end_source(self): self.insource = 0 value = self.pop('source') if value: self.sourcedata['title'] = value self._getContext()['source'] = copy.deepcopy(self.sourcedata) self.sourcedata.clear() def _start_content(self, attrsD): self.pushContent('content', 
attrsD, u'text/plain', 1) src = attrsD.get('src') if src: self.contentparams['src'] = src self.push('content', 1) def _start_body(self, attrsD): self.pushContent('content', attrsD, u'application/xhtml+xml', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.pushContent('content', attrsD, u'text/html', 1) _start_fullitem = _start_content_encoded def _end_content(self): copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types) value = self.popContent('content') if copyToSummary: self._save('summary', value) _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content def _start_itunes_image(self, attrsD): self.push('itunes_image', 0) if attrsD.get('href'): self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) elif attrsD.get('url'): self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')}) _start_itunes_link = _start_itunes_image def _end_itunes_block(self): value = self.pop('itunes_block', 0) self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 def _end_itunes_explicit(self): value = self.pop('itunes_explicit', 0) # Convert 'yes' -> True, 'clean' to False, and any other value to None # False and None both evaluate as False, so the difference can be ignored # by applications that only need to know if the content is explicit. self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0] def _start_media_content(self, attrsD): context = self._getContext() context.setdefault('media_content', []) context['media_content'].append(attrsD) def _start_media_thumbnail(self, attrsD): context = self._getContext() context.setdefault('media_thumbnail', []) self.push('url', 1) # new context['media_thumbnail'].append(attrsD) def _end_media_thumbnail(self): url = self.pop('url') context = self._getContext() if url != None and len(url.strip()) != 0: if 'url' not in context['media_thumbnail'][-1]: context['media_thumbnail'][-1]['url'] = url def _start_media_player(self, attrsD): self.push('media_player', 0) self._getContext()['media_player'] = FeedParserDict(attrsD) def _end_media_player(self): value = self.pop('media_player') context = self._getContext() context['media_player']['content'] = value def _start_newlocation(self, attrsD): self.push('newlocation', 1) def _end_newlocation(self): url = self.pop('newlocation') context = self._getContext() # don't set newlocation if the context isn't right if context is not self.feeddata: return context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip()) if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None self.decls = {} def startPrefixMapping(self, prefix, uri): if not uri: return # Jython uses '' instead of None; standardize on None prefix = prefix or None self.trackNamespace(prefix, uri) if prefix and uri == 'http://www.w3.org/1999/xlink': self.decls['xmlns:' + prefix] = uri def startElementNS(self, name, qname, attrs): namespace, localname = name lowernamespace = str(namespace or '').lower() if lowernamespace.find(u'backend.userland.com/rss') <> -1: # match any backend.userland.com namespace namespace = u'http://backend.userland.com/rss' lowernamespace = namespace if qname and qname.find(':') > 0: 
givenprefix = qname.split(':')[0] else: givenprefix = None prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse: raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix localname = str(localname).lower() # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. attrsD, self.decls = self.decls, {} if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML': attrsD['xmlns']=namespace if localname=='svg' and namespace=='http://www.w3.org/2000/svg': attrsD['xmlns']=namespace if prefix: localname = prefix.lower() + ':' + localname elif namespace and not qname: #Expat for name,value in self.namespacesInUse.items(): if name and value == namespace: localname = name + ':' + localname break for (namespace, attrlocalname), attrvalue in attrs.items(): lowernamespace = (namespace or '').lower() prefix = self._matchnamespaces.get(lowernamespace, '') if prefix: attrlocalname = prefix + ':' + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) self.unknown_starttag(localname, attrsD.items()) def characters(self, text): self.handle_data(text) def endElementNS(self, name, qname): namespace, localname = name lowernamespace = str(namespace or '').lower() if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = '' prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if prefix: localname = prefix + ':' + localname elif namespace and not qname: #Expat for name,value in self.namespacesInUse.items(): if name and value == namespace: localname = name + ':' + localname break localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc # drv_libxml2 calls warning() in some cases warning = error def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): special = re.compile('''[<>'"]''') bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)") elements_no_end_tag = set([ 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param', 'source', 'track', 'wbr' ]) def __init__(self, encoding, _type): self.encoding = encoding self._type = _type sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def _shorttag_replace(self, match): tag = match.group(1) if tag in self.elements_no_end_tag: return '<' + tag + ' />' else: return '<' + tag + '></' + tag + '>' # By declaring these methods and overriding their compiled code # with the code from sgmllib, the original code will execute in # feedparser's scope instead of sgmllib's. This means that the # `tagfind` and `charref` regular expressions will be found as # they're declared above, not as they're declared in sgmllib. 
def goahead(self, i): pass goahead.func_code = sgmllib.SGMLParser.goahead.func_code def __parse_starttag(self, i): pass __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code def parse_starttag(self,i): j = self.__parse_starttag(i) if self._type == 'application/xhtml+xml': if j>2 and self.rawdata[j-2:j]=='/>': self.unknown_endtag(self.lasttag) return j def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') try: bytes if bytes is str: raise NameError self.encoding = self.encoding + u'_INVALID_PYTHON_3' except NameError: if self.encoding and isinstance(data, unicode): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) sgmllib.SGMLParser.close(self) def normalize_attrs(self, attrs): if not attrs: return attrs # utility method to be called by descendants attrs = dict([(k.lower(), v) for k, v in attrs]).items() attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] attrs.sort() return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')] uattrs = [] strattrs='' if attrs: for key, value in attrs: value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;') value = self.bare_ampersand.sub("&amp;", value) # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds if not isinstance(value, unicode): value = value.decode(self.encoding, 'ignore') try: # Currently, in Python 3 the key is already a str, and cannot be decoded again uattrs.append((unicode(key, self.encoding), value)) except TypeError: uattrs.append((key, value)) strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]) if self.encoding: try: strattrs = strattrs.encode(self.encoding) except (UnicodeEncodeError, LookupError): pass if tag in self.elements_no_end_tag: self.pieces.append('<%s%s />' % (tag, strattrs)) else: self.pieces.append('<%s%s>' % (tag, strattrs)) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be 'pre' # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%s>" % tag) def handle_charref(self, ref): # called for each character reference, e.g. for '&#160;', ref will be '160' # Reconstruct the original character reference. if ref.startswith('x'): value = int(ref[1:], 16) else: value = int(ref) if value in _cp1252: self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) else: self.pieces.append('&#%s;' % ref) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' # Reconstruct the original entity reference. if ref in name2codepoint or ref == 'apos': self.pieces.append('&%s;' % ref) else: self.pieces.append('&amp;%s' % ref) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. self.pieces.append('<!--%s-->' % text) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. 
self.pieces.append('<?%s>' % text) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append('<!%s>' % text) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def convert_charref(self, name): return '&#%s;' % name def convert_entityref(self, name): return '&%s;' % name def output(self): '''Return processed HTML as a single string''' return ''.join([str(p) for p in self.pieces]) def parse_declaration(self, i): try: return sgmllib.SGMLParser.parse_declaration(self, i) except sgmllib.SGMLParseError: # escape the doctype declaration and continue parsing self.handle_data('&lt;') return i+1 class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding, entities): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml') self.entities=entities def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#x3C;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#x3E;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if not self.contentparams.get('type', u'xml').endswith(u'xml'): data = data.replace('&lt;', '<') data = data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") return data def strattrs(self, attrs): return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs]) class _MicroformatsParser: STRING = 1 DATE = 2 URI = 3 NODE = 4 EMAIL = 5 known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']) known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']) def __init__(self, data, baseuri, encoding): self.document = BeautifulSoup.BeautifulSoup(data) self.baseuri = baseuri self.encoding = encoding if isinstance(data, unicode): data = data.encode(encoding) self.tags = [] self.enclosures = [] self.xfn = [] self.vcard = None def vcardEscape(self, s): if isinstance(s, basestring): s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n') return s def vcardFold(self, s): s = re.sub(';+$', '', s) sFolded = '' iMax = 75 sPrefix = '' while len(s) > iMax: sFolded += sPrefix + s[:iMax] + '\n' s = s[iMax:] sPrefix = ' ' iMax = 74 sFolded += sPrefix + s return sFolded def normalize(self, s): return 
re.sub(r'\s+', ' ', s).strip() def unique(self, aList): results = [] for element in aList: if element not in results: results.append(element) return results def toISO8601(self, dt): return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt) def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0): all = lambda x: 1 sProperty = sProperty.lower() bFound = 0 bNormalize = 1 propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)} if bAllowMultiple and (iPropertyType != self.NODE): snapResults = [] containers = elmRoot(['ul', 'ol'], propertyMatch) for container in containers: snapResults.extend(container('li')) bFound = (len(snapResults) != 0) if not bFound: snapResults = elmRoot(all, propertyMatch) bFound = (len(snapResults) != 0) if (not bFound) and (sProperty == 'value'): snapResults = elmRoot('pre') bFound = (len(snapResults) != 0) bNormalize = not bFound if not bFound: snapResults = [elmRoot] bFound = (len(snapResults) != 0) arFilter = [] if sProperty == 'vcard': snapFilter = elmRoot(all, propertyMatch) for node in snapFilter: if node.findParent(all, propertyMatch): arFilter.append(node) arResults = [] for node in snapResults: if node not in arFilter: arResults.append(node) bFound = (len(arResults) != 0) if not bFound: if bAllowMultiple: return [] elif iPropertyType == self.STRING: return '' elif iPropertyType == self.DATE: return None elif iPropertyType == self.URI: return '' elif iPropertyType == self.NODE: return None else: return None arValues = [] for elmResult in arResults: sValue = None if iPropertyType == self.NODE: if bAllowMultiple: arValues.append(elmResult) continue else: return elmResult sNodeName = elmResult.name.lower() if (iPropertyType == self.EMAIL) and (sNodeName == 'a'): sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0] if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (sNodeName == 'abbr'): sValue = elmResult.get('title') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (iPropertyType == self.URI): if sNodeName == 'a': sValue = elmResult.get('href') elif sNodeName == 'img': sValue = elmResult.get('src') elif sNodeName == 'object': sValue = elmResult.get('data') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (sNodeName == 'img'): sValue = elmResult.get('alt') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if not sValue: sValue = elmResult.renderContents() sValue = re.sub(r'<\S[^>]*>', '', sValue) sValue = sValue.replace('\r\n', '\n') sValue = sValue.replace('\r', '\n') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if not sValue: continue if iPropertyType == self.DATE: sValue = _parse_date_iso8601(sValue) if bAllowMultiple: arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue) else: return bAutoEscape and self.vcardEscape(sValue) or sValue return arValues def findVCards(self, elmRoot, bAgentParsing=0): sVCards = '' if not bAgentParsing: arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1) else: arCards = [elmRoot] for elmCard in arCards: arLines = [] def processSingleString(sProperty): sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding) if sValue: arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue)) return sValue or u'' def processSingleURI(sProperty): sValue = self.getPropertyValue(elmCard, sProperty, self.URI) if sValue: 
sContentType = '' sEncoding = '' sValueKey = '' if sValue.startswith('data:'): sEncoding = ';ENCODING=b' sContentType = sValue.split(';')[0].split('/').pop() sValue = sValue.split(',', 1).pop() else: elmValue = self.getPropertyValue(elmCard, sProperty) if elmValue: if sProperty != 'url': sValueKey = ';VALUE=uri' sContentType = elmValue.get('type', '').strip().split('/').pop().strip() sContentType = sContentType.upper() if sContentType == 'OCTET-STREAM': sContentType = '' if sContentType: sContentType = ';TYPE=' + sContentType.upper() arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue)) def processTypeValue(sProperty, arDefaultType, arForceType=None): arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1) for elmResult in arResults: arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1) if arForceType: arType = self.unique(arForceType + arType) if not arType: arType = arDefaultType sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0) if sValue: arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue)) # AGENT # must do this before all other properties because it is destructive # (removes nested class="vcard" nodes so they don't interfere with # this vcard's other properties) arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1) for elmAgent in arAgent: if re.compile(r'\bvcard\b').search(elmAgent.get('class')): sAgentValue = self.findVCards(elmAgent, 1) + '\n' sAgentValue = sAgentValue.replace('\n', '\\n') sAgentValue = sAgentValue.replace(';', '\\;') if sAgentValue: arLines.append(self.vcardFold('AGENT:' + sAgentValue)) # Completely remove the agent element from the parse tree elmAgent.extract() else: sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1); if sAgentValue: arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue)) # FN (full name) sFN = processSingleString('fn') # N (name) elmName = self.getPropertyValue(elmCard, 'n') if elmName: sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1) sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1) arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1) arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1) arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1) arLines.append(self.vcardFold('N:' + sFamilyName + ';' + sGivenName + ';' + ','.join(arAdditionalNames) + ';' + ','.join(arHonorificPrefixes) + ';' + ','.join(arHonorificSuffixes))) elif sFN: # implied "N" optimization # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization arNames = self.normalize(sFN).split() if len(arNames) == 2: bFamilyNameFirst = (arNames[0].endswith(',') or len(arNames[1]) == 1 or ((len(arNames[1]) == 2) and (arNames[1].endswith('.')))) if bFamilyNameFirst: arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1])) else: arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0])) # SORT-STRING sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1) if sSortString: arLines.append(self.vcardFold('SORT-STRING:' + sSortString)) # NICKNAME 
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1) if arNickname: arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname))) # PHOTO processSingleURI('photo') # BDAY dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE) if dtBday: arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday))) # ADR (address) arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1) for elmAdr in arAdr: arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1) if not arType: arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1 sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1) sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1) sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1) sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1) sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1) sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1) sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1) arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' + sPostOfficeBox + ';' + sExtendedAddress + ';' + sStreetAddress + ';' + sLocality + ';' + sRegion + ';' + sPostalCode + ';' + sCountryName)) # LABEL processTypeValue('label', ['intl','postal','parcel','work']) # TEL (phone number) processTypeValue('tel', ['voice']) # EMAIL processTypeValue('email', ['internet'], ['internet']) # MAILER processSingleString('mailer') # TZ (timezone) processSingleString('tz') # GEO (geographical information) elmGeo = self.getPropertyValue(elmCard, 'geo') if elmGeo: sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1) sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1) arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude)) # TITLE processSingleString('title') # ROLE processSingleString('role') # LOGO processSingleURI('logo') # ORG (organization) elmOrg = self.getPropertyValue(elmCard, 'org') if elmOrg: sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1) if not sOrganizationName: # implied "organization-name" optimization # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1) if sOrganizationName: arLines.append(self.vcardFold('ORG:' + sOrganizationName)) else: arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1) arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit))) # CATEGORY arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1) if arCategory: arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory))) # NOTE processSingleString('note') # REV processSingleString('rev') # SOUND processSingleURI('sound') # UID processSingleString('uid') # URL processSingleURI('url') # CLASS processSingleString('class') # KEY processSingleURI('key') if arLines: arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard'] # XXX - this is super ugly; properly fix this with issue 148 for i, s in enumerate(arLines): if not isinstance(s, unicode): arLines[i] = s.decode('utf-8', 'ignore') sVCards += u'\n'.join(arLines) + u'\n' return sVCards.strip() def isProbablyDownloadable(self, elm): attrsD = 
elm.attrMap if 'href' not in attrsD: return 0 linktype = attrsD.get('type', '').strip() if linktype.startswith('audio/') or \ linktype.startswith('video/') or \ (linktype.startswith('application/') and not linktype.endswith('xml')): return 1 path = urlparse.urlparse(attrsD['href'])[2] if path.find('.') == -1: return 0 fileext = path.split('.').pop().lower() return fileext in self.known_binary_extensions def findTags(self): all = lambda x: 1 for elm in self.document(all, {'rel': re.compile(r'\btag\b')}): href = elm.get('href') if not href: continue urlscheme, domain, path, params, query, fragment = \ urlparse.urlparse(_urljoin(self.baseuri, href)) segments = path.split('/') tag = segments.pop() if not tag: if segments: tag = segments.pop() else: # there are no tags continue tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', '')) if not tagscheme.endswith('/'): tagscheme += '/' self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''})) def findEnclosures(self): all = lambda x: 1 enclosure_match = re.compile(r'\benclosure\b') for elm in self.document(all, {'href': re.compile(r'.+')}): if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm): continue if elm.attrMap not in self.enclosures: self.enclosures.append(elm.attrMap) if elm.string and not elm.get('title'): self.enclosures[-1]['title'] = elm.string def findXFN(self): all = lambda x: 1 for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}): rels = elm.get('rel', u'').split() xfn_rels = [r for r in rels if r in self.known_xfn_relationships] if xfn_rels: self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string}) def _parseMicroformats(htmlSource, baseURI, encoding): if not BeautifulSoup: return try: p = _MicroformatsParser(htmlSource, baseURI, encoding) except UnicodeEncodeError: # sgmllib throws this exception when performing lookups of tags # with non-ASCII characters in them. 
return p.vcard = p.findVCards(p.document) p.findTags() p.findEnclosures() p.findXFN() return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard} class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = set([('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src')]) def __init__(self, baseuri, encoding, _type): _BaseHTMLProcessor.__init__(self, encoding, _type) self.baseuri = baseuri def resolveURI(self, uri): return _makeSafeAbsoluteURI(self.baseuri, uri.strip()) def unknown_starttag(self, tag, attrs): attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type): if not _SGML_AVAILABLE: return htmlSource p = _RelativeURIResolver(baseURI, encoding, _type) p.feed(htmlSource) return p.output() def _makeSafeAbsoluteURI(base, rel=None): # bail if ACCEPTABLE_URI_SCHEMES is empty if not ACCEPTABLE_URI_SCHEMES: try: return _urljoin(base, rel or u'') except ValueError: return u'' if not base: return rel or u'' if not rel: try: scheme = urlparse.urlparse(base)[0] except ValueError: return u'' if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: return base return u'' try: uri = _urljoin(base, rel) except ValueError: return u'' if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: return u'' return uri class _HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area', 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']) acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', 'background', 'balance', 'bgcolor', 'bgproperties', 'border', 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', 'hidden', 'high', 'href', 
'hreflang', 'hspace', 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', 'xml:lang']) unacceptable_elements_with_end_tag = set(['script', 'applet', 'style']) acceptable_css_properties = set(['azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width']) # survey of common keywords found in feeds acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow']) valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math', 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', 'munderover', 'none', 'semantics']) mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign', 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth', 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows', 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']) # svgtiny - foreignObject + linearGradient + radialGradient + stop svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 
'use']) # svgtiny + class + opacity + offset + xmlns + xmlns:xlink svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic', 'arabic-form', 'ascent', 'attributeName', 'attributeType', 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', 'min', 'name', 'offset', 'opacity', 'orient', 'origin', 'overline-position', 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan']) svg_attr_map = None svg_elem_map = None acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity']) def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 self.mathmlOK = 0 self.svgOK = 0 def unknown_starttag(self, tag, attrs): acceptable_attributes = self.acceptable_attributes keymap = {} if not tag in self.acceptable_elements or self.svgOK: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 # add implicit namespaces to html5 inline svg/mathml if self._type.endswith('html'): if not dict(attrs).get('xmlns'): if tag=='svg': attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) if tag=='math': attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) # not otherwise acceptable, perhaps it is MathML or SVG? if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: self.mathmlOK += 1 if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: self.svgOK += 1 # chose acceptable attributes based on tag class, else bail if self.mathmlOK and tag in self.mathml_elements: acceptable_attributes = self.mathml_attributes elif self.svgOK and tag in self.svg_elements: # for most vocabularies, lowercasing is a good idea. 
Many # svg elements, however, are camel case if not self.svg_attr_map: lower=[attr.lower() for attr in self.svg_attributes] mix=[a for a in self.svg_attributes if a not in lower] self.svg_attributes = lower self.svg_attr_map = dict([(a.lower(),a) for a in mix]) lower=[attr.lower() for attr in self.svg_elements] mix=[a for a in self.svg_elements if a not in lower] self.svg_elements = lower self.svg_elem_map = dict([(a.lower(),a) for a in mix]) acceptable_attributes = self.svg_attributes tag = self.svg_elem_map.get(tag,tag) keymap = self.svg_attr_map elif not tag in self.acceptable_elements: return # declare xlink namespace, if needed if self.mathmlOK or self.svgOK: if filter(lambda (n,v): n.startswith('xlink:'),attrs): if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) clean_attrs = [] for key, value in self.normalize_attrs(attrs): if key in acceptable_attributes: key=keymap.get(key,key) # make sure the uri uses an acceptable uri scheme if key == u'href': value = _makeSafeAbsoluteURI(value) clean_attrs.append((key,value)) elif key=='style': clean_value = self.sanitize_style(value) if clean_value: clean_attrs.append((key,clean_value)) _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 if self.mathmlOK and tag in self.mathml_elements: if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1 elif self.svgOK and tag in self.svg_elements: tag = self.svg_elem_map.get(tag,tag) if tag == 'svg' and self.svgOK: self.svgOK -= 1 else: return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def sanitize_style(self, style): # disallow urls style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' # This replaced a regexp that used re.match and was prone to pathological back-tracking. 
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return '' clean = [] for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): if not value: continue if prop.lower() in self.acceptable_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background','border','margin','padding']: for keyword in value.split(): if not keyword in self.acceptable_css_keywords and \ not self.valid_css_values.match(keyword): break else: clean.append(prop + ': ' + value + ';') elif self.svgOK and prop.lower() in self.acceptable_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) def parse_comment(self, i, report=1): ret = _BaseHTMLProcessor.parse_comment(self, i, report) if ret >= 0: return ret # if ret == -1, this may be a malicious attempt to circumvent # sanitization, or a page-destroying unclosed comment match = re.compile(r'--[^>]*>').search(self.rawdata, i+4) if match: return match.end() # unclosed comment; deliberately fail to handle_data() return len(self.rawdata) def _sanitizeHTML(htmlSource, encoding, _type): if not _SGML_AVAILABLE: return htmlSource p = _HTMLSanitizer(encoding, _type) htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[') p.feed(htmlSource) data = p.output() if TIDY_MARKUP: # loop through list of preferred Tidy interfaces looking for one that's installed, # then set up a common _tidy function to wrap the interface-specific API. _tidy = None for tidy_interface in PREFERRED_TIDY_INTERFACES: try: if tidy_interface == "uTidy": from tidy import parseString as _utidy def _tidy(data, **kwargs): return str(_utidy(data, **kwargs)) break elif tidy_interface == "mxTidy": from mx.Tidy import Tidy as _mxtidy def _tidy(data, **kwargs): nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) return data break except: pass if _tidy: utf8 = isinstance(data, unicode) if utf8: data = data.encode('utf-8') data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") if utf8: data = unicode(data, 'utf-8') if data.count('<body'): data = data.split('<body', 1)[1] if data.count('>'): data = data.split('>', 1)[1] if data.count('</body'): data = data.split('</body', 1)[0] data = data.strip().replace('\r\n', '\n') return data class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, headers): # The default implementation just raises HTTPError. # Forget that. fp.status = code return fp def http_error_301(self, req, fp, code, msg, hdrs): result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, hdrs) result.status = code result.newurl = result.geturl() return result # The default implementations in urllib2.HTTPRedirectHandler # are identical, so hardcoding a http_error_301 call above # won't affect anything http_error_300 = http_error_301 http_error_302 = http_error_301 http_error_303 = http_error_301 http_error_307 = http_error_301 def http_error_401(self, req, fp, code, msg, headers): # Check if # - server requires digest auth, AND # - we tried (unsuccessfully) with basic auth, AND # If all conditions hold, parse authentication information # out of the Authorization header we sent the first time # (for the username and password) and the WWW-Authenticate # header the server sent back (for the realm) and retry # the request with the appropriate digest auth headers instead. # This evil genius hack has been brought to you by Aaron Swartz. 
host = urlparse.urlparse(req.get_full_url())[1] if base64 is None or 'Authorization' not in req.headers \ or 'WWW-Authenticate' not in headers: return self.http_error_default(req, fp, code, msg, headers) auth = _base64decode(req.headers['Authorization'].split(' ')[1]) user, passw = auth.split(':') realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] self.add_password(realm, host, user, passw) retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) self.reset_retry_count() return retry def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers): """URL, filename, or string --> stream This function lets you define parsers that take any input source (URL, pathname to local or network file, or actual data as a string) and deal with it in a uniform manner. Returned object is guaranteed to have all the basic stdio read methods (read, readline, readlines). Just .close() the object when you're done with it. If the etag argument is supplied, it will be used as the value of an If-None-Match request header. If the modified argument is supplied, it can be a tuple of 9 integers (as returned by gmtime() in the standard Python time module) or a date string in any format supported by feedparser. Regardless, it MUST be in GMT (Greenwich Mean Time). It will be reformatted into an RFC 1123-compliant date and used as the value of an If-Modified-Since request header. If the agent argument is supplied, it will be used as the value of a User-Agent request header. If the referrer argument is supplied, it will be used as the value of a Referer[sic] request header. If handlers is supplied, it is a list of handlers used to build a urllib2 opener. if request_headers is supplied it is a dictionary of HTTP request headers that will override the values generated by FeedParser. 
""" if hasattr(url_file_stream_or_string, 'read'): return url_file_stream_or_string if isinstance(url_file_stream_or_string, basestring) \ and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): # Deal with the feed URI scheme if url_file_stream_or_string.startswith('feed:http'): url_file_stream_or_string = url_file_stream_or_string[5:] elif url_file_stream_or_string.startswith('feed:'): url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] if not agent: agent = USER_AGENT # test for inline user:password for basic auth auth = None if base64: urltype, rest = urllib.splittype(url_file_stream_or_string) realhost, rest = urllib.splithost(rest) if realhost: user_passwd, realhost = urllib.splituser(realhost) if user_passwd: url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) auth = base64.standard_b64encode(user_passwd).strip() # iri support if isinstance(url_file_stream_or_string, unicode): url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string) # try to open with urllib2 (to use optional headers) request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()])) opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent try: return opener.open(request) finally: opener.close() # JohnD # try to open with native open function (if url_file_stream_or_string is a filename) try: return open(url_file_stream_or_string, 'rb') except (IOError, UnicodeEncodeError, TypeError): # if url_file_stream_or_string is a unicode object that # cannot be converted to the encoding returned by # sys.getfilesystemencoding(), a UnicodeEncodeError # will be thrown # If url_file_stream_or_string is a string that contains NULL # (such as an XML document encoded in UTF-32), TypeError will # be thrown. pass # treat url_file_stream_or_string as string if isinstance(url_file_stream_or_string, unicode): return _StringIO(url_file_stream_or_string.encode('utf-8')) return _StringIO(url_file_stream_or_string) def _convert_to_idn(url): """Convert a URL to IDN notation""" # this function should only be called with a unicode string # strategy: if the host cannot be encoded in ascii, then # it'll be necessary to encode it in idn form parts = list(urlparse.urlsplit(url)) try: parts[1].encode('ascii') except UnicodeEncodeError: # the url needs to be converted to idn notation host = parts[1].rsplit(':', 1) newhost = [] port = u'' if len(host) == 2: port = host.pop() for h in host[0].split('.'): newhost.append(h.encode('idna').decode('utf-8')) parts[1] = '.'.join(newhost) if port: parts[1] += ':' + port return urlparse.urlunsplit(parts) else: return url def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): request = urllib2.Request(url) request.add_header('User-Agent', agent) if etag: request.add_header('If-None-Match', etag) if isinstance(modified, basestring): modified = _parse_date(modified) elif isinstance(modified, datetime.datetime): modified = modified.utctimetuple() if modified: # format into an RFC 1123-compliant timestamp. We can't use # time.strftime() since the %a and %b directives can be affected # by the current locale, but RFC 2616 states that dates must be # in English. 
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) if referrer: request.add_header('Referer', referrer) if gzip and zlib: request.add_header('Accept-encoding', 'gzip, deflate') elif gzip: request.add_header('Accept-encoding', 'gzip') elif zlib: request.add_header('Accept-encoding', 'deflate') else: request.add_header('Accept-encoding', '') if auth: request.add_header('Authorization', 'Basic %s' % auth) if ACCEPT_HEADER: request.add_header('Accept', ACCEPT_HEADER) # use this for whatever -- cookies, special headers, etc # [('Cookie','Something'),('x-special-header','Another Value')] for header_name, header_value in request_headers.items(): request.add_header(header_name, header_value) request.add_header('A-IM', 'feed') # RFC 3229 support return request _date_handlers = [] def registerDateHandler(func): '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' _date_handlers.insert(0, func) # ISO-8601 date parsing routines written by Fazal Majid. # The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 # parser is beyond the scope of feedparser and would be a worthwhile addition # to the Python library. # A single regular expression cannot parse ISO 8601 date formats into groups # as the standard is highly irregular (for instance is 030104 2003-01-04 or # 0301-04-01), so we use templates instead. # Please note the order in templates is significant because we need a # greedy match. _iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO', 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', '-YY-?MM', '-OOO', '-YY', '--MM-?DD', '--MM', '---DD', 'CC', ''] _iso8601_re = [ tmpl.replace( 'YYYY', r'(?P<year>\d{4})').replace( 'YY', r'(?P<year>\d\d)').replace( 'MM', r'(?P<month>[01]\d)').replace( 'DD', r'(?P<day>[0123]\d)').replace( 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( 'CC', r'(?P<century>\d\d$)') + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + r'(:(?P<second>\d{2}))?' + r'(\.(?P<fracsecond>\d+))?' + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' for tmpl in _iso8601_tmpl] try: del tmpl except NameError: pass _iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] try: del regex except NameError: pass def _parse_date_iso8601(dateString): '''Parse a variety of ISO-8601-compatible formats like 20040105''' m = None for _iso8601_match in _iso8601_matches: m = _iso8601_match(dateString) if m: break if not m: return if m.span() == (0, 0): return params = m.groupdict() ordinal = params.get('ordinal', 0) if ordinal: ordinal = int(ordinal) else: ordinal = 0 year = params.get('year', '--') if not year or year == '--': year = time.gmtime()[0] elif len(year) == 2: # ISO 8601 assumes current century, i.e. 
93 -> 2093, NOT 1993 year = 100 * int(time.gmtime()[0] / 100) + int(year) else: year = int(year) month = params.get('month', '-') if not month or month == '-': # ordinals are NOT normalized by mktime, we simulate them # by setting month=1, day=ordinal if ordinal: month = 1 else: month = time.gmtime()[1] month = int(month) day = params.get('day', 0) if not day: # see above if ordinal: day = ordinal elif params.get('century', 0) or \ params.get('year', 0) or params.get('month', 0): day = 1 else: day = time.gmtime()[2] else: day = int(day) # special case of the century - is the first year of the 21st century # 2000 or 2001 ? The debate goes on... if 'century' in params: year = (int(params['century']) - 1) * 100 + 1 # in ISO 8601 most fields are optional for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: if not params.get(field, None): params[field] = 0 hour = int(params.get('hour', 0)) minute = int(params.get('minute', 0)) second = int(float(params.get('second', 0))) # weekday is normalized by mktime(), we can ignore it weekday = 0 daylight_savings_flag = -1 tm = [year, month, day, hour, minute, second, weekday, ordinal, daylight_savings_flag] # ISO 8601 time zone adjustments tz = params.get('tz') if tz and tz != 'Z': if tz[0] == '-': tm[3] += int(params.get('tzhour', 0)) tm[4] += int(params.get('tzmin', 0)) elif tz[0] == '+': tm[3] -= int(params.get('tzhour', 0)) tm[4] -= int(params.get('tzmin', 0)) else: return None # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) # which is guaranteed to normalize d/m/y/h/m/s. # Many implementations have bugs, but we'll pretend they don't. return time.localtime(time.mktime(tuple(tm))) registerDateHandler(_parse_date_iso8601) # 8-bit date handling routines written by ytrewq1. _korean_year = u'\ub144' # b3e2 in euc-kr _korean_month = u'\uc6d4' # bff9 in euc-kr _korean_day = u'\uc77c' # c0cf in euc-kr _korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr _korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr _korean_onblog_date_re = \ re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ (_korean_year, _korean_month, _korean_day)) _korean_nate_date_re = \ re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ (_korean_am, _korean_pm)) def _parse_date_onblog(dateString): '''Parse a string according to the OnBlog 8-bit date format''' m = _korean_onblog_date_re.match(dateString) if not m: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ 'zonediff': '+09:00'} return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_onblog) def _parse_date_nate(dateString): '''Parse a string according to the Nate 8-bit date format''' m = _korean_nate_date_re.match(dateString) if not m: return hour = int(m.group(5)) ampm = m.group(4) if (ampm == _korean_pm): hour += 12 hour = str(hour) if len(hour) == 1: hour = '0' + hour w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': '+09:00'} return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_nate) # Unicode strings for Greek date strings _greek_months = \ { \ u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in 
iso-8859-7 u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7 } _greek_wdays = \ { \ u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 } _greek_date_format_re = \ re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} return _parse_date_rfc822(rfc822date) registerDateHandler(_parse_date_greek) # Unicode strings for Hungarian date strings _hungarian_months = \ { \ u'janu\u00e1r': u'01', # e1 in iso-8859-2 u'febru\u00e1ri': u'02', # e1 in iso-8859-2 u'm\u00e1rcius': u'03', # e1 in iso-8859-2 u'\u00e1prilis': u'04', # e1 in iso-8859-2 u'm\u00e1ujus': u'05', # e1 in iso-8859-2 u'j\u00fanius': u'06', # fa in iso-8859-2 u'j\u00falius': u'07', # fa in iso-8859-2 u'augusztus': u'08', u'szeptember': u'09', u'okt\u00f3ber': u'10', # f3 in iso-8859-2 u'november': u'11', u'december': u'12', } _hungarian_date_format_re = \ re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') def _parse_date_hungarian(dateString): '''Parse a string according to a Hungarian 8-bit date format.''' m = _hungarian_date_format_re.match(dateString) if not m or m.group(2) not in _hungarian_months: return None month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_hungarian) # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by # Drake and licensed under the Python license. 
Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later # Modified to also support MSSQL-style datetimes as defined at: # http://msdn.microsoft.com/en-us/library/ms186724.aspx # (which basically means allowing a space as a date/time/timezone separator) def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group('year')) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group('julian') if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group('month') day = 1 if month is None: month = 1 else: month = int(month) day = m.group('day') if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group('hours') if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group('minutes')) seconds = m.group('seconds') if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): '''Return the Time Zone Designator as an offset in seconds from UTC.''' if not m: return 0 tzd = m.group('tzd') if not tzd: return 0 if tzd == 'Z': return 0 hours = int(m.group('tzdhours')) minutes = m.group('tzdminutes') if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == '+': return -offset return offset __date_re = ('(?P<year>\d\d\d\d)' '(?:(?P<dsep>-|)' '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?' '|(?P<julian>\d\d\d)))?') __tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?' __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?' + __tzd_re) __datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) # Define the strings used by the RFC822 datetime parser _rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] _rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # Only the first three letters of the month name matter _rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months)) # The year may be 2 or 4 digits; capture the century if it exists _rfc822_year = "(?P<year>(?:\d{2})?\d{2})" _rfc822_day = "(?P<day> *\d{1,2})" _rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year) _rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?" 
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})" _rfc822_tznames = { 'ut': 0, 'gmt': 0, 'z': 0, 'adt': -3, 'ast': -4, 'at': -4, 'edt': -4, 'est': -5, 'et': -5, 'cdt': -5, 'cst': -6, 'ct': -6, 'mdt': -6, 'mst': -7, 'mt': -7, 'pdt': -7, 'pst': -8, 'pt': -8, 'a': -1, 'n': 1, 'm': -12, 'y': 12, } # The timezone may be prefixed by 'Etc/' _rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz) _rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames)) _rfc822_match = re.compile( "(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time) ).match def _parse_date_rfc822(dt): """Parse RFC 822 dates and times, with one minor difference: years may be 4DIGIT or 2DIGIT. http://tools.ietf.org/html/rfc822#section-5""" try: m = _rfc822_match(dt.lower()).groupdict(0) except AttributeError: return None # Calculate a date and timestamp for k in ('year', 'day', 'hour', 'minute', 'second'): m[k] = int(m[k]) m['month'] = _rfc822_months.index(m['month']) + 1 # If the year is 2 digits, assume everything in the 90's is the 1990's if m['year'] < 100: m['year'] += (1900, 2000)[m['year'] < 90] stamp = datetime.datetime(*[m[i] for i in ('year', 'month', 'day', 'hour', 'minute', 'second')]) # Use the timezone information to calculate the difference between # the given date and timestamp and Universal Coordinated Time tzhour = 0 tzmin = 0 if m['tz'] and m['tz'].startswith('gmt'): # Handle GMT and GMT+hh:mm timezone syntax (the trailing # timezone info will be handled by the next `if` block) m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt' if not m['tz']: pass elif m['tz'].startswith('+'): tzhour = int(m['tz'][1:3]) tzmin = int(m['tz'][3:]) elif m['tz'].startswith('-'): tzhour = int(m['tz'][1:3]) * -1 tzmin = int(m['tz'][3:]) * -1 else: tzhour = _rfc822_tznames[m['tz']] delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) # Return the date and timestamp in UTC return (stamp - delta).utctimetuple() registerDateHandler(_parse_date_rfc822) def _parse_date_asctime(dt): """Parse asctime-style dates""" dayname, month, day, remainder = dt.split(None, 3) # Convert month and day into zero-padded integers month = '%02i ' % (_rfc822_months.index(month.lower()) + 1) day = '%02i ' % (int(day),) dt = month + day + remainder return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, ) registerDateHandler(_parse_date_asctime) def _parse_date_perforce(aDateString): """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" # Fri, 2006/09/15 08:19:53 EDT _my_date_pattern = re.compile( \ r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') m = _my_date_pattern.search(aDateString) if m is None: return None dow, year, month, day, hour, minute, second, tz = m.groups() months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) registerDateHandler(_parse_date_perforce) def _parse_date(dateString): '''Parses a variety of date formats into a 9-tuple in GMT''' if not dateString: return None for handler in _date_handlers: try: date9tuple = handler(dateString) except (KeyError, OverflowError, ValueError): continue if not date9tuple: continue if len(date9tuple) != 9: continue return date9tuple return None # Each marker represents some of the characters of the opening XML # processing instruction ('<?xm') in the specified encoding. 
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94]) UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F]) UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00]) UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C]) UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00]) ZERO_BYTES = _l2bytes([0x00, 0x00]) # Match the opening XML declaration. # Example: <?xml version="1.0" encoding="utf-8"?> RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>') # Capture the value of the XML processing instruction's encoding attribute. # Example: <?xml version="1.0" encoding="utf-8"?> RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')) def convert_to_utf8(http_headers, data): '''Detect and convert the character encoding to UTF-8. http_headers is a dictionary data is a raw string (not Unicode)''' # This is so much trickier than it sounds, it's not even funny. # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type # is application/xml, application/*+xml, # application/xml-external-parsed-entity, or application/xml-dtd, # the encoding given in the charset parameter of the HTTP Content-Type # takes precedence over the encoding given in the XML prefix within the # document, and defaults to 'utf-8' if neither are specified. But, if # the HTTP Content-Type is text/xml, text/*+xml, or # text/xml-external-parsed-entity, the encoding given in the XML prefix # within the document is ALWAYS IGNORED and only the encoding given in # the charset parameter of the HTTP Content-Type header should be # respected, and it defaults to 'us-ascii' if not specified. # Furthermore, discussion on the atom-syntax mailing list with the # author of RFC 3023 leads me to the conclusion that any document # served with a Content-Type of text/* and no charset parameter # must be treated as us-ascii. (We now do this.) And also that it # must always be flagged as non-well-formed. (We now do this too.) # If Content-Type is unspecified (input was local file or non-HTTP source) # or unrecognized (server just got it totally wrong), then go by the # encoding given in the XML prefix of the document and default to # 'iso-8859-1' as per the HTTP specification (RFC 2616). # Then, assuming we didn't find a character encoding in the HTTP headers # (and the HTTP Content-type allowed us to look in the body), we need # to sniff the first few bytes of the XML data and try to determine # whether the encoding is ASCII-compatible. Section F of the XML # specification shows the way here: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info # If the sniffed encoding is not ASCII-compatible, we need to make it # ASCII compatible so that we can sniff further into the XML declaration # to find the encoding attribute, which will tell us the true encoding. # Of course, none of this guarantees that we will be able to parse the # feed in the declared character encoding (assuming it was declared # correctly, which many are not). iconv_codec can help a lot; # you should definitely install it if you can. # http://cjkpython.i18n.org/ bom_encoding = u'' xml_encoding = u'' rfc3023_encoding = u'' # Look at the first few bytes of the document to guess what # its encoding may be. We only need to decode enough of the # document that we can use an ASCII-compatible regular # expression to search for an XML encoding declaration. # The heuristic follows the XML specification, section F: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info # Check for BOMs first. 
if data[:4] == codecs.BOM_UTF32_BE: bom_encoding = u'utf-32be' data = data[4:] elif data[:4] == codecs.BOM_UTF32_LE: bom_encoding = u'utf-32le' data = data[4:] elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES: bom_encoding = u'utf-16be' data = data[2:] elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES: bom_encoding = u'utf-16le' data = data[2:] elif data[:3] == codecs.BOM_UTF8: bom_encoding = u'utf-8' data = data[3:] # Check for the characters '<?xm' in several encodings. elif data[:4] == EBCDIC_MARKER: bom_encoding = u'cp037' elif data[:4] == UTF16BE_MARKER: bom_encoding = u'utf-16be' elif data[:4] == UTF16LE_MARKER: bom_encoding = u'utf-16le' elif data[:4] == UTF32BE_MARKER: bom_encoding = u'utf-32be' elif data[:4] == UTF32LE_MARKER: bom_encoding = u'utf-32le' tempdata = data try: if bom_encoding: tempdata = data.decode(bom_encoding).encode('utf-8') except (UnicodeDecodeError, LookupError): # feedparser recognizes UTF-32 encodings that aren't # available in Python 2.4 and 2.5, so it's possible to # encounter a LookupError during decoding. xml_encoding_match = None else: xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata) if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower() # Normalize the xml_encoding if necessary. if bom_encoding and (xml_encoding in ( u'u16', u'utf-16', u'utf16', u'utf_16', u'u32', u'utf-32', u'utf32', u'utf_32', u'iso-10646-ucs-2', u'iso-10646-ucs-4', u'csucs4', u'csunicode', u'ucs-2', u'ucs-4' )): xml_encoding = bom_encoding # Find the HTTP Content-Type and, hopefully, a character # encoding provided by the server. The Content-Type is used # to choose the "correct" encoding among the BOM encoding, # XML declaration encoding, and HTTP encoding, following the # heuristic defined in RFC 3023. http_content_type = http_headers.get('content-type') or '' http_content_type, params = cgi.parse_header(http_content_type) http_encoding = params.get('charset', '').replace("'", "") if not isinstance(http_encoding, unicode): http_encoding = http_encoding.decode('utf-8', 'ignore') acceptable_content_type = 0 application_content_types = (u'application/xml', u'application/xml-dtd', u'application/xml-external-parsed-entity') text_content_types = (u'text/xml', u'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith(u'application/') and http_content_type.endswith(u'+xml')): acceptable_content_type = 1 rfc3023_encoding = http_encoding or xml_encoding or u'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith(u'text/') and http_content_type.endswith(u'+xml')): acceptable_content_type = 1 rfc3023_encoding = http_encoding or u'us-ascii' elif http_content_type.startswith(u'text/'): rfc3023_encoding = http_encoding or u'us-ascii' elif http_headers and 'content-type' not in http_headers: rfc3023_encoding = xml_encoding or u'iso-8859-1' else: rfc3023_encoding = xml_encoding or u'utf-8' # gb18030 is a superset of gb2312, so always replace gb2312 # with gb18030 for greater compatibility. 
if rfc3023_encoding.lower() == u'gb2312': rfc3023_encoding = u'gb18030' if xml_encoding.lower() == u'gb2312': xml_encoding = u'gb18030' # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications error = None if http_headers and (not acceptable_content_type): if 'content-type' in http_headers: msg = '%s is not an XML media type' % http_headers['content-type'] else: msg = 'no Content-type specified' error = NonXMLContentType(msg) # determine character encoding known_encoding = 0 chardet_encoding = None tried_encodings = [] if chardet: chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore') # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding, chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'): if not proposed_encoding: continue if proposed_encoding in tried_encodings: continue tried_encodings.append(proposed_encoding) try: data = data.decode(proposed_encoding) except (UnicodeDecodeError, LookupError): pass else: known_encoding = 1 # Update the encoding in the opening XML processing instruction. new_declaration = '''<?xml version='1.0' encoding='utf-8'?>''' if RE_XML_DECLARATION.search(data): data = RE_XML_DECLARATION.sub(new_declaration, data) else: data = new_declaration + u'\n' + data data = data.encode('utf-8') break # if still no luck, give up if not known_encoding: error = CharacterEncodingUnknown( 'document encoding unknown, I tried ' + '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % (rfc3023_encoding, xml_encoding)) rfc3023_encoding = u'' elif proposed_encoding != rfc3023_encoding: error = CharacterEncodingOverride( 'document declared as %s, but parsed as %s' % (rfc3023_encoding, proposed_encoding)) rfc3023_encoding = proposed_encoding return data, rfc3023_encoding, error # Match XML entity declarations. # Example: <!ENTITY copyright "(C)"> RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE) # Match XML DOCTYPE declarations. # Example: <!DOCTYPE feed [ ]> RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE) # Match safe entity declarations. # This will allow hexadecimal character references through, # as well as text, but not arbitrary nested entities. # Example: cubed "&#179;" # Example: copyright "(C)" # Forbidden: explode1 "&explode2;&explode2;" RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')) def replace_doctype(data): '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data) rss_version may be 'rss091n' or None stripped_data is the same XML document with a replaced DOCTYPE ''' # Divide the document into two groups by finding the location # of the first element that doesn't begin with '<?' or '<!'. start = re.search(_s2bytes('<\w'), data) start = start and start.start() or -1 head, data = data[:start+1], data[start+1:] # Save and then remove all of the ENTITY declarations. entity_results = RE_ENTITY_PATTERN.findall(head) head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head) # Find the DOCTYPE declaration and check the feed type. 
doctype_results = RE_DOCTYPE_PATTERN.findall(head) doctype = doctype_results and doctype_results[0] or _s2bytes('') if _s2bytes('netscape') in doctype.lower(): version = u'rss091n' else: version = None # Re-insert the safe ENTITY declarations if a DOCTYPE was found. replacement = _s2bytes('') if len(doctype_results) == 1 and entity_results: match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e) safe_entities = filter(match_safe_entities, entity_results) if safe_entities: replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \ + _s2bytes('>\n<!ENTITY ').join(safe_entities) \ + _s2bytes('>\n]>') data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data # Precompute the safe entities for the loose parser. safe_entities = dict((k.decode('utf-8'), v.decode('utf-8')) for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement)) return version, data, safe_entities def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None): '''Parse a feed from a URL, file, stream, or string. request_headers, if given, is a dict from http header name to value to add to the request; this overrides internally generated values. ''' if handlers is None: handlers = [] if request_headers is None: request_headers = {} if response_headers is None: response_headers = {} result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] result['bozo'] = 0 if not isinstance(handlers, list): handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = None f = None if hasattr(f, 'headers'): result['headers'] = dict(f.headers) # overwrite existing headers using response_headers if 'headers' in result: result['headers'].update(response_headers) elif response_headers: result['headers'] = copy.deepcopy(response_headers) # lowercase all of the HTTP headers for comparisons per RFC 2616 if 'headers' in result: http_headers = dict((k.lower(), v) for k, v in result['headers'].items()) else: http_headers = {} # if feed is gzip-compressed, decompress it if f and data and http_headers: if gzip and 'gzip' in http_headers.get('content-encoding', ''): try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except (IOError, struct.error), e: # IOError can occur if the gzip header is bad. # struct.error can occur if the data is damaged. result['bozo'] = 1 result['bozo_exception'] = e if isinstance(e, struct.error): # A gzip header was found but the data is corrupt. # Ideally, we should re-request the feed without the # 'Accept-encoding: gzip' header, but we don't. data = None elif zlib and 'deflate' in http_headers.get('content-encoding', ''): try: data = zlib.decompress(data) except zlib.error, e: try: # The data may have no headers and no checksum. 
data = zlib.decompress(data, -15) except zlib.error, e: result['bozo'] = 1 result['bozo_exception'] = e # save HTTP headers if http_headers: if 'etag' in http_headers: etag = http_headers.get('etag', u'') if not isinstance(etag, unicode): etag = etag.decode('utf-8', 'ignore') if etag: result['etag'] = etag if 'last-modified' in http_headers: modified = http_headers.get('last-modified', u'') if modified: result['modified'] = modified result['modified_parsed'] = _parse_date(modified) if hasattr(f, 'url'): if not isinstance(f.url, unicode): result['href'] = f.url.decode('utf-8', 'ignore') else: result['href'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status if hasattr(f, 'close'): f.close() if data is None: return result # Stop processing if the server sent HTTP 304 Not Modified. if getattr(f, 'code', 0) == 304: result['version'] = u'' result['debug_message'] = 'The feed has not changed since you last checked, ' + \ 'so the server sent no data. This is a feature, not a bug!' return result data, result['encoding'], error = convert_to_utf8(http_headers, data) use_strict_parser = result['encoding'] and True or False if error is not None: result['bozo'] = 1 result['bozo_exception'] = error result['version'], data, entities = replace_doctype(data) # Ensure that baseuri is an absolute URI using an acceptable URI scheme. contentloc = http_headers.get('content-location', u'') href = result.get('href', u'') baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href baselang = http_headers.get('content-language', None) if not isinstance(baselang, unicode) and baselang is not None: baselang = baselang.decode('utf-8', 'ignore') if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) try: # disable downloading external doctype references, if possible saxparser.setFeature(xml.sax.handler.feature_external_ges, 0) except xml.sax.SAXNotSupportedException: pass saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) try: saxparser.parse(source) except xml.sax.SAXException, e: result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser and _SGML_AVAILABLE: feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities) feedparser.feed(data.decode('utf-8', 'replace')) result['feed'] = feedparser.feeddata result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version result['namespaces'] = feedparser.namespacesInUse return result
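# Usage sketch: a minimal, hedged example of driving the parse() function
# defined above. The feed URL below is purely hypothetical; parse() also
# accepts a local filename or a raw XML string. The result keys read here
# ('bozo', 'bozo_exception', 'version', 'feed', 'entries') are the ones
# populated by parse() in the code above.
if __name__ == '__main__':
    result = parse('http://example.com/atom.xml')  # hypothetical feed URL
    if result['bozo']:
        # The feed was not well-formed; the original exception is preserved.
        print 'bozo:', result['bozo_exception']
    print 'detected version:', result['version']
    print 'feed title:', result['feed'].get('title', '(none)')
    for entry in result['entries']:
        print '-', entry.get('title', '(untitled)')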
Python
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $Id$ """ fcgi - a FastCGI/WSGI gateway. For more information about FastCGI, see <http://www.fastcgi.com/>. For more information about the Web Server Gateway Interface, see <http://www.python.org/peps/pep-0333.html>. Example usage: #!/usr/bin/env python from myapplication import app # Assume app is your WSGI application object from fcgi import WSGIServer WSGIServer(app).run() See the documentation for WSGIServer/Server for more information. On most platforms, fcgi will fallback to regular CGI behavior if run in a non-FastCGI context. If you want to force CGI behavior, set the environment variable FCGI_FORCE_CGI to "Y" or "y". """ __author__ = 'Allan Saddi <allan@saddi.com>' __version__ = '$Revision$' import sys import os import signal import struct import cStringIO as StringIO import select import socket import errno import traceback try: import thread import threading thread_available = True except ImportError: import dummy_thread as thread import dummy_threading as threading thread_available = False # Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case. if not hasattr(socket, 'SHUT_WR'): socket.SHUT_WR = 1 __all__ = ['WSGIServer'] # Constants from the spec. FCGI_LISTENSOCK_FILENO = 0 FCGI_HEADER_LEN = 8 FCGI_VERSION_1 = 1 FCGI_BEGIN_REQUEST = 1 FCGI_ABORT_REQUEST = 2 FCGI_END_REQUEST = 3 FCGI_PARAMS = 4 FCGI_STDIN = 5 FCGI_STDOUT = 6 FCGI_STDERR = 7 FCGI_DATA = 8 FCGI_GET_VALUES = 9 FCGI_GET_VALUES_RESULT = 10 FCGI_UNKNOWN_TYPE = 11 FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE FCGI_NULL_REQUEST_ID = 0 FCGI_KEEP_CONN = 1 FCGI_RESPONDER = 1 FCGI_AUTHORIZER = 2 FCGI_FILTER = 3 FCGI_REQUEST_COMPLETE = 0 FCGI_CANT_MPX_CONN = 1 FCGI_OVERLOADED = 2 FCGI_UNKNOWN_ROLE = 3 FCGI_MAX_CONNS = 'FCGI_MAX_CONNS' FCGI_MAX_REQS = 'FCGI_MAX_REQS' FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS' FCGI_Header = '!BBHHBx' FCGI_BeginRequestBody = '!HB5x' FCGI_EndRequestBody = '!LB3x' FCGI_UnknownTypeBody = '!B7x' FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody) FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody) if __debug__: import time # Set non-zero to write debug output to a file. 
DEBUG = 0 DEBUGLOG = '/tmp/fcgi.log' def _debug(level, msg): if DEBUG < level: return try: f = open(DEBUGLOG, 'a') f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg)) f.close() except: pass class InputStream(object): """ File-like object representing FastCGI input streams (FCGI_STDIN and FCGI_DATA). Supports the minimum methods required by WSGI spec. """ def __init__(self, conn): self._conn = conn # See Server. self._shrinkThreshold = conn.server.inputStreamShrinkThreshold self._buf = '' self._bufList = [] self._pos = 0 # Current read position. self._avail = 0 # Number of bytes currently available. self._eof = False # True when server has sent EOF notification. def _shrinkBuffer(self): """Gets rid of already read data (since we can't rewind).""" if self._pos >= self._shrinkThreshold: self._buf = self._buf[self._pos:] self._avail -= self._pos self._pos = 0 assert self._avail >= 0 def _waitForData(self): """Waits for more data to become available.""" self._conn.process_input() def read(self, n=-1): if self._pos == self._avail and self._eof: return '' while True: if n < 0 or (self._avail - self._pos) < n: # Not enough data available. if self._eof: # And there's no more coming. newPos = self._avail break else: # Wait for more data. self._waitForData() continue else: newPos = self._pos + n break # Merge buffer list, if necessary. if self._bufList: self._buf += ''.join(self._bufList) self._bufList = [] r = self._buf[self._pos:newPos] self._pos = newPos self._shrinkBuffer() return r def readline(self, length=None): if self._pos == self._avail and self._eof: return '' while True: # Unfortunately, we need to merge the buffer list early. if self._bufList: self._buf += ''.join(self._bufList) self._bufList = [] # Find newline. i = self._buf.find('\n', self._pos) if i < 0: # Not found? if self._eof: # No more data coming. newPos = self._avail break else: # Wait for more to come. self._waitForData() continue else: newPos = i + 1 break if length is not None: if self._pos + length < newPos: newPos = self._pos + length r = self._buf[self._pos:newPos] self._pos = newPos self._shrinkBuffer() return r def readlines(self, sizehint=0): total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def __iter__(self): return self def next(self): r = self.readline() if not r: raise StopIteration return r def add_data(self, data): if not data: self._eof = True else: self._bufList.append(data) self._avail += len(data) class MultiplexedInputStream(InputStream): """ A version of InputStream meant to be used with MultiplexedConnections. Assumes the MultiplexedConnection (the producer) and the Request (the consumer) are running in different threads. """ def __init__(self, conn): super(MultiplexedInputStream, self).__init__(conn) # Arbitrates access to this InputStream (it's used simultaneously # by a Request and its owning Connection object). lock = threading.RLock() # Notifies Request thread that there is new data available. self._lock = threading.Condition(lock) def _waitForData(self): # Wait for notification from add_data(). 
self._lock.wait() def read(self, n=-1): self._lock.acquire() try: return super(MultiplexedInputStream, self).read(n) finally: self._lock.release() def readline(self, length=None): self._lock.acquire() try: return super(MultiplexedInputStream, self).readline(length) finally: self._lock.release() def add_data(self, data): self._lock.acquire() try: super(MultiplexedInputStream, self).add_data(data) self._lock.notify() finally: self._lock.release() class OutputStream(object): """ FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to write() or writelines() immediately result in Records being sent back to the server. Buffering should be done in a higher level! """ def __init__(self, conn, req, type, buffered=False): self._conn = conn self._req = req self._type = type self._buffered = buffered self._bufList = [] # Used if buffered is True self.dataWritten = False self.closed = False def _write(self, data): length = len(data) while length: toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN) rec = Record(self._type, self._req.requestId) rec.contentLength = toWrite rec.contentData = data[:toWrite] self._conn.writeRecord(rec) data = data[toWrite:] length -= toWrite def write(self, data): assert not self.closed if not data: return self.dataWritten = True if self._buffered: self._bufList.append(data) else: self._write(data) def writelines(self, lines): assert not self.closed for line in lines: self.write(line) def flush(self): # Only need to flush if this OutputStream is actually buffered. if self._buffered: data = ''.join(self._bufList) self._bufList = [] self._write(data) # Though available, the following should NOT be called by WSGI apps. def close(self): """Sends end-of-stream notification, if necessary.""" if not self.closed and self.dataWritten: self.flush() rec = Record(self._type, self._req.requestId) self._conn.writeRecord(rec) self.closed = True class TeeOutputStream(object): """ Simple wrapper around two or more output file-like objects that copies written data to all streams. """ def __init__(self, streamList): self._streamList = streamList def write(self, data): for f in self._streamList: f.write(data) def writelines(self, lines): for line in lines: self.write(line) def flush(self): for f in self._streamList: f.flush() class StdoutWrapper(object): """ Wrapper for sys.stdout so we know if data has actually been written. """ def __init__(self, stdout): self._file = stdout self.dataWritten = False def write(self, data): if data: self.dataWritten = True self._file.write(data) def writelines(self, lines): for line in lines: self.write(line) def __getattr__(self, name): return getattr(self._file, name) def decode_pair(s, pos=0): """ Decodes a name/value pair. The number of bytes decoded as well as the name/value pair are returned. """ nameLength = ord(s[pos]) if nameLength & 128: nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff pos += 4 else: pos += 1 valueLength = ord(s[pos]) if valueLength & 128: valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff pos += 4 else: pos += 1 name = s[pos:pos+nameLength] pos += nameLength value = s[pos:pos+valueLength] pos += valueLength return (pos, (name, value)) def encode_pair(name, value): """ Encodes a name/value pair. The encoded string is returned. 
""" nameLength = len(name) if nameLength < 128: s = chr(nameLength) else: s = struct.pack('!L', nameLength | 0x80000000L) valueLength = len(value) if valueLength < 128: s += chr(valueLength) else: s += struct.pack('!L', valueLength | 0x80000000L) return s + name + value class Record(object): """ A FastCGI Record. Used for encoding/decoding records. """ def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID): self.version = FCGI_VERSION_1 self.type = type self.requestId = requestId self.contentLength = 0 self.paddingLength = 0 self.contentData = '' def _recvall(sock, length): """ Attempts to receive length bytes from a socket, blocking if necessary. (Socket may be blocking or non-blocking.) """ dataList = [] recvLen = 0 while length: try: data = sock.recv(length) except socket.error, e: if e[0] == errno.EAGAIN: select.select([sock], [], []) continue else: raise if not data: # EOF break dataList.append(data) dataLen = len(data) recvLen += dataLen length -= dataLen return ''.join(dataList), recvLen _recvall = staticmethod(_recvall) def read(self, sock): """Read and decode a Record from a socket.""" try: header, length = self._recvall(sock, FCGI_HEADER_LEN) except: raise EOFError if length < FCGI_HEADER_LEN: raise EOFError self.version, self.type, self.requestId, self.contentLength, \ self.paddingLength = struct.unpack(FCGI_Header, header) if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, ' 'contentLength = %d' % (sock.fileno(), self.type, self.requestId, self.contentLength)) if self.contentLength: try: self.contentData, length = self._recvall(sock, self.contentLength) except: raise EOFError if length < self.contentLength: raise EOFError if self.paddingLength: try: self._recvall(sock, self.paddingLength) except: raise EOFError def _sendall(sock, data): """ Writes data to a socket and does not return until all the data is sent. """ length = len(data) while length: try: sent = sock.send(data) except socket.error, e: if e[0] == errno.EAGAIN: select.select([], [sock], []) continue else: raise data = data[sent:] length -= sent _sendall = staticmethod(_sendall) def write(self, sock): """Encode and write a Record to a socket.""" self.paddingLength = -self.contentLength & 7 if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, ' 'contentLength = %d' % (sock.fileno(), self.type, self.requestId, self.contentLength)) header = struct.pack(FCGI_Header, self.version, self.type, self.requestId, self.contentLength, self.paddingLength) self._sendall(sock, header) if self.contentLength: self._sendall(sock, self.contentData) if self.paddingLength: self._sendall(sock, '\x00'*self.paddingLength) class Request(object): """ Represents a single FastCGI request. These objects are passed to your handler and is the main interface between your handler and the fcgi module. The methods should not be called by your handler. However, server, params, stdin, stdout, stderr, and data are free for your handler's use. 
""" def __init__(self, conn, inputStreamClass): self._conn = conn self.server = conn.server self.params = {} self.stdin = inputStreamClass(conn) self.stdout = OutputStream(conn, self, FCGI_STDOUT) self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True) self.data = inputStreamClass(conn) def run(self): """Runs the handler, flushes the streams, and ends the request.""" try: protocolStatus, appStatus = self.server.handler(self) except: traceback.print_exc(file=self.stderr) self.stderr.flush() if not self.stdout.dataWritten: self.server.error(self) protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0 if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' % (protocolStatus, appStatus)) self._flush() self._end(appStatus, protocolStatus) def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): self._conn.end_request(self, appStatus, protocolStatus) def _flush(self): self.stdout.close() self.stderr.close() class CGIRequest(Request): """A normal CGI request disguised as a FastCGI request.""" def __init__(self, server): # These are normally filled in by Connection. self.requestId = 1 self.role = FCGI_RESPONDER self.flags = 0 self.aborted = False self.server = server self.params = dict(os.environ) self.stdin = sys.stdin self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity! self.stderr = sys.stderr self.data = StringIO.StringIO() def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): sys.exit(appStatus) def _flush(self): # Not buffered, do nothing. pass class Connection(object): """ A Connection with the web server. Each Connection is associated with a single socket (which is connected to the web server) and is responsible for handling all the FastCGI message processing for that socket. """ _multiplexed = False _inputStreamClass = InputStream def __init__(self, sock, addr, server): self._sock = sock self._addr = addr self.server = server # Active Requests for this Connection, mapped by request ID. self._requests = {} def _cleanupSocket(self): """Close the Connection's socket.""" try: self._sock.shutdown(socket.SHUT_WR) except: return try: while True: r, w, e = select.select([self._sock], [], []) if not r or not self._sock.recv(1024): break except: pass self._sock.close() def run(self): """Begin processing data from the socket.""" self._keepGoing = True while self._keepGoing: try: self.process_input() except EOFError: break except (select.error, socket.error), e: if e[0] == errno.EBADF: # Socket was closed by Request. break raise self._cleanupSocket() def process_input(self): """Attempt to read a single Record from the socket and process it.""" # Currently, any children Request threads notify this Connection # that it is no longer needed by closing the Connection's socket. # We need to put a timeout on select, otherwise we might get # stuck in it indefinitely... (I don't like this solution.) while self._keepGoing: try: r, w, e = select.select([self._sock], [], [], 1.0) except ValueError: # Sigh. ValueError gets thrown sometimes when passing select # a closed socket. 
raise EOFError if r: break if not self._keepGoing: return rec = Record() rec.read(self._sock) if rec.type == FCGI_GET_VALUES: self._do_get_values(rec) elif rec.type == FCGI_BEGIN_REQUEST: self._do_begin_request(rec) elif rec.type == FCGI_ABORT_REQUEST: self._do_abort_request(rec) elif rec.type == FCGI_PARAMS: self._do_params(rec) elif rec.type == FCGI_STDIN: self._do_stdin(rec) elif rec.type == FCGI_DATA: self._do_data(rec) elif rec.requestId == FCGI_NULL_REQUEST_ID: self._do_unknown_type(rec) else: # Need to complain about this. pass def writeRecord(self, rec): """ Write a Record to the socket. """ rec.write(self._sock) def end_request(self, req, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): """ End a Request. Called by Request objects. An FCGI_END_REQUEST Record is sent to the web server. If the web server no longer requires the connection, the socket is closed, thereby ending this Connection (run() returns). """ rec = Record(FCGI_END_REQUEST, req.requestId) rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus, protocolStatus) rec.contentLength = FCGI_EndRequestBody_LEN self.writeRecord(rec) if remove: del self._requests[req.requestId] if __debug__: _debug(2, 'end_request: flags = %d' % req.flags) if not (req.flags & FCGI_KEEP_CONN) and not self._requests: self._cleanupSocket() self._keepGoing = False def _do_get_values(self, inrec): """Handle an FCGI_GET_VALUES request from the web server.""" outrec = Record(FCGI_GET_VALUES_RESULT) pos = 0 while pos < inrec.contentLength: pos, (name, value) = decode_pair(inrec.contentData, pos) cap = self.server.capability.get(name) if cap is not None: outrec.contentData += encode_pair(name, str(cap)) outrec.contentLength = len(outrec.contentData) self.writeRecord(outrec) def _do_begin_request(self, inrec): """Handle an FCGI_BEGIN_REQUEST from the web server.""" role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData) req = self.server.request_class(self, self._inputStreamClass) req.requestId, req.role, req.flags = inrec.requestId, role, flags req.aborted = False if not self._multiplexed and self._requests: # Can't multiplex requests. self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False) else: self._requests[inrec.requestId] = req def _do_abort_request(self, inrec): """ Handle an FCGI_ABORT_REQUEST from the web server. We just mark a flag in the associated Request. """ req = self._requests.get(inrec.requestId) if req is not None: req.aborted = True def _start_request(self, req): """Run the request.""" # Not multiplexed, so run it inline. req.run() def _do_params(self, inrec): """ Handle an FCGI_PARAMS Record. If the last FCGI_PARAMS Record is received, start the request. """ req = self._requests.get(inrec.requestId) if req is not None: if inrec.contentLength: pos = 0 while pos < inrec.contentLength: pos, (name, value) = decode_pair(inrec.contentData, pos) req.params[name] = value else: self._start_request(req) def _do_stdin(self, inrec): """Handle the FCGI_STDIN stream.""" req = self._requests.get(inrec.requestId) if req is not None: req.stdin.add_data(inrec.contentData) def _do_data(self, inrec): """Handle the FCGI_DATA stream.""" req = self._requests.get(inrec.requestId) if req is not None: req.data.add_data(inrec.contentData) def _do_unknown_type(self, inrec): """Handle an unknown request type. 
Respond accordingly.""" outrec = Record(FCGI_UNKNOWN_TYPE) outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type) outrec.contentLength = FCGI_UnknownTypeBody_LEN self.writeRecord(outrec) class MultiplexedConnection(Connection): """ A version of Connection capable of handling multiple requests simultaneously. """ _multiplexed = True _inputStreamClass = MultiplexedInputStream def __init__(self, sock, addr, server): super(MultiplexedConnection, self).__init__(sock, addr, server) # Used to arbitrate access to self._requests. lock = threading.RLock() # Notification is posted everytime a request completes, allowing us # to quit cleanly. self._lock = threading.Condition(lock) def _cleanupSocket(self): # Wait for any outstanding requests before closing the socket. self._lock.acquire() while self._requests: self._lock.wait() self._lock.release() super(MultiplexedConnection, self)._cleanupSocket() def writeRecord(self, rec): # Must use locking to prevent intermingling of Records from different # threads. self._lock.acquire() try: # Probably faster than calling super. ;) rec.write(self._sock) finally: self._lock.release() def end_request(self, req, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): self._lock.acquire() try: super(MultiplexedConnection, self).end_request(req, appStatus, protocolStatus, remove) self._lock.notify() finally: self._lock.release() def _do_begin_request(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_begin_request(inrec) finally: self._lock.release() def _do_abort_request(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_abort_request(inrec) finally: self._lock.release() def _start_request(self, req): thread.start_new_thread(req.run, ()) def _do_params(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_params(inrec) finally: self._lock.release() def _do_stdin(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_stdin(inrec) finally: self._lock.release() def _do_data(self, inrec): self._lock.acquire() try: super(MultiplexedConnection, self)._do_data(inrec) finally: self._lock.release() class Server(object): """ The FastCGI server. Waits for connections from the web server, processing each request. If run in a normal CGI context, it will instead instantiate a CGIRequest and run the handler through there. """ request_class = Request cgirequest_class = CGIRequest # Limits the size of the InputStream's string buffer to this size + the # server's maximum Record size. Since the InputStream is not seekable, # we throw away already-read data once this certain amount has been read. inputStreamShrinkThreshold = 102400 - 8192 def __init__(self, handler=None, maxwrite=8192, bindAddress=None, umask=None, multiplexed=False): """ handler, if present, must reference a function or method that takes one argument: a Request object. If handler is not specified at creation time, Server *must* be subclassed. (The handler method below is abstract.) maxwrite is the maximum number of bytes (per Record) to write to the server. I've noticed mod_fastcgi has a relatively small receive buffer (8K or so). bindAddress, if present, must either be a string or a 2-tuple. If present, run() will open its own listening socket. You would use this if you wanted to run your application as an 'external' FastCGI app. (i.e. the webserver would no longer be responsible for starting your app) If a string, it will be interpreted as a filename and a UNIX socket will be opened. 
If a tuple, the first element, a string, is the interface name/IP to bind to, and the second element (an int) is the port number. Set multiplexed to True if you want to handle multiple requests per connection. Some FastCGI backends (namely mod_fastcgi) don't multiplex requests at all, so by default this is off (which saves on thread creation/locking overhead). If threads aren't available, this keyword is ignored; it's not possible to multiplex requests at all. """ if handler is not None: self.handler = handler self.maxwrite = maxwrite if thread_available: try: import resource # Attempt to glean the maximum number of connections # from the OS. maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0] except ImportError: maxConns = 100 # Just some made up number. maxReqs = maxConns if multiplexed: self._connectionClass = MultiplexedConnection maxReqs *= 5 # Another made up number. else: self._connectionClass = Connection self.capability = { FCGI_MAX_CONNS: maxConns, FCGI_MAX_REQS: maxReqs, FCGI_MPXS_CONNS: multiplexed and 1 or 0 } else: self._connectionClass = Connection self.capability = { # If threads aren't available, these are pretty much correct. FCGI_MAX_CONNS: 1, FCGI_MAX_REQS: 1, FCGI_MPXS_CONNS: 0 } self._bindAddress = bindAddress self._umask = umask def _setupSocket(self): if self._bindAddress is None: # Run as a normal FastCGI? isFCGI = True sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET, socket.SOCK_STREAM) try: sock.getpeername() except socket.error, e: if e[0] == errno.ENOTSOCK: # Not a socket, assume CGI context. isFCGI = False elif e[0] != errno.ENOTCONN: raise # FastCGI/CGI discrimination is broken on Mac OS X. # Set the environment variable FCGI_FORCE_CGI to "Y" or "y" # if you want to run your app as a simple CGI. (You can do # this with Apache's mod_env [not loaded by default in OS X # client, ha ha] and the SetEnv directive.) if not isFCGI or \ os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'): req = self.cgirequest_class(self) req.run() sys.exit(0) else: # Run as a server oldUmask = None if type(self._bindAddress) is str: # Unix socket sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: os.unlink(self._bindAddress) except OSError: pass if self._umask is not None: oldUmask = os.umask(self._umask) else: # INET socket assert type(self._bindAddress) is tuple assert len(self._bindAddress) == 2 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(self._bindAddress) sock.listen(socket.SOMAXCONN) if oldUmask is not None: os.umask(oldUmask) return sock def _cleanupSocket(self, sock): """Closes the main socket.""" sock.close() def _installSignalHandlers(self): self._oldSIGs = [(x,signal.getsignal(x)) for x in (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)] signal.signal(signal.SIGHUP, self._hupHandler) signal.signal(signal.SIGINT, self._intHandler) signal.signal(signal.SIGTERM, self._intHandler) def _restoreSignalHandlers(self): for signum,handler in self._oldSIGs: signal.signal(signum, handler) def _hupHandler(self, signum, frame): self._hupReceived = True self._keepGoing = False def _intHandler(self, signum, frame): self._keepGoing = False def run(self, timeout=1.0): """ The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if SIGHUP was received, False otherwise. 
""" web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS') if web_server_addrs is not None: web_server_addrs = map(lambda x: x.strip(), web_server_addrs.split(',')) sock = self._setupSocket() self._keepGoing = True self._hupReceived = False # Install signal handlers. self._installSignalHandlers() while self._keepGoing: try: r, w, e = select.select([sock], [], [], timeout) except select.error, e: if e[0] == errno.EINTR: continue raise if r: try: clientSock, addr = sock.accept() except socket.error, e: if e[0] in (errno.EINTR, errno.EAGAIN): continue raise if web_server_addrs and \ (len(addr) != 2 or addr[0] not in web_server_addrs): clientSock.close() continue # Instantiate a new Connection and begin processing FastCGI # messages (either in a new thread or this thread). conn = self._connectionClass(clientSock, addr, self) thread.start_new_thread(conn.run, ()) self._mainloopPeriodic() # Restore signal handlers. self._restoreSignalHandlers() self._cleanupSocket(sock) return self._hupReceived def _mainloopPeriodic(self): """ Called with just about each iteration of the main loop. Meant to be overridden. """ pass def _exit(self, reload=False): """ Protected convenience method for subclasses to force an exit. Not really thread-safe, which is why it isn't public. """ if self._keepGoing: self._keepGoing = False self._hupReceived = reload def handler(self, req): """ Default handler, which just raises an exception. Unless a handler is passed at initialization time, this must be implemented by a subclass. """ raise NotImplementedError, self.__class__.__name__ + '.handler' def error(self, req): """ Called by Request if an exception occurs within the handler. May and should be overridden. """ import cgitb req.stdout.write('Content-Type: text/html\r\n\r\n' + cgitb.html(sys.exc_info())) class WSGIServer(Server): """ FastCGI server that supports the Web Server Gateway Interface. See <http://www.python.org/peps/pep-0333.html>. """ def __init__(self, application, environ=None, multithreaded=True, **kw): """ environ, if present, must be a dictionary-like object. Its contents will be copied into application's environ. Useful for passing application-specific variables. Set multithreaded to False if your application is not MT-safe. """ if kw.has_key('handler'): del kw['handler'] # Doesn't make sense to let this through super(WSGIServer, self).__init__(**kw) if environ is None: environ = {} self.application = application self.environ = environ self.multithreaded = multithreaded # Used to force single-threadedness self._app_lock = thread.allocate_lock() def handler(self, req): """Special handler for WSGI.""" if req.role != FCGI_RESPONDER: return FCGI_UNKNOWN_ROLE, 0 # Mostly taken from example CGI gateway. environ = req.params environ.update(self.environ) environ['wsgi.version'] = (1,0) environ['wsgi.input'] = req.stdin if self._bindAddress is None: stderr = req.stderr else: stderr = TeeOutputStream((sys.stderr, req.stderr)) environ['wsgi.errors'] = stderr environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \ thread_available and self.multithreaded # Rationale for the following: If started by the web server # (self._bindAddress is None) in either FastCGI or CGI mode, the # possibility of being spawned multiple times simultaneously is quite # real. And, if started as an external server, multiple copies may be # spawned for load-balancing/redundancy. (Though I don't think # mod_fastcgi supports this?) 
environ['wsgi.multiprocess'] = True environ['wsgi.run_once'] = isinstance(req, CGIRequest) if environ.get('HTTPS', 'off') in ('on', '1'): environ['wsgi.url_scheme'] = 'https' else: environ['wsgi.url_scheme'] = 'http' self._sanitizeEnv(environ) headers_set = [] headers_sent = [] result = None def write(data): assert type(data) is str, 'write() argument must be string' assert headers_set, 'write() before start_response()' if not headers_sent: status, responseHeaders = headers_sent[:] = headers_set found = False for header,value in responseHeaders: if header.lower() == 'content-length': found = True break if not found and result is not None: try: if len(result) == 1: responseHeaders.append(('Content-Length', str(len(data)))) except: pass s = 'Status: %s\r\n' % status for header in responseHeaders: s += '%s: %s\r\n' % header s += '\r\n' req.stdout.write(s) req.stdout.write(data) req.stdout.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise if too late raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None # avoid dangling circular ref else: assert not headers_set, 'Headers already set!' assert type(status) is str, 'Status must be a string' assert len(status) >= 4, 'Status must be at least 4 characters' assert int(status[:3]), 'Status must begin with 3-digit code' assert status[3] == ' ', 'Status must have a space after code' assert type(response_headers) is list, 'Headers must be a list' if __debug__: for name,val in response_headers: assert type(name) is str, 'Header names must be strings' assert type(val) is str, 'Header values must be strings' headers_set[:] = [status, response_headers] return write if not self.multithreaded: self._app_lock.acquire() try: try: result = self.application(environ, start_response) try: for data in result: if data: write(data) if not headers_sent: write('') # in case body was empty finally: if hasattr(result, 'close'): result.close() except socket.error, e: if e[0] != errno.EPIPE: raise # Don't let EPIPE propagate beyond server finally: if not self.multithreaded: self._app_lock.release() return FCGI_REQUEST_COMPLETE, 0 def _sanitizeEnv(self, environ): """Ensure certain values are present, if required by WSGI.""" if not environ.has_key('SCRIPT_NAME'): environ['SCRIPT_NAME'] = '' if not environ.has_key('PATH_INFO'): environ['PATH_INFO'] = '' # If any of these are missing, it probably signifies a broken # server... for name,default in [('REQUEST_METHOD', 'GET'), ('SERVER_NAME', 'localhost'), ('SERVER_PORT', '80'), ('SERVER_PROTOCOL', 'HTTP/1.0')]: if not environ.has_key(name): environ['wsgi.errors'].write('%s: missing FastCGI param %s ' 'required by WSGI!\n' % (self.__class__.__name__, name)) environ[name] = default if __name__ == '__main__': def test_app(environ, start_response): """Probably not the most efficient example.""" import cgi start_response('200 OK', [('Content-Type', 'text/html')]) yield '<html><head><title>Hello World!</title></head>\n' \ '<body>\n' \ '<p>Hello World!</p>\n' \ '<table border="1">' names = environ.keys() names.sort() for name in names: yield '<tr><td>%s</td><td>%s</td></tr>\n' % ( name, cgi.escape(`environ[name]`)) form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ, keep_blank_values=1) if form.list: yield '<tr><th colspan="2">Form data</th></tr>' for field in form.list: yield '<tr><td>%s</td><td>%s</td></tr>\n' % ( field.name, field.value) yield '</table>\n' \ '</body></html>\n' WSGIServer(test_app).run()
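# Usage sketch: a hedged example of running WSGIServer as an 'external'
# FastCGI server on a TCP port instead of inheriting the listen socket from
# the web server. It is shown commented out so it does not clash with the
# test_app demo above; 'my_wsgi_app' and the address are placeholders.
# bindAddress and multiplexed are the keyword arguments documented in
# Server.__init__.
#
#     from fcgi import WSGIServer
#
#     def my_wsgi_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['Hello from an external FastCGI server!\n']
#
#     # run() returns True when a SIGHUP asked for a reload, so a supervisor
#     # style loop can simply restart the server until it exits cleanly.
#     while WSGIServer(my_wsgi_app,
#                      bindAddress=('127.0.0.1', 9000),  # hypothetical address
#                      multiplexed=False).run():
#         pass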
Python
#!/usr/bin/env python # coding: utf8 """ RPX Authentication for web2py Developed by Nathan Freeze (Copyright © 2009) Email <nathan@freezable.com> Modified by Massimo Di Pierro This file contains code to allow using RPXNow.com (now Jainrain.com) services with web2py """ import os import re import urllib from gluon import * from gluon.tools import fetch from gluon.storage import Storage import gluon.contrib.simplejson as json class RPXAccount(object): """ from gluon.contrib.login_methods.rpx_account import RPXAccount auth.settings.actions_disabled=['register','change_password', 'request_reset_password'] auth.settings.login_form = RPXAccount(request, api_key="...", domain="...", url = "http://localhost:8000/%s/default/user/login" % request.application) """ def __init__(self, request, api_key="", domain="", url="", embed=True, auth_url="https://rpxnow.com/api/v2/auth_info", language="en", prompt='rpx', on_login_failure=None, ): self.request = request self.api_key = api_key self.embed = embed self.auth_url = auth_url self.domain = domain self.token_url = url self.language = language self.profile = None self.prompt = prompt self.on_login_failure = on_login_failure self.mappings = Storage() dn = {'givenName': '', 'familyName': ''} self.mappings.Facebook = lambda profile, dn=dn:\ dict(registration_id=profile.get("identifier", ""), username=profile.get("preferredUsername", ""), email=profile.get("email", ""), first_name=profile.get("name", dn).get("givenName", ""), last_name=profile.get("name", dn).get("familyName", "")) self.mappings.Google = lambda profile, dn=dn:\ dict(registration_id=profile.get("identifier", ""), username=profile.get("preferredUsername", ""), email=profile.get("email", ""), first_name=profile.get("name", dn).get("givenName", ""), last_name=profile.get("name", dn).get("familyName", "")) self.mappings.default = lambda profile:\ dict(registration_id=profile.get("identifier", ""), username=profile.get("preferredUsername", ""), email=profile.get("email", ""), first_name=profile.get("preferredUsername", ""), last_name='') def get_user(self): request = self.request if request.vars.token: user = Storage() data = urllib.urlencode( dict(apiKey=self.api_key, token=request.vars.token)) auth_info_json = fetch(self.auth_url + '?' 
+ data) auth_info = json.loads(auth_info_json) if auth_info['stat'] == 'ok': self.profile = auth_info['profile'] provider = re.sub('[^\w\-]', '', self.profile['providerName']) user = self.mappings.get( provider, self.mappings.default)(self.profile) return user elif self.on_login_failure: redirect(self.on_login_failure) return None def login_form(self): request = self.request args = request.args if self.embed: JANRAIN_URL = \ "https://%s.rpxnow.com/openid/embed?token_url=%s&language_preference=%s" rpxform = IFRAME( _src=JANRAIN_URL % ( self.domain, self.token_url, self.language), _scrolling="no", _frameborder="no", _style="width:400px;height:240px;") else: JANRAIN_URL = \ "https://%s.rpxnow.com/openid/v2/signin?token_url=%s" rpxform = DIV(SCRIPT(_src="https://rpxnow.com/openid/v2/widget", _type="text/javascript"), SCRIPT("RPXNOW.overlay = true;", "RPXNOW.language_preference = '%s';" % self.language, "RPXNOW.realm = '%s';" % self.domain, "RPXNOW.token_url = '%s';" % self.token_url, "RPXNOW.show();", _type="text/javascript")) return rpxform def use_janrain(auth, filename='private/janrain.key', **kwargs): path = os.path.join(current.request.folder, filename) if os.path.exists(path): request = current.request domain, key = open(path, 'r').read().strip().split(':') host = current.request.env.http_host url = URL('default', 'user', args='login', scheme=True) auth.settings.actions_disabled = \ ['register', 'change_password', 'request_reset_password'] auth.settings.login_form = RPXAccount( request, api_key=key, domain=domain, url=url, **kwargs)
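
# Illustrative usage sketch (hypothetical helper, never called here): how
# use_janrain() is typically wired into a web2py model, assuming a key file at
# private/janrain.key whose single line is "<domain>:<api key>" -- the format
# split on ':' above.
def _example_use_janrain(auth):
    # If the key file is missing, use_janrain() leaves auth.settings untouched,
    # so the standard web2py login form keeps working.
    use_janrain(auth, filename='private/janrain.key')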
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of web2py Web Framework (Copyrighted, 2007-2009). Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>. License: GPL v2 Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount. """ from google.appengine.api import users class GaeGoogleAccount(object): """ Login will be done via Google's Appengine login object, instead of web2py's login form. Include in your model (eg db.py):: from gluon.contrib.login_methods.gae_google_account import \ GaeGoogleAccount auth.settings.login_form=GaeGoogleAccount() """ def login_url(self, next="/"): return users.create_login_url(next) def logout_url(self, next="/"): return users.create_logout_url(next) def get_user(self): user = users.get_current_user() if user: return dict(nickname=user.nickname(), email=user.email(), user_id=user.user_id(), source="google account")
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
   This file is part of web2py Web Framework (Copyrighted, 2007-2009).
   Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
   License: GPL v2

   Thanks to Hans Donner <hans.donner@pobox.com> for GaeGoogleAccount.
"""

from gluon.http import HTTP

try:
    import linkedin
except ImportError:
    raise HTTP(400, "linkedin module not found")


class LinkedInAccount(object):
    """
    Login will be done via the LinkedIn API, instead of web2py's login form.

    Include in your model (eg db.py)::
        from gluon.contrib.login_methods.linkedin_account import LinkedInAccount
        auth.settings.login_form=LinkedInAccount(request,KEY,SECRET,RETURN_URL)
    """

    def __init__(self, request, key, secret, return_url):
        self.request = request
        self.api = linkedin.LinkedIn(key, secret, return_url)
        self.token = self.api.requestToken()

    def login_url(self, next="/"):
        return self.api.getAuthorizeURL(self.token)

    def logout_url(self, next="/"):
        return ''

    def get_user(self):
        result = self.request.vars.verifier and self.api.accessToken(
            verifier=self.request.vars.verifier)
        if result:
            # fetch the authenticated member's profile and map it onto the
            # fields expected by web2py's Auth
            profile = self.api.GetProfile()
            return dict(first_name=profile.first_name,
                        last_name=profile.last_name,
                        username=profile.id)
Python
#!/usr/bin/env python # coding: utf8 """ Dropbox Authentication for web2py Developed by Massimo Di Pierro (2012) Same License as Web2py License """ # mind here session is dropbox session, not current.session import os import re import urllib from dropbox import client, rest, session from gluon import * from gluon.tools import fetch from gluon.storage import Storage import gluon.contrib.simplejson as json class DropboxAccount(object): """ from gluon.contrib.login_methods.dropbox_account import DropboxAccount auth.settings.actions_disabled=['register','change_password', 'request_reset_password'] auth.settings.login_form = DropboxAccount(request, key="...", secret="...", access_type="...", login_url = "http://localhost:8000/%s/default/user/login" % request.application) when logged in client = auth.settings.login_form.client """ def __init__(self, request, key="", secret="", access_type="app_folder", login_url="", on_login_failure=None, ): self.request = request self.key = key self.secret = secret self.access_type = access_type self.login_url = login_url self.on_login_failure = on_login_failure self.sess = session.DropboxSession( self.key, self.secret, self.access_type) def get_user(self): request = self.request if not current.session.dropbox_request_token: return None elif not current.session.dropbox_access_token: request_token = current.session.dropbox_request_token self.sess.set_request_token(request_token[0], request_token[1]) access_token = self.sess.obtain_access_token(self.sess.token) current.session.dropbox_access_token = \ (access_token.key, access_token.secret) else: access_token = current.session.dropbox_access_token self.sess.set_token(access_token[0], access_token[1]) user = Storage() self.client = client.DropboxClient(self.sess) data = self.client.account_info() display_name = data.get('display_name', '').split(' ', 1) user = dict(email=data.get('email', None), first_name=display_name[0], last_name=display_name[-1], registration_id=data.get('uid', None)) if not user['registration_id'] and self.on_login_failure: redirect(self.on_login_failure) return user def login_form(self): request_token = self.sess.obtain_request_token() current.session.dropbox_request_token = \ (request_token.key, request_token.secret) dropbox_url = self.sess.build_authorize_url(request_token, self.login_url) redirect(dropbox_url) form = IFRAME(_src=dropbox_url, _scrolling="no", _frameborder="no", _style="width:400px;height:240px;") return form def logout_url(self, next="/"): self.sess.unlink() current.session.auth = None return next def get_client(self): access_token = current.session.dropbox_access_token self.sess.set_token(access_token[0], access_token[1]) self.client = client.DropboxClient(self.sess) def put(self, filename, file): if not hasattr(self,'client'): self.get_client() return self.client.put_file(filename, file)['bytes'] def get(self, filename): if not hasattr(self,'client'): self.get_client() return self.client.get_file(filename) def dir(self, path): if not hasattr(self,'client'): self.get_client() return self.client.metadata(path) def use_dropbox(auth, filename='private/dropbox.key', **kwargs): path = os.path.join(current.request.folder, filename) if os.path.exists(path): request = current.request key, secret, access_type = open(path, 'r').read().strip().split(':') host = current.request.env.http_host login_url = "http://%s/%s/default/user/login" % \ (host, request.application) auth.settings.actions_disabled = \ ['register', 'change_password', 'request_reset_password'] auth.settings.login_form 
= DropboxAccount( request, key=key, secret=secret, access_type=access_type, login_url=login_url, **kwargs)
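
# Illustrative usage sketch (hypothetical helper, never called here): wiring
# use_dropbox() into a web2py model and accessing files afterwards.  Assumes
# private/dropbox.key contains "<app key>:<app secret>:<access type>", the
# format use_dropbox() splits on ':'.
def _example_use_dropbox(auth):
    use_dropbox(auth, filename='private/dropbox.key')
    # After login, auth.settings.login_form is the DropboxAccount instance,
    # so its put()/get()/dir() helpers can be used, e.g.:
    #     auth.settings.login_form.put('hello.txt', open('hello.txt', 'rb'))
    #     data = auth.settings.login_form.get('hello.txt')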
Python
#!/usr/bin/env python # coding: utf8 """ Oneall Authentication for web2py Developed by Nathan Freeze (Copyright © 2013) Email <nathan@freezable.com> This file contains code to allow using onall.com authentication services with web2py """ import os import base64 from gluon import * from gluon.storage import Storage from gluon.contrib.simplejson import JSONDecodeError from gluon.tools import fetch import gluon.contrib.simplejson as json class OneallAccount(object): """ from gluon.contrib.login_methods.oneall_account import OneallAccount auth.settings.actions_disabled=['register','change_password', 'request_reset_password'] auth.settings.login_form = OneallAccount(request, public_key="...", private_key="...", domain="...", url = "http://localhost:8000/%s/default/user/login" % request.application) """ def __init__(self, request, public_key="", private_key="", domain="", url=None, providers=None, on_login_failure=None): self.request = request self.public_key = public_key self.private_key = private_key self.url = url self.domain = domain self.profile = None self.on_login_failure = on_login_failure self.providers = providers or ["facebook", "google", "yahoo", "openid"] self.mappings = Storage() def defaultmapping(profile): name = profile.get('name',{}) dname = name.get('formatted',profile.get('displayName')) email=profile.get('emails', [{}])[0].get('value') reg_id=profile.get('identity_token','') username=profile.get('preferredUsername',email) first_name=name.get('givenName', dname.split(' ')[0]) last_name=profile.get('familyName',dname.split(' ')[1]) return dict(registration_id=reg_id,username=username,email=email, first_name=first_name,last_name=last_name) self.mappings.default = defaultmapping def get_user(self): request = self.request user = None if request.vars.connection_token: auth_url = "https://%s.api.oneall.com/connections/%s.json" % \ (self.domain, request.vars.connection_token) auth_pw = "%s:%s" % (self.public_key,self.private_key) auth_pw = base64.b64encode(auth_pw) headers = dict(Authorization="Basic %s" % auth_pw) try: auth_info_json = fetch(auth_url,headers=headers) auth_info = json.loads(auth_info_json) data = auth_info['response']['result']['data'] if data['plugin']['key'] == 'social_login': if data['plugin']['data']['status'] == 'success': userdata = data['user'] self.profile = userdata['identity'] source = self.profile['source']['key'] mapping = self.mappings.get(source,self.mappings['default']) user = mapping(self.profile) except (JSONDecodeError, KeyError): pass if user is None and self.on_login_failure: redirect(self.on_login_failure) return user def login_form(self): scheme = self.request.env.wsgi_url_scheme oneall_url = scheme + "://%s.api.oneall.com/socialize/library.js" % self.domain oneall_lib = SCRIPT(_src=oneall_url,_type='text/javascript') container = DIV(_id="oa_social_login_container") widget = SCRIPT('oneall.api.plugins.social_login.build("oa_social_login_container",', '{providers : %s,' % self.providers, 'callback_uri: "%s"});' % self.url, _type="text/javascript") form = DIV(oneall_lib,container,widget) return form def use_oneall(auth, filename='private/oneall.key', **kwargs): path = os.path.join(current.request.folder, filename) if os.path.exists(path): request = current.request domain, public_key, private_key = open(path, 'r').read().strip().split(':') url = URL('default', 'user', args='login', scheme=True) auth.settings.actions_disabled =\ ['register', 'change_password', 'request_reset_password'] auth.settings.login_form = OneallAccount( request, 
public_key=public_key,private_key=private_key, domain=domain, url=url, **kwargs)
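
# Illustrative usage sketch (hypothetical helper, never called here): enabling
# OneAll login from a web2py model, assuming private/oneall.key contains
# "<subdomain>:<public key>:<private key>" as use_oneall() expects.
def _example_use_oneall(auth):
    # providers is optional; it defaults to facebook/google/yahoo/openid.
    use_oneall(auth, filename='private/oneall.key',
               providers=['facebook', 'google'])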
Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
   BrowserID Authentication for web2py
   developed by Madhukar R Pai (Copyright 2012)
   Email <madspai@gmail.com>
   License : LGPL

   thanks and credits to the web2py community

   This custom authenticator allows web2py to authenticate using BrowserID
   (https://browserid.org/), a project by Mozilla Labs
   (http://mozillalabs.com/).  To learn how BrowserID works please visit
   http://identity.mozilla.com/post/7616727542/introducing-browserid-a-better-way-to-sign-in
   Bottom line: BrowserID provides a free, secure, de-centralized and easy to
   use (for users and developers) login solution.  You can use any email id as
   your login id; BrowserID just verifies the email id and lets you login with
   that id.

   credits for the doPost jquery function - itsadok
   (http://stackoverflow.com/users/7581/itsadok)
"""

import time

from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
import gluon.contrib.simplejson as json


class BrowserID(object):
    """
    from gluon.contrib.login_methods.browserid_account import BrowserID
    auth.settings.login_form = BrowserID(request,
        audience = "http://127.0.0.1:8000",
        assertion_post_url = "http://127.0.0.1:8000/%s/default/user/login" % request.application)
    """

    def __init__(self,
                 request,
                 audience="",
                 assertion_post_url="",
                 prompt="BrowserID Login",
                 issuer="browserid.org",
                 verify_url="https://browserid.org/verify",
                 browserid_js="https://browserid.org/include.js",
                 browserid_button="https://browserid.org/i/sign_in_red.png",
                 crypto_js="https://crypto-js.googlecode.com/files/2.2.0-crypto-md5.js",
                 on_login_failure=None,
                 ):
        self.request = request
        self.audience = audience
        self.assertion_post_url = assertion_post_url
        self.prompt = prompt
        self.issuer = issuer
        self.verify_url = verify_url
        self.browserid_js = browserid_js
        self.browserid_button = browserid_button
        self.crypto_js = crypto_js
        self.on_login_failure = on_login_failure
        self.assertion_js = """
            (function($){$.extend({doPost:function(url,params){var $form=$("<form method='POST'>").attr("action",url);
            $.each(params,function(name,value){$("<input type='hidden'>").attr("name",name).attr("value",value).appendTo($form)});
            $form.appendTo("body");$form.submit()}})})(jQuery);
            function gotVerifiedEmail(assertion){if(assertion !== null){$.doPost('%s',{'assertion':assertion});}}""" % self.assertion_post_url

    def get_user(self):
        request = self.request
        if request.vars.assertion:
            audience = self.audience
            issuer = self.issuer
            assertion = XML(request.vars.assertion, sanitize=True)
            verify_data = {'assertion': assertion, 'audience': audience}
            auth_info_json = fetch(self.verify_url, data=verify_data)
            j = json.loads(auth_info_json)
            epoch_time = int(time.time() * 1000)  # we need 13 digit epoch time
            if j["status"] == "okay" and j["audience"] == audience and j['issuer'] == issuer and j['expires'] >= epoch_time:
                return dict(email=j['email'])
            elif self.on_login_failure:
                redirect(self.on_login_failure)
            else:
                redirect('http://google.com')
        return None

    def login_form(self):
        request = self.request
        onclick = "javascript:navigator.id.getVerifiedEmail(gotVerifiedEmail) ; return false"
        form = DIV(SCRIPT(_src=self.browserid_js, _type="text/javascript"),
                   SCRIPT(_src=self.crypto_js, _type="text/javascript"),
                   A(IMG(_src=self.browserid_button, _alt=self.prompt),
                     _href="#", _onclick=onclick, _class="browserid",
                     _title="Login With BrowserID"),
                   SCRIPT(self.assertion_js))
        return form
Python
# -*- coding: utf-8 -*- # # last tinkered with by korylprince at gmail.com on 2012-07-12 # import sys import logging try: import ldap import ldap.filter ldap.set_option(ldap.OPT_REFERRALS, 0) except Exception, e: logging.error('missing ldap, try "easy_install python-ldap"') raise e def ldap_auth(server='ldap', port=None, base_dn='ou=users,dc=domain,dc=com', mode='uid', secure=False, cert_path=None, cert_file=None, bind_dn=None, bind_pw=None, filterstr='objectClass=*', username_attrib='uid', custom_scope='subtree', allowed_groups=None, manage_user=False, user_firstname_attrib='cn:1', user_lastname_attrib='cn:2', user_mail_attrib='mail', manage_groups=False, db=None, group_dn=None, group_name_attrib='cn', group_member_attrib='memberUid', group_filterstr='objectClass=*', logging_level='error'): """ to use ldap login with MS Active Directory: from gluon.contrib.login_methods.ldap_auth import ldap_auth auth.settings.login_methods.append(ldap_auth( mode='ad', server='my.domain.controller', base_dn='ou=Users,dc=domain,dc=com')) to use ldap login with Notes Domino: auth.settings.login_methods.append(ldap_auth( mode='domino',server='my.domino.server')) to use ldap login with OpenLDAP: auth.settings.login_methods.append(ldap_auth( server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com')) to use ldap login with OpenLDAP and subtree search and (optionally) multiple DNs: auth.settings.login_methods.append(ldap_auth( mode='uid_r', server='my.ldap.server', base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com'])) or (if using CN): auth.settings.login_methods.append(ldap_auth( mode='cn', server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com')) or you can full customize the search for user: auth.settings.login_methods.append(ldap_auth( mode='custom', server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com', username_attrib='uid', custom_scope='subtree')) the custom_scope can be: base, onelevel, subtree. If using secure ldaps:// pass secure=True and cert_path="..." If ldap is using GnuTLS then you need cert_file="..." instead cert_path because cert_path isn't implemented in GnuTLS :( If you need to bind to the directory with an admin account in order to search it then specify bind_dn & bind_pw to use for this. - currently only implemented for Active Directory If you need to restrict the set of allowed users (e.g. to members of a department) then specify an rfc4515 search filter string. - currently only implemented for mode in ['ad', 'company', 'uid_r'] You can manage user attributes first name, last name, email from ldap: auth.settings.login_methods.append(ldap_auth(...as usual..., manage_user=True, user_firstname_attrib='cn:1', user_lastname_attrib='cn:2', user_mail_attrib='mail' )) Where: manage_user - let web2py handle user data from ldap user_firstname_attrib - the attribute containing the user's first name optionally you can specify parts. Example: cn: "John Smith" - 'cn:1'='John' user_lastname_attrib - the attribute containing the user's last name optionally you can specify parts. 
Example: cn: "John Smith" - 'cn:2'='Smith' user_mail_attrib - the attribute containing the user's email address If you need group control from ldap to web2py app's database feel free to set: auth.settings.login_methods.append(ldap_auth(...as usual..., manage_groups=True, db=db, group_dn='ou=Groups,dc=domain,dc=com', group_name_attrib='cn', group_member_attrib='memberUid', group_filterstr='objectClass=*' )) Where: manage_group - let web2py handle the groups from ldap db - is the database object (need to have auth_user, auth_group, auth_membership) group_dn - the ldap branch of the groups group_name_attrib - the attribute where the group name is stored group_member_attrib - the attribute containing the group members name group_filterstr - as the filterstr but for group select You can restrict login access to specific groups if you specify: auth.settings.login_methods.append(ldap_auth(...as usual..., allowed_groups=[...], group_dn='ou=Groups,dc=domain,dc=com', group_name_attrib='cn', group_member_attrib='memberUid',#use 'member' for Active Directory group_filterstr='objectClass=*' )) Where: allowed_groups - a list with allowed ldap group names group_dn - the ldap branch of the groups group_name_attrib - the attribute where the group name is stored group_member_attrib - the attribute containing the group members name group_filterstr - as the filterstr but for group select If using Active Directory you must specify bind_dn and bind_pw for allowed_groups unless anonymous bind works. You can set the logging level with the "logging_level" parameter, default is "error" and can be set to error, warning, info, debug. """ logger = logging.getLogger('web2py.auth.ldap_auth') if logging_level == 'error': logger.setLevel(logging.ERROR) elif logging_level == 'warning': logger.setLevel(logging.WARNING) elif logging_level == 'info': logger.setLevel(logging.INFO) elif logging_level == 'debug': logger.setLevel(logging.DEBUG) def ldap_auth_aux(username, password, ldap_server=server, ldap_port=port, ldap_basedn=base_dn, ldap_mode=mode, ldap_binddn=bind_dn, ldap_bindpw=bind_pw, secure=secure, cert_path=cert_path, cert_file=cert_file, filterstr=filterstr, username_attrib=username_attrib, custom_scope=custom_scope, manage_user=manage_user, user_firstname_attrib=user_firstname_attrib, user_lastname_attrib=user_lastname_attrib, user_mail_attrib=user_mail_attrib, manage_groups=manage_groups, allowed_groups=allowed_groups, db=db): if password == '': # http://tools.ietf.org/html/rfc4513#section-5.1.2 logger.warning('blank password not allowed') return False logger.debug('mode: [%s] manage_user: [%s] custom_scope: [%s]' ' manage_groups: [%s]' % (str(mode), str(manage_user), str(custom_scope), str(manage_groups))) if manage_user: if user_firstname_attrib.count(':') > 0: (user_firstname_attrib, user_firstname_part) = user_firstname_attrib.split(':', 1) user_firstname_part = (int(user_firstname_part) - 1) else: user_firstname_part = None if user_lastname_attrib.count(':') > 0: (user_lastname_attrib, user_lastname_part) = user_lastname_attrib.split(':', 1) user_lastname_part = (int(user_lastname_part) - 1) else: user_lastname_part = None user_firstname_attrib = ldap.filter.escape_filter_chars( user_firstname_attrib) user_lastname_attrib = ldap.filter.escape_filter_chars( user_lastname_attrib) user_mail_attrib = ldap.filter.escape_filter_chars( user_mail_attrib) try: if allowed_groups: if not is_user_in_allowed_groups(username, password): return False con = init_ldap() if ldap_mode == 'ad': # Microsoft Active Directory if '@' 
not in username: domain = [] for x in ldap_basedn.split(','): if "DC=" in x.upper(): domain.append(x.split('=')[-1]) username = "%s@%s" % (username, '.'.join(domain)) username_bare = username.split("@")[0] con.set_option(ldap.OPT_PROTOCOL_VERSION, 3) # In cases where ForestDnsZones and DomainDnsZones are found, # result will look like the following: # ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones, # DC=domain,DC=com'] if ldap_binddn: # need to search directory with an admin account 1st con.simple_bind_s(ldap_binddn, ldap_bindpw) else: # credentials should be in the form of username@domain.tld con.simple_bind_s(username, password) # this will throw an index error if the account is not found # in the ldap_basedn requested_attrs = ['sAMAccountName'] if manage_user: requested_attrs.extend([user_firstname_attrib, user_lastname_attrib, user_mail_attrib]) result = con.search_ext_s( ldap_basedn, ldap.SCOPE_SUBTREE, "(&(sAMAccountName=%s)(%s))" % ( ldap.filter.escape_filter_chars(username_bare), filterstr), requested_attrs)[0][1] if not isinstance(result, dict): # result should be a dict in the form # {'sAMAccountName': [username_bare]} logger.warning('User [%s] not found!' % username) return False if ldap_binddn: # We know the user exists & is in the correct OU # so now we just check the password con.simple_bind_s(username, password) username = username_bare if ldap_mode == 'domino': # Notes Domino if "@" in username: username = username.split("@")[0] con.simple_bind_s(username, password) if manage_user: # TODO: sorry I have no clue how to query attrs in domino result = {user_firstname_attrib: username, user_lastname_attrib: None, user_mail_attrib: None} if ldap_mode == 'cn': # OpenLDAP (CN) dn = "cn=" + username + "," + ldap_basedn con.simple_bind_s(dn, password) if manage_user: result = con.search_s(dn, ldap.SCOPE_BASE, "(objectClass=*)", [user_firstname_attrib, user_lastname_attrib, user_mail_attrib])[0][1] if ldap_mode == 'uid': # OpenLDAP (UID) dn = "uid=" + username + "," + ldap_basedn con.simple_bind_s(dn, password) if manage_user: result = con.search_s(dn, ldap.SCOPE_BASE, "(objectClass=*)", [user_firstname_attrib, user_lastname_attrib, user_mail_attrib])[0][1] if ldap_mode == 'company': # no DNs or password needed to search directory dn = "" pw = "" # bind anonymously con.simple_bind_s(dn, pw) # search by e-mail address filter = '(&(mail=%s)(%s))' % ( ldap.filter.escape_filter_chars(username), filterstr) # find the uid attrs = ['uid'] if manage_user: attrs.extend([user_firstname_attrib, user_lastname_attrib, user_mail_attrib]) # perform the actual search company_search_result = con.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, filter, attrs) dn = company_search_result[0][0] result = company_search_result[0][1] # perform the real authentication test con.simple_bind_s(dn, password) if ldap_mode == 'uid_r': # OpenLDAP (UID) with subtree search and multiple DNs if isinstance(ldap_basedn, list): basedns = ldap_basedn else: basedns = [ldap_basedn] filter = '(&(uid=%s)(%s))' % ( ldap.filter.escape_filter_chars(username), filterstr) found = False for basedn in basedns: try: result = con.search_s(basedn, ldap.SCOPE_SUBTREE, filter) if result: user_dn = result[0][0] # Check the password con.simple_bind_s(user_dn, password) found = True break except ldap.LDAPError, detail: (exc_type, exc_value) = sys.exc_info()[:2] logger.warning( "ldap_auth: searching %s for %s resulted in %s: %s\n" % (basedn, filter, exc_type, exc_value) ) if not found: logger.warning('User [%s] not found!' 
% username) return False result = result[0][1] if ldap_mode == 'custom': # OpenLDAP (username_attrs) with subtree search and # multiple DNs if isinstance(ldap_basedn, list): basedns = ldap_basedn else: basedns = [ldap_basedn] filter = '(&(%s=%s)(%s))' % (username_attrib, ldap.filter.escape_filter_chars( username), filterstr) if custom_scope == 'subtree': ldap_scope = ldap.SCOPE_SUBTREE elif custom_scope == 'base': ldap_scope = ldap.SCOPE_BASE elif custom_scope == 'onelevel': ldap_scope = ldap.SCOPE_ONELEVEL found = False for basedn in basedns: try: result = con.search_s(basedn, ldap_scope, filter) if result: user_dn = result[0][0] # Check the password con.simple_bind_s(user_dn, password) found = True break except ldap.LDAPError, detail: (exc_type, exc_value) = sys.exc_info()[:2] logger.warning( "ldap_auth: searching %s for %s resulted in %s: %s\n" % (basedn, filter, exc_type, exc_value) ) if not found: logger.warning('User [%s] not found!' % username) return False result = result[0][1] if manage_user: logger.info('[%s] Manage user data' % str(username)) try: if user_firstname_part is not None: store_user_firstname = result[user_firstname_attrib][ 0].split(' ', 1)[user_firstname_part] else: store_user_firstname = result[user_firstname_attrib][0] except KeyError, e: store_user_firstname = None try: if user_lastname_part is not None: store_user_lastname = result[user_lastname_attrib][ 0].split(' ', 1)[user_lastname_part] else: store_user_lastname = result[user_lastname_attrib][0] except KeyError, e: store_user_lastname = None try: store_user_mail = result[user_mail_attrib][0] except KeyError, e: store_user_mail = None try: # # user as username # ################# user_in_db = db(db.auth_user.username == username) if user_in_db.count() > 0: user_in_db.update(first_name=store_user_firstname, last_name=store_user_lastname, email=store_user_mail) else: db.auth_user.insert(first_name=store_user_firstname, last_name=store_user_lastname, email=store_user_mail, username=username) except: # # user as email # ############## user_in_db = db(db.auth_user.email == username) if user_in_db.count() > 0: user_in_db.update(first_name=store_user_firstname, last_name=store_user_lastname) else: db.auth_user.insert(first_name=store_user_firstname, last_name=store_user_lastname, email=username) con.unbind() if manage_groups: if not do_manage_groups(username, password): return False return True except ldap.INVALID_CREDENTIALS, e: return False except ldap.LDAPError, e: import traceback logger.warning('[%s] Error in ldap processing' % str(username)) logger.debug(traceback.format_exc()) return False except IndexError, ex: # for AD membership test import traceback logger.warning('[%s] Ldap result indexing error' % str(username)) logger.debug(traceback.format_exc()) return False def is_user_in_allowed_groups(username, password=None, allowed_groups=allowed_groups): """ Figure out if the username is a member of an allowed group in ldap or not """ # # Get all group name where the user is in actually in ldap # ######################################################### ldap_groups_of_the_user = get_user_groups_from_ldap(username, password) # search for allowed group names if type(allowed_groups) != type(list()): allowed_groups = [allowed_groups] for group in allowed_groups: if ldap_groups_of_the_user.count(group) > 0: # Match return True # No match return False def do_manage_groups(username, password=None, db=db): """ Manage user groups Get all user's group from ldap and refresh the already stored ones in web2py's application 
database or create new groups according to ldap. """ logger.info('[%s] Manage user groups' % str(username)) try: # # Get all group name where the user is in actually in ldap # ######################################################### ldap_groups_of_the_user = get_user_groups_from_ldap( username, password) # # Get all group name where the user is in actually in local db # ############################################################# try: db_user_id = db(db.auth_user.username == username).select( db.auth_user.id).first().id except: try: db_user_id = db(db.auth_user.email == username).select( db.auth_user.id).first().id except AttributeError, e: # # There is no user in local db # We create one # ############################## try: db_user_id = db.auth_user.insert(username=username, first_name=username) except AttributeError, e: db_user_id = db.auth_user.insert(email=username, first_name=username) if not db_user_id: logging.error( 'There is no username or email for %s!' % username) raise db_group_search = db((db.auth_membership.user_id == db_user_id) & (db.auth_user.id == db.auth_membership.user_id) & (db.auth_group.id == db.auth_membership.group_id)) db_groups_of_the_user = list() db_group_id = dict() if db_group_search.count() > 0: for group in db_group_search.select(db.auth_group.id, db.auth_group.role, distinct=True): db_group_id[group.role] = group.id db_groups_of_the_user.append(group.role) logging.debug('db groups of user %s: %s' % (username, str(db_groups_of_the_user))) # # Delete user membership from groups where user is not anymore # ############################################################# for group_to_del in db_groups_of_the_user: if ldap_groups_of_the_user.count(group_to_del) == 0: db((db.auth_membership.user_id == db_user_id) & (db.auth_membership.group_id == \ db_group_id[group_to_del])).delete() # # Create user membership in groups where user is not in already # ############################################################## for group_to_add in ldap_groups_of_the_user: if db_groups_of_the_user.count(group_to_add) == 0: if db(db.auth_group.role == group_to_add).count() == 0: gid = db.auth_group.insert(role=group_to_add, description='Generated from LDAP') else: gid = db(db.auth_group.role == group_to_add).select( db.auth_group.id).first().id db.auth_membership.insert(user_id=db_user_id, group_id=gid) except: logger.warning("[%s] Groups are not managed successfully!" 
% str(username)) import traceback logger.debug(traceback.format_exc()) return False return True def init_ldap(ldap_server=server, ldap_port=port, ldap_basedn=base_dn, ldap_mode=mode, secure=secure, cert_path=cert_path, cert_file=cert_file): """ Inicialize ldap connection """ logger.info('[%s] Initialize ldap connection' % str(ldap_server)) if secure: if not ldap_port: ldap_port = 636 con = ldap.initialize( "ldaps://" + ldap_server + ":" + str(ldap_port)) if cert_path: con.set_option(ldap.OPT_X_TLS_CACERTDIR, cert_path) if cert_file: con.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file) else: if not ldap_port: ldap_port = 389 con = ldap.initialize( "ldap://" + ldap_server + ":" + str(ldap_port)) return con def get_user_groups_from_ldap(username, password=None, base_dn=base_dn, ldap_binddn=bind_dn, ldap_bindpw=bind_pw, group_dn=group_dn, group_name_attrib=group_name_attrib, group_member_attrib=group_member_attrib, group_filterstr=group_filterstr, ldap_mode=mode): """ Get all group names from ldap where the user is in """ logger.info('[%s] Get user groups from ldap' % str(username)) # # Get all group name where the user is in actually in ldap # ######################################################### # Initialize ldap if not group_dn: group_dn = base_dn con = init_ldap() logger.debug('Username init: [%s]' % username) if ldap_mode == 'ad': # # Get the AD username # #################### if '@' not in username: domain = [] for x in base_dn.split(','): if "DC=" in x.upper(): domain.append(x.split('=')[-1]) username = "%s@%s" % (username, '.'.join(domain)) username_bare = username.split("@")[0] con.set_option(ldap.OPT_PROTOCOL_VERSION, 3) # In cases where ForestDnsZones and DomainDnsZones are found, # result will look like the following: # ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones, # DC=domain,DC=com'] if ldap_binddn: # need to search directory with an admin account 1st con.simple_bind_s(ldap_binddn, ldap_bindpw) logger.debug('Ldap bind connect...') else: # credentials should be in the form of username@domain.tld con.simple_bind_s(username, password) logger.debug('Ldap username connect...') # We have to use the full string username = con.search_ext_s(base_dn, ldap.SCOPE_SUBTREE, "(&(sAMAccountName=%s)(%s))" % (ldap.filter.escape_filter_chars(username_bare), filterstr), ["cn"])[0][0] else: if ldap_binddn: # need to search directory with an bind_dn account 1st con.simple_bind_s(ldap_binddn, ldap_bindpw) else: # bind as anonymous con.simple_bind_s('', '') # search for groups where user is in filter = '(&(%s=%s)(%s))' % (ldap.filter.escape_filter_chars( group_member_attrib ), ldap.filter.escape_filter_chars(username), group_filterstr) group_search_result = con.search_s(group_dn, ldap.SCOPE_SUBTREE, filter, [group_name_attrib]) ldap_groups_of_the_user = list() for group_row in group_search_result: group = group_row[1] if type(group) == dict and group.has_key(group_name_attrib): ldap_groups_of_the_user.extend(group[group_name_attrib]) con.unbind() logger.debug('User groups: %s' % ldap_groups_of_the_user) return list(ldap_groups_of_the_user) if filterstr[0] == '(' and filterstr[-1] == ')': # rfc4515 syntax filterstr = filterstr[1:-1] # parens added again where used return ldap_auth_aux
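
# Illustrative usage sketch (hypothetical helper, never called here): a
# consolidated Active Directory setup using the options documented above.
# Server names, DNs, credentials and group names are placeholders, and the
# givenName/sn/mail attribute names are common AD choices, not requirements.
def _example_ldap_ad_setup(auth, db):
    auth.settings.login_methods.append(ldap_auth(
        mode='ad',
        server='dc1.example.com',
        base_dn='ou=Users,dc=example,dc=com',
        bind_dn='cn=web2py,ou=Service Accounts,dc=example,dc=com',
        bind_pw='secret',
        allowed_groups=['web2py-users'],
        group_dn='ou=Groups,dc=example,dc=com',
        group_member_attrib='member',   # use 'member' for Active Directory
        manage_user=True,
        user_firstname_attrib='givenName',
        user_lastname_attrib='sn',
        user_mail_attrib='mail',
        db=db))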
Python
#!/usr/bin/env python # coding: utf8 """ OpenID authentication for web2py Allowed using OpenID login together with web2py built-in login. By default, to support OpenID login, put this in your db.py >>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth >>> auth.settings.login_form = OpenIDAuth(auth) To show OpenID list in user profile, you can add the following code before the end of function user() of your_app/controllers/default.py + if (request.args and request.args(0) == "profile"): + form = DIV(form, openid_login_form.list_user_openids()) return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration) More detail in the description of the class OpenIDAuth. Requirements: python-openid version 2.2.5 or later Reference: * w2p openID http://w2popenid.appspot.com/init/default/wiki/w2popenid * RPX and web2py auth module http://www.web2pyslices.com/main/slices/take_slice/28 * built-in file: gluon/contrib/login_methods/rpx_account.py * built-in file: gluon/tools.py (Auth class) """ import time from datetime import datetime, timedelta from gluon import * from gluon.storage import Storage, Messages try: import openid.consumer.consumer from openid.association import Association from openid.store.interface import OpenIDStore from openid.extensions.sreg import SRegRequest, SRegResponse from openid.store import nonce from openid.consumer.discover import DiscoveryFailure except ImportError, err: raise ImportError("OpenIDAuth requires python-openid package") DEFAULT = lambda: None class OpenIDAuth(object): """ OpenIDAuth It supports the logout_url, implementing the get_user and login_form for cas usage of gluon.tools.Auth. It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods combined with the standard logon/register procedure. It uses OpenID Consumer when render the form and begins the OpenID authentication. Example: (put these code after auth.define_tables() in your models.) auth = Auth(globals(), db) # authentication/authorization ... auth.define_tables() # creates all needed tables ... #include in your model after auth has been defined from gluon.contrib.login_methods.openid_auth import OpenIDAuth openid_login_form = OpenIDAuth(request, auth, db) from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm extended_login_form = ExtendedLoginForm(request, auth, openid_login_form, signals=['oid','janrain_nonce']) auth.settings.login_form = extended_login_form """ def __init__(self, auth): self.auth = auth self.db = auth.db request = current.request self.nextvar = '_next' self.realm = 'http://%s' % request.env.http_host self.login_url = URL(r=request, f='user', args=['login']) self.return_to_url = self.realm + self.login_url self.table_alt_logins_name = "alt_logins" if not auth.settings.table_user: raise self.table_user = self.auth.settings.table_user self.openid_expiration = 15 # minutes self.messages = self._define_messages() if not self.table_alt_logins_name in self.db.tables: self._define_alt_login_table() def _define_messages(self): messages = Messages(current.T) messages.label_alt_login_username = 'Sign-in with OpenID: ' messages.label_add_alt_login_username = 'Add a new OpenID: ' messages.submit_button = 'Sign in' messages.submit_button_add = 'Add' messages.a_delete = 'Delete' messages.comment_openid_signin = 'What is OpenID?' 
messages.comment_openid_help_title = 'Start using your OpenID'
        messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
        messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenience.'
        messages.flash_openid_associated = 'OpenID associated'
        messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
        messages.p_openid_not_registered = "This OpenID hasn't been registered. " \
            + "Please login to associate with it or register an account for it."
        messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
        messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
        messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
        messages.flash_openid_need_setup = 'OpenID authentication needs to be set up by the user with the provider first.'
        messages.h_openid_login = 'OpenID Login'
        messages.h_openid_list = 'OpenID List'
        return messages

    def _define_alt_login_table(self):
        """
        Define the OpenID login table.
        Note: oidtype is what I used for our project.
        We're going to support 'facebook' and 'plurk' alternate login methods.
        Otherwise it's always 'openid' and you may not need it.
        This should be easy to change.
        (Just remove the field of "type" and remove the
        "and db.alt_logins.oidtype == type_" in _find_matched_openid function)
        """
        db = self.db
        table = db.define_table(
            self.table_alt_logins_name,
            Field('username', length=512, default=''),
            Field('oidtype', length=128, default='openid', readable=False),
            Field('oiduser', self.table_user, readable=False),
        )
        table.username.requires = IS_NOT_IN_DB(db, table.username)
        self.table_alt_logins = table

    def logout_url(self, next):
        """
        Delete the w2popenid record in session as logout
        """
        if current.session.w2popenid:
            del(current.session.w2popenid)
        return next

    def login_form(self):
        """
        Start to process the OpenID response if 'janrain_nonce' is in the
        request parameters and not processed yet. Otherwise return the OpenID
        form for login.
        """
        request = current.request
        if 'janrain_nonce' in request.vars and not self._processed():
            self._process_response()
            return self.auth()
        return self._form()

    def get_user(self):
        """
        It supports the logout_url, implementing the get_user and login_form
        for cas usage of gluon.tools.Auth.
        """
        request = current.request
        args = request.args

        if args[0] == 'logout':
            return True  # let logout_url get called

        if current.session.w2popenid:
            w2popenid = current.session.w2popenid
            db = self.db
            if (w2popenid.ok is True and w2popenid.oid):  # OpenID authenticated
                if self._w2popenid_expired(w2popenid):
                    del(current.session.w2popenid)
                    flash = self.messages.flash_openid_expired
                    current.session.warning = flash
                    redirect(self.auth.settings.login_url)

                oid = self._remove_protocol(w2popenid.oid)
                alt_login = self._find_matched_openid(db, oid)

                nextvar = self.nextvar
                # This OpenID is not in the database. If the user is logged in,
                # add it to the database; otherwise ask the user to login or
                # register.
if not alt_login: if self.auth.is_logged_in(): # TODO: ask first maybe self._associate_user_openid(self.auth.user, oid) if current.session.w2popenid: del(current.session.w2popenid) current.session.flash = self.messages.flash_openid_associated if nextvar in request.vars: redirect(request.vars[nextvar]) redirect(self.auth.settings.login_next) if nextvar not in request.vars: # no next var, add it and do login again # so if user login or register can go back here to associate the OpenID redirect(URL(r=request, args=['login'], vars={nextvar: self.login_url})) self.login_form = self._form_with_notification() current.session.flash = self.messages.flash_associate_openid return None # need to login or register to associate this openid # Get existed OpenID user user = db( self.table_user.id == alt_login.oiduser).select().first() if user: if current.session.w2popenid: del(current.session.w2popenid) if 'username' in self.table_user.fields(): username = 'username' elif 'email' in self.table_user.fields(): username = 'email' return {username: user[username]} if user else None # login success (almost) return None # just start to login def _find_matched_openid(self, db, oid, type_='openid'): """ Get the matched OpenID for given """ query = ( (db.alt_logins.username == oid) & (db.alt_logins.oidtype == type_)) alt_login = db(query).select().first() # Get the OpenID record return alt_login def _associate_user_openid(self, user, oid): """ Associate the user logged in with given OpenID """ # print "[DB] %s authenticated" % oid self.db.alt_logins.insert(username=oid, oiduser=user.id) def _form_with_notification(self): """ Render the form for normal login with a notice of OpenID authenticated """ form = DIV() # TODO: check when will happen if self.auth.settings.login_form in (self.auth, self): self.auth.settings.login_form = self.auth form = DIV(self.auth()) register_note = DIV(P(self.messages.p_openid_not_registered)) form.components.append(register_note) return lambda: form def _remove_protocol(self, oid): """ Remove https:// or http:// from oid url """ protocol = 'https://' if oid.startswith(protocol): oid = oid[len(protocol):] return oid protocol = 'http://' if oid.startswith(protocol): oid = oid[len(protocol):] return oid return oid def _init_consumerhelper(self): """ Initialize the ConsumerHelper """ if not hasattr(self, "consumerhelper"): self.consumerhelper = ConsumerHelper(current.session, self.db) return self.consumerhelper def _form(self, style=None): form = DIV(H3(self.messages.h_openid_login), self._login_form(style)) return form def _login_form(self, openid_field_label=None, submit_button=None, _next=None, style=None): """ Render the form for OpenID login """ def warning_openid_fail(session): session.warning = messages.openid_fail_discover style = style or """ background-attachment: scroll; background-repeat: no-repeat; background-image: url("http://wiki.openid.net/f/openid-16x16.gif"); background-position: 0% 50%; background-color: transparent; padding-left: 18px; width: 400px; """ style = style.replace("\n", "") request = current.request session = current.session messages = self.messages hidden_next_input = "" if _next == 'profile': profile_url = URL(r=request, f='user', args=['profile']) hidden_next_input = INPUT( _type="hidden", _name="_next", _value=profile_url) form = FORM( openid_field_label or self.messages.label_alt_login_username, INPUT(_type="input", _name="oid", requires=IS_NOT_EMPTY( error_message=messages.openid_fail_discover), _style=style), hidden_next_input, INPUT(_type="submit", 
_value=submit_button or messages.submit_button), " ", A(messages.comment_openid_signin, _href=messages.comment_openid_help_url, _title=messages.comment_openid_help_title, _class='openid-identifier', _target="_blank"), _action=self.login_url ) if form.accepts(request.vars, session): oid = request.vars.oid consumerhelper = self._init_consumerhelper() url = self.login_url return_to_url = self.return_to_url if not oid: warning_openid_fail(session) redirect(url) try: if '_next' in request.vars: return_to_url = self.return_to_url + \ '?_next=' + request.vars._next url = consumerhelper.begin(oid, self.realm, return_to_url) except DiscoveryFailure: warning_openid_fail(session) redirect(url) return form def _processed(self): """ Check if w2popenid authentication is processed. Return True if processed else False. """ processed = (hasattr(current.session, 'w2popenid') and current.session.w2popenid.ok is True) return processed def _set_w2popenid_expiration(self, w2popenid): """ Set expiration for OpenID authentication. """ w2popenid.expiration = datetime.now( ) + timedelta(minutes=self.openid_expiration) def _w2popenid_expired(self, w2popenid): """ Check if w2popenid authentication is expired. Return True if expired else False. """ return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration) def _process_response(self): """ Process the OpenID by ConsumerHelper. """ request = current.request request_vars = request.vars consumerhelper = self._init_consumerhelper() process_status = consumerhelper.process_response( request_vars, self.return_to_url) if process_status == "success": w2popenid = current.session.w2popenid user_data = self.consumerhelper.sreg() current.session.w2popenid.ok = True self._set_w2popenid_expiration(w2popenid) w2popenid.user_data = user_data current.session.flash = self.messages.flash_openid_authenticated elif process_status == "failure": flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message current.session.warning = flash elif process_status == "cancel": current.session.warning = self.messages.flash_openid_canceled elif process_status == "setup_needed": current.session.warning = self.messages.flash_openid_need_setup def list_user_openids(self): messages = self.messages request = current.request if 'delete_openid' in request.vars: self.remove_openid(request.vars.delete_openid) query = self.db.alt_logins.oiduser == self.auth.user.id alt_logins = self.db(query).select() l = [] for alt_login in alt_logins: username = alt_login.username delete_href = URL(r=request, f='user', args=['profile'], vars={'delete_openid': username}) delete_link = A(messages.a_delete, _href=delete_href) l.append(LI(username, " ", delete_link)) profile_url = URL(r=request, f='user', args=['profile']) #return_to_url = self.return_to_url + '?' 
+ self.nextvar + '=' + profile_url openid_list = DIV(H3(messages.h_openid_list), UL(l), self._login_form( _next='profile', submit_button=messages.submit_button_add, openid_field_label=messages.label_add_alt_login_username) ) return openid_list def remove_openid(self, openid): query = self.db.alt_logins.username == openid self.db(query).delete() class ConsumerHelper(object): """ ConsumerHelper knows the python-openid and """ def __init__(self, session, db): self.session = session store = self._init_store(db) self.consumer = openid.consumer.consumer.Consumer(session, store) def _init_store(self, db): """ Initialize Web2pyStore """ if not hasattr(self, "store"): store = Web2pyStore(db) session = self.session if 'w2popenid' not in session: session.w2popenid = Storage() self.store = store return self.store def begin(self, oid, realm, return_to_url): """ Begin the OpenID authentication """ w2popenid = self.session.w2popenid w2popenid.oid = oid auth_req = self.consumer.begin(oid) auth_req.addExtension(SRegRequest(required=['email', 'nickname'])) url = auth_req.redirectURL(return_to=return_to_url, realm=realm) return url def process_response(self, request_vars, return_to_url): """ Complete the process and """ resp = self.consumer.complete(request_vars, return_to_url) if resp: if resp.status == openid.consumer.consumer.SUCCESS: self.resp = resp if hasattr(resp, "identity_url"): self.session.w2popenid.oid = resp.identity_url return "success" if resp.status == openid.consumer.consumer.FAILURE: self.error_message = resp.message return "failure" if resp.status == openid.consumer.consumer.CANCEL: return "cancel" if resp.status == openid.consumer.consumer.SETUP_NEEDED: return "setup_needed" return "no resp" def sreg(self): """ Try to get OpenID Simple Registation http://openid.net/specs/openid-simple-registration-extension-1_0.html """ if self.resp: resp = self.resp sreg_resp = SRegResponse.fromSuccessResponse(resp) return sreg_resp.data if sreg_resp else None else: return None class Web2pyStore(OpenIDStore): """ Web2pyStore This class implements the OpenIDStore interface. OpenID stores take care of persisting nonces and associations. The Janrain Python OpenID library comes with implementations for file and memory storage. Web2pyStore uses the web2py db abstration layer. See the source code docs of OpenIDStore for a comprehensive description of this interface. """ def __init__(self, database): self.database = database self.table_oid_associations_name = 'oid_associations' self.table_oid_nonces_name = 'oid_nonces' self._initDB() def _initDB(self): if self.table_oid_associations_name not in self.database: self.database.define_table(self.table_oid_associations_name, Field('server_url', 'string', length=2047, required=True), Field('handle', 'string', length=255, required=True), Field('secret', 'blob', required=True), Field('issued', 'integer', required=True), Field('lifetime', 'integer', required=True), Field('assoc_type', 'string', length=64, required=True) ) if self.table_oid_nonces_name not in self.database: self.database.define_table(self.table_oid_nonces_name, Field('server_url', 'string', length=2047, required=True), Field('itimestamp', 'integer', required=True), Field('salt', 'string', length=40, required=True) ) def storeAssociation(self, server_url, association): """ Store associations. If there already is one with the same server_url and handle in the table replace it. 
""" db = self.database query = (db.oid_associations.server_url == server_url) & ( db.oid_associations.handle == association.handle) db(query).delete() db.oid_associations.insert(server_url=server_url, handle=association.handle, secret=association.secret, issued=association.issued, lifetime=association.lifetime, assoc_type=association.assoc_type), 'insert ' * 10 def getAssociation(self, server_url, handle=None): """ Return the association for server_url and handle. If handle is not None return the latests associations for that server_url. Return None if no association can be found. """ db = self.database query = (db.oid_associations.server_url == server_url) if handle: query &= (db.oid_associations.handle == handle) rows = db(query).select(orderby=db.oid_associations.issued) keep_assoc, _ = self._removeExpiredAssocations(rows) if len(keep_assoc) == 0: return None else: assoc = keep_assoc.pop( ) # pop the last one as it should be the latest one return Association(assoc['handle'], assoc['secret'], assoc['issued'], assoc['lifetime'], assoc['assoc_type']) def removeAssociation(self, server_url, handle): db = self.database query = (db.oid_associations.server_url == server_url) & ( db.oid_associations.handle == handle) return db(query).delete() is not None def useNonce(self, server_url, timestamp, salt): """ This method returns Falase if a nonce has been used before or its timestamp is not current. """ db = self.database if abs(timestamp - time.time()) > nonce.SKEW: return False query = (db.oid_nonces.server_url == server_url) & (db.oid_nonces.itimestamp == timestamp) & (db.oid_nonces.salt == salt) if db(query).count() > 0: return False else: db.oid_nonces.insert(server_url=server_url, itimestamp=timestamp, salt=salt) return True def _removeExpiredAssocations(self, rows): """ This helper function is not part of the interface. Given a list of association rows it checks which associations have expired and deletes them from the db. It returns a tuple of the form ([valid_assoc], no_of_expired_assoc_deleted). """ db = self.database keep_assoc = [] remove_assoc = [] t1970 = time.time() for r in rows: if r['issued'] + r['lifetime'] < t1970: remove_assoc.append(r) else: keep_assoc.append(r) for r in remove_assoc: del db.oid_associations[r['id']] return (keep_assoc, len(remove_assoc)) # return tuple (list of valid associations, number of deleted associations) def cleanupNonces(self): """ Remove expired nonce entries from DB and return the number of entries deleted. """ db = self.database query = (db.oid_nonces.itimestamp < time.time() - nonce.SKEW) return db(query).delete() def cleanupAssociations(self): """ Remove expired associations from db and return the number of entries deleted. """ db = self.database query = (db.oid_associations.id > 0) return self._removeExpiredAssocations(db(query).select())[1] # return number of assoc removed def cleanup(self): """ This method should be run periodically to free the db from expired nonce and association entries. """ return self.cleanupNonces(), self.cleanupAssociations()
Python
#!/usr/bin/env python # coding: utf8 """ ExtendedLoginForm is used to extend normal login form in web2py with one more login method. So user can choose the built-in login or extended login methods. """ from gluon import current, DIV class ExtendedLoginForm(object): """ Put extended_login_form under web2py/gluon/contrib/login_methods folder. Then inside your model where defines the auth: auth = Auth(globals(),db) # authentication/authorization ... auth.define_tables() # You might like to put the code after auth.define_tables ... # if the alt_login_form deals with tables of auth. alt_login_form = RPXAccount(request, api_key="...", domain="...", url = "http://localhost:8000/%s/default/user/login" % request.application) extended_login_form = ExtendedLoginForm( auth, alt_login_form, signals=['token']) auth.settings.login_form = extended_login_form Note: Since rpx_account doesn't create the password for the user, you might need to provide a way for user to create password to do normal login. """ def __init__(self, auth, alt_login_form, signals=[], login_arg='login' ): self.auth = auth self.alt_login_form = alt_login_form self.signals = signals self.login_arg = login_arg def get_user(self): """ Delegate the get_user to alt_login_form.get_user. """ if hasattr(self.alt_login_form, 'get_user'): return self.alt_login_form.get_user() return None # let gluon.tools.Auth.get_or_create_user do the rest def login_url(self, next): """ Optional implement for alt_login_form. In normal case, this should be replaced by get_user, and never get called. """ if hasattr(self.alt_login_form, 'login_url'): return self.alt_login_form.login_url(next) return self.auth.settings.login_url def logout_url(self, next): """ Optional implement for alt_login_form. Called if bool(alt_login_form.get_user) is True. If alt_login_form implemented logout_url function, it will return that function call. """ if hasattr(self.alt_login_form, 'logout_url'): return self.alt_login_form.logout_url(next) return next def login_form(self): """ Combine the auth() form with alt_login_form. If signals are set and a parameter in request matches any signals, it will return the call of alt_login_form.login_form instead. So alt_login_form can handle some particular situations, for example, multiple steps of OpenID login inside alt_login_form.login_form. Otherwise it will render the normal login form combined with alt_login_form.login_form. """ request = current.request args = request.args if (self.signals and any([True for signal in self.signals if signal in request.vars]) ): return self.alt_login_form.login_form() self.auth.settings.login_form = self.auth form = DIV(self.auth()) self.auth.settings.login_form = self form.components.append(self.alt_login_form.login_form()) return form
Python
import urllib import urllib2 import base64 def basic_auth(server="http://127.0.0.1"): """ to use basic login with a different server from gluon.contrib.login_methods.basic_auth import basic_auth auth.settings.login_methods.append(basic_auth('http://server')) """ def basic_login_aux(username, password, server=server): key = base64.b64encode(username + ':' + password) headers = {'Authorization': 'Basic ' + key} request = urllib2.Request(server, None, headers) try: urllib2.urlopen(request) return True except (urllib2.URLError, urllib2.HTTPError): return False return basic_login_aux
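
# Illustrative usage sketch (not part of the original module): the factory
# above returns a callable that web2py's Auth invokes as fn(username, password),
# as the docstring's login_methods example shows. The server URL and the
# credentials below are placeholder assumptions.
def _example_basic_auth_check():
    check = basic_auth(server="https://intranet.example.com/protected/")
    return check("alice", "secret")   # True only if the server accepts the credentials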
Python
#!/usr/bin/env python import time from hashlib import md5 from gluon.dal import DAL def motp_auth(db=DAL('sqlite://storage.sqlite'), time_offset=60): """ motp allows you to login with a one time password(OTP) generated on a motp client, motp clients are available for practically all platforms. to know more about OTP visit http://en.wikipedia.org/wiki/One-time_password to know more visit http://motp.sourceforge.net Written by Madhukar R Pai (madspai@gmail.com) License : MIT or GPL v2 thanks and credits to the web2py community to use motp_auth: motp_auth.py has to be located in gluon/contrib/login_methods/ folder first auth_user has to have 2 extra fields - motp_secret and motp_pin for that define auth like shown below: ## after auth = Auth(db) db.define_table( auth.settings.table_user_name, Field('first_name', length=128, default=''), Field('last_name', length=128, default=''), Field('email', length=128, default='', unique=True), # required Field('password', 'password', length=512, # required readable=False, label='Password'), Field('motp_secret',length=512,default='', label='MOTP Seceret'), Field('motp_pin',length=128,default='', label='MOTP PIN'), Field('registration_key', length=512, # required writable=False, readable=False, default=''), Field('reset_password_key', length=512, # required writable=False, readable=False, default=''), Field('registration_id', length=512, # required writable=False, readable=False, default='')) ##validators custom_auth_table = db[auth.settings.table_user_name] # get the custom_auth_table custom_auth_table.first_name.requires = \ IS_NOT_EMPTY(error_message=auth.messages.is_empty) custom_auth_table.last_name.requires = \ IS_NOT_EMPTY(error_message=auth.messages.is_empty) custom_auth_table.password.requires = CRYPT() custom_auth_table.email.requires = [ IS_EMAIL(error_message=auth.messages.invalid_email), IS_NOT_IN_DB(db, custom_auth_table.email)] auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table ## before auth.define_tables() ##after that: from gluon.contrib.login_methods.motp_auth import motp_auth auth.settings.login_methods.append(motp_auth(db=db)) ##Instructions for using MOTP - after configuring motp for web2py, Install a MOTP client on your phone (android,IOS, java, windows phone, etc) - initialize the motp client (to reset a motp secret type in #**#), During user creation enter the secret generated during initialization into the motp_secret field in auth_user and similarly enter a pre-decided pin into the motp_pin - done.. to login, just generate a fresh OTP by typing in the pin and use the OTP as password ###To Dos### - both motp_secret and pin are stored in plain text! need to have some way of encrypting - web2py stores the password in db on successful login (should not happen) - maybe some utility or page to check the otp would be useful - as of now user field is hardcoded to email. Some way of selecting user table and user field. 
""" def verify_otp(otp, pin, secret, offset=60): epoch_time = int(time.time()) time_start = int(str(epoch_time - offset)[:-1]) time_end = int(str(epoch_time + offset)[:-1]) for t in range(time_start - 1, time_end + 1): to_hash = str(t) + secret + pin hash = md5(to_hash).hexdigest()[:6] if otp == hash: return True return False def motp_auth_aux(email, password, db=db, offset=time_offset): if db: user_data = db(db.auth_user.email == email).select().first() if user_data: if user_data['motp_secret'] and user_data['motp_pin']: motp_secret = user_data['motp_secret'] motp_pin = user_data['motp_pin'] otp_check = verify_otp( password, motp_pin, motp_secret, offset=offset) if otp_check: return True else: return False else: return False return False return motp_auth_aux
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Written by Michele Comitini <mcm@glisco.it> License: GPL v3 Adds support for x509 authentication. """ from gluon.globals import current from gluon.storage import Storage from gluon.http import HTTP, redirect #requires M2Crypto from M2Crypto import X509 class X509Auth(object): """ Login using x509 cert from client. from gluon.contrib.login_methods.x509_auth import X509Account auth.settings.actions_disabled=['register','change_password', 'request_reset_password','profile'] auth.settings.login_form = X509Auth() """ def __init__(self): self.request = current.request self.ssl_client_raw_cert = self.request.env.ssl_client_raw_cert # rebuild the certificate passed by the env # this is double work, but it is the only way # since we cannot access the web server ssl engine directly if self.ssl_client_raw_cert: x509 = X509.load_cert_string( self.ssl_client_raw_cert, X509.FORMAT_PEM) # extract it from the cert self.serial = self.request.env.ssl_client_serial or ( '%x' % x509.get_serial_number()).upper() subject = x509.get_subject() # Reordering the subject map to a usable Storage map # this allows us a cleaner syntax: # cn = self.subject.cn self.subject = Storage(filter(None, map(lambda x: (x, map(lambda y: y.get_data( ).as_text(), subject.get_entries_by_nid(subject.nid[x]))), subject.nid.keys()))) def login_form(self, **args): raise HTTP(403, 'Login not allowed. No valid x509 crentials') def login_url(self, next="/"): raise HTTP(403, 'Login not allowed. No valid x509 crentials') def logout_url(self, next="/"): return next def get_user(self): '''Returns the user info contained in the certificate. ''' # We did not get the client cert? if not self.ssl_client_raw_cert: return None # Try to reconstruct some useful info for web2py auth machinery p = profile = dict() username = p['username'] = reduce(lambda a, b: '%s | %s' % ( a, b), self.subject.CN or self.subject.commonName) p['first_name'] = reduce(lambda a, b: '%s | %s' % (a, b), self.subject.givenName or username) p['last_name'] = reduce( lambda a, b: '%s | %s' % (a, b), self.subject.surname) p['email'] = reduce(lambda a, b: '%s | %s' % ( a, b), self.subject.Email or self.subject.emailAddress) # IMPORTANT WE USE THE CERT SERIAL AS UNIQUE KEY FOR THE USER p['registration_id'] = self.serial # If the auth table has a field certificate it will be used to # save a PEM encoded copy of the user certificate. p['certificate'] = self.ssl_client_raw_cert return profile
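
# Illustrative sketch (not part of the original module): get_user() above also
# returns a 'certificate' key, and its comment notes that a matching field on
# the auth table receives a PEM copy of the client certificate. One way to add
# such a field before auth.define_tables() is via extra_fields; the field
# definition here is a minimal assumption.
def _example_add_certificate_field(auth):
    from gluon.dal import Field
    auth.settings.extra_fields['auth_user'] = [
        Field('certificate', 'text',
              writable=False, readable=False)]   # PEM copy of the client cert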
Python
from gluon.contrib.pam import authenticate def pam_auth(): """ to use pam_login: from gluon.contrib.login_methods.pam_auth import pam_auth auth.settings.login_methods.append(pam_auth()) or auth.settings.actions_disabled=[ 'register','change_password','request_reset_password'] auth.settings.login_methods=[pam_auth()] The latter method will not store the user password in auth_user. """ def pam_auth_aux(username, password): return authenticate(username, password) return pam_auth_aux
Python
#!/usr/bin/env python # coding: utf8 """ LoginRadius Authentication for web2py Developed by Nathan Freeze (Copyright © 2013) Email <nathan@freezable.com> This file contains code to allow using loginradius.com authentication services with web2py """ import os from gluon import * from gluon.storage import Storage from gluon.contrib.simplejson import JSONDecodeError from gluon.tools import fetch import gluon.contrib.simplejson as json class LoginRadiusAccount(object): """ from gluon.contrib.login_methods.loginradius_account import LoginRadiusAccount auth.settings.actions_disabled=['register','change_password', 'request_reset_password'] auth.settings.login_form = LoginRadiusAccount(request, api_key="...", api_secret="...", url = "http://localhost:8000/%s/default/user/login" % request.application) """ def __init__(self, request, api_key="", api_secret="", url=None, on_login_failure=None): self.request = request self.api_key = api_key self.api_secret = api_secret self.url = url self.auth_base_url = "https://hub.loginradius.com/UserProfile.ashx/" self.profile = None self.on_login_failure = on_login_failure self.mappings = Storage() def defaultmapping(profile): first_name = profile.get('FirstName') last_name = profile.get('LastName') email = profile.get('Email', [{}])[0].get('Value') reg_id = profile.get('ID', '') username = profile.get('ProfileName', email) return dict(registration_id=reg_id, username=username, email=email, first_name=first_name, last_name=last_name) self.mappings.default = defaultmapping def get_user(self): request = self.request user = None if request.vars.token: try: auth_url = self.auth_base_url + self.api_secret + "/" + request.vars.token json_data = fetch(auth_url, headers={'User-Agent': "LoginRadius - Python - SDK"}) self.profile = json.loads(json_data) provider = self.profile['Provider'] mapping = self.mappings.get(provider, self.mappings['default']) user = mapping(self.profile) except (JSONDecodeError, KeyError): pass if user is None and self.on_login_failure: redirect(self.on_login_failure) return user def login_form(self): loginradius_url = "https://hub.loginradius.com/include/js/LoginRadius.js" loginradius_lib = SCRIPT(_src=loginradius_url, _type='text/javascript') container = DIV(_id="interfacecontainerdiv", _class='interfacecontainerdiv') widget = SCRIPT("""var options={}; options.login=true; LoginRadius_SocialLogin.util.ready(function () { $ui = LoginRadius_SocialLogin.lr_login_settings; $ui.interfacesize = "";$ui.apikey = "%s"; $ui.callback=""; $ui.lrinterfacecontainer ="interfacecontainerdiv"; LoginRadius_SocialLogin.init(options); });""" % self.api_key) form = DIV(container, loginradius_lib, widget) return form def use_loginradius(auth, filename='private/loginradius.key', **kwargs): path = os.path.join(current.request.folder, filename) if os.path.exists(path): request = current.request domain, public_key, private_key = open(path, 'r').read().strip().split(':') url = URL('default', 'user', args='login', scheme=True) auth.settings.actions_disabled = \ ['register', 'change_password', 'request_reset_password'] auth.settings.login_form = LoginRadiusAccount( request, api_key=public_key, api_secret=private_key, url=url, **kwargs)
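
# Illustrative sketch (not part of the original module): use_loginradius()
# above reads private/loginradius.key from the application folder and splits a
# single line of the form "domain:api_key:api_secret". With that file in place
# the whole setup reduces to one call in the model; the values and the failure
# URL below are placeholders.
def _example_enable_loginradius(auth):
    # private/loginradius.key content (one line):
    #   example.com:YOUR-API-KEY:YOUR-API-SECRET
    use_loginradius(auth, filename='private/loginradius.key',
                    on_login_failure=URL('default', 'user', args='login'))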
Python
import smtplib import logging def email_auth(server="smtp.gmail.com:587", domain="@gmail.com", tls_mode=None): """ to use email_login: from gluon.contrib.login_methods.email_auth import email_auth auth.settings.login_methods.append(email_auth("smtp.gmail.com:587", "@gmail.com")) """ def email_auth_aux(email, password, server=server, domain=domain, tls_mode=tls_mode): if domain: if not isinstance(domain, (list, tuple)): domain = [str(domain)] if not [d for d in domain if email[-len(d):] == d]: return False (host, port) = server.split(':') if tls_mode is None: # then auto detect tls_mode = port == '587' try: server = None server = smtplib.SMTP(host, port) server.ehlo() if tls_mode: server.starttls() server.ehlo() server.login(email, password) server.quit() return True except: logging.exception('email_auth() failed') if server: try: server.quit() except: # server might already close connection after error pass return False return email_auth_aux
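
# Illustrative sketch (not part of the original module): when tls_mode is left
# as None the factory above enables STARTTLS only for port 587, so an internal
# relay on port 25 can be used without TLS by passing tls_mode explicitly.
# Host names and domains below are placeholders.
def _example_email_auth_setup(auth):
    auth.settings.login_methods.append(
        email_auth(server="mail.example.com:25",
                   domain=["@example.com", "@example.org"],  # several domains accepted
                   tls_mode=False))                          # explicit, instead of port-based detection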
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of web2py Web Framework (Copyrighted, 2007-2009). Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>. License: GPL v2 Tinkered by Szabolcs Gyuris < szimszo n @ o regpreshaz dot eu> """ from gluon import current, redirect class CasAuth(object): """ Login will be done via Web2py's CAS application, instead of web2py's login form. Include in your model (eg db.py):: from gluon.contrib.login_methods.cas_auth import CasAuth auth.define_tables(username=True) auth.settings.login_form=CasAuth( urlbase = "https://[your CAS provider]/app/default/user/cas", actions=['login','validate','logout']) where urlbase is the actual CAS server url without the login,logout... Enjoy. ###UPDATE### if you want to connect to a CAS version 2 JASIG Server use this: auth.settings.login_form=CasAuth( urlbase = "https://[Your CAS server]/cas", actions = ['login','serviceValidate','logout'], casversion = 2, casusername = "cas:user") where casusername is the xml node returned by CAS server which contains user's username. """ def __init__(self, g=None, # g for backward compatibility ### urlbase="https://web2py.com/cas/cas", actions=['login', 'validate', 'logout'], maps=dict(username=lambda v: v.get('username', v['user']), email=lambda v: v.get('email', None), user_id=lambda v: v['user']), casversion=1, casusername='cas:user' ): self.urlbase = urlbase self.cas_login_url = "%s/%s" % (self.urlbase, actions[0]) self.cas_check_url = "%s/%s" % (self.urlbase, actions[1]) self.cas_logout_url = "%s/%s" % (self.urlbase, actions[2]) self.maps = maps self.casversion = casversion self.casusername = casusername http_host = current.request.env.http_x_forwarded_host if not http_host: http_host = current.request.env.http_host if current.request.env.wsgi_url_scheme in ['https', 'HTTPS']: scheme = 'https' else: scheme = 'http' self.cas_my_url = '%s://%s%s' % ( scheme, http_host, current.request.env.path_info) def login_url(self, next="/"): current.session.token = self._CAS_login() return next def logout_url(self, next="/"): current.session.token = None current.session.auth = None self._CAS_logout() return next def get_user(self): user = current.session.token if user: d = {'source': 'web2py cas'} for key in self.maps: d[key] = self.maps[key](user) return d return None def _CAS_login(self): """ exposed as CAS.login(request) returns a token on success, None on failed authentication """ import urllib self.ticket = current.request.vars.ticket if not current.request.vars.ticket: redirect("%s?service=%s" % (self.cas_login_url, self.cas_my_url)) else: url = "%s?service=%s&ticket=%s" % (self.cas_check_url, self.cas_my_url, self.ticket) data = urllib.urlopen(url).read() if data.startswith('yes') or data.startswith('no'): data = data.split('\n') if data[0] == 'yes': if ':' in data[1]: # for Compatibility with Custom CAS items = data[1].split(':') a = items[0] b = len(items) > 1 and items[1] or a c = len(items) > 2 and items[2] or b else: a = b = c = data[1] return dict(user=a, email=b, username=c) return None import xml.dom.minidom as dom import xml.parsers.expat as expat try: dxml = dom.parseString(data) envelop = dxml.getElementsByTagName( "cas:authenticationSuccess") if len(envelop) > 0: res = dict() for x in envelop[0].childNodes: if x.nodeName.startswith('cas:') and len(x.childNodes): key = x.nodeName[4:].encode('utf8') value = x.childNodes[0].nodeValue.encode('utf8') if not key in res: res[key] = value else: if not isinstance(res[key], list): res[key] = [res[key]] 
res[key].append(value) return res except expat.ExpatError: pass return None # fallback def _CAS_logout(self): """ exposed CAS.logout() redirects to the CAS logout page """ import urllib redirect("%s?service=%s" % (self.cas_logout_url, self.cas_my_url))
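
# Illustrative sketch (not part of the original module): overriding the default
# attribute maps so extra attributes returned by the CAS server end up in the
# dict passed to web2py's auth machinery. The base URL and the 'firstname'
# attribute name are placeholder assumptions.
def _example_cas_login_form():
    return CasAuth(urlbase="https://cas.example.com/app/default/user/cas",
                   actions=['login', 'validate', 'logout'],
                   maps=dict(username=lambda v: v.get('username', v['user']),
                             email=lambda v: v.get('email', None),
                             first_name=lambda v: v.get('firstname', v['user']),
                             user_id=lambda v: v['user']))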
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Loginza.ru authentication for web2py Developed by Vladimir Dronnikov (Copyright © 2011) Email <dronnikov@gmail.com> """ import urllib from gluon.html import * from gluon.tools import fetch from gluon.storage import Storage import gluon.contrib.simplejson as json class Loginza(object): """ from gluon.contrib.login_methods.loginza import Loginza auth.settings.login_form = Loginza(request, url = "http://localhost:8000/%s/default/user/login" % request.application) """ def __init__(self, request, url="", embed=True, auth_url="http://loginza.ru/api/authinfo", language="en", prompt="loginza", on_login_failure=None, ): self.request = request self.token_url = url self.embed = embed self.auth_url = auth_url self.language = language self.prompt = prompt self.profile = None self.on_login_failure = on_login_failure self.mappings = Storage() # TODO: profile.photo is the URL to the picture # Howto download and store it locally? # FIXME: what if email is unique=True self.mappings["http://twitter.com/"] = lambda profile:\ dict(registration_id=profile.get("identity", ""), username=profile.get("nickname", ""), email=profile.get("email", ""), last_name=profile.get("name", "").get("full_name", ""), #avatar = profile.get("photo",""), ) self.mappings["https://www.google.com/accounts/o8/ud"] = lambda profile:\ dict(registration_id=profile.get("identity", ""), username=profile.get("name", "").get("full_name", ""), email=profile.get("email", ""), first_name=profile.get("name", "").get("first_name", ""), last_name=profile.get("name", "").get("last_name", ""), #avatar = profile.get("photo",""), ) self.mappings["http://vkontakte.ru/"] = lambda profile:\ dict(registration_id=profile.get("identity", ""), username=profile.get("name", "").get("full_name", ""), email=profile.get("email", ""), first_name=profile.get("name", "").get("first_name", ""), last_name=profile.get("name", "").get("last_name", ""), #avatar = profile.get("photo",""), ) self.mappings.default = lambda profile:\ dict(registration_id=profile.get("identity", ""), username=profile.get("name", "").get("full_name"), email=profile.get("email", ""), first_name=profile.get("name", "").get("first_name", ""), last_name=profile.get("name", "").get("last_name", ""), #avatar = profile.get("photo",""), ) def get_user(self): request = self.request if request.vars.token: user = Storage() data = urllib.urlencode(dict(token=request.vars.token)) auth_info_json = fetch(self.auth_url + '?' + data) #print auth_info_json auth_info = json.loads(auth_info_json) if auth_info["identity"] is not None: self.profile = auth_info provider = self.profile["provider"] user = self.mappings.get( provider, self.mappings.default)(self.profile) #user["password"] = ??? #user["avatar"] = ??? return user elif self.on_login_failure: redirect(self.on_login_failure) return None def login_form(self): request = self.request args = request.args LOGINZA_URL = "https://loginza.ru/api/widget?lang=%s&token_url=%s&overlay=loginza" if self.embed: form = IFRAME(_src=LOGINZA_URL % (self.language, self.token_url), _scrolling="no", _frameborder="no", _style="width:359px;height:300px;") else: form = DIV( A(self.prompt, _href=LOGINZA_URL % ( self.language, self.token_url), _class="loginza"), SCRIPT(_src="https://s3-eu-west-1.amazonaws.com/s1.loginza.ru/js/widget.js", _type="text/javascript")) return form
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Written by Michele Comitini <mcm@glisco.it> License: LGPL v3 Adds support for OAuth 2.0 authentication to web2py. OAuth 2.0 spec: http://tools.ietf.org/html/rfc6749 """ import time import cgi import urllib2 from urllib import urlencode from gluon import current, redirect, HTTP import json class OAuthAccount(object): """ Login will be done via OAuth Framework, instead of web2py's login form. You need to override the get_user method to match your auth provider needs. Example for facebook in your model (eg db.py):: # define the auth_table before call to auth.define_tables() auth_table = db.define_table( auth.settings.table_user_name, Field('first_name', length=128, default=""), Field('last_name', length=128, default=""), Field('username', length=128, default="", unique=True), Field('password', 'password', length=256, readable=False, label='Password'), Field('registration_key', length=128, default= "", writable=False, readable=False)) auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) auth.define_tables() CLIENT_ID=\"<put your fb application id here>\" CLIENT_SECRET=\"<put your fb application secret here>\" AUTH_URL="http://..." TOKEN_URL="http://..." # remember to download and install facebook GraphAPI module in your app from facebook import GraphAPI, GraphAPIError from gluon.contrib.login_methods.oauth20_account import OAuthAccount class FaceBookAccount(OAuthAccount): '''OAuth impl for FaceBook''' AUTH_URL="https://graph.facebook.com/oauth/authorize" TOKEN_URL="https://graph.facebook.com/oauth/access_token" def __init__(self): OAuthAccount.__init__(self, client_id=CLIENT_ID, client_secret=CLIENT_SECRET, auth_url=self.AUTH_URL, token_url=self.TOKEN_URL, scope='user_photos,friends_photos') self.graph = None def get_user(self): ''' Returns the user using the Graph API. ''' if not self.accessToken(): return None if not self.graph: self.graph = GraphAPI((self.accessToken())) user = None try: user = self.graph.get_object("me") except GraphAPIError, e: self.session.token = None self.graph = None if user: return dict(first_name = user['first_name'], last_name = user['last_name'], username = user['id']) auth.settings.actions_disabled=['register', 'change_password','request_reset_password','profile'] auth.settings.login_form=FaceBookAccount() Any optional arg in the constructor will be passed asis to remote server for requests. It can be used for the optional"scope" parameters for Facebook. """ def __redirect_uri(self, next=None): """ Build the uri used by the authenticating server to redirect the client back to the page originating the auth request. Appends the _next action to the generated url so the flows continues. """ r = current.request http_host = r.env.http_host if r.env.https == 'on': url_scheme = 'https' else: url_scheme = r.env.wsgi_url_scheme if next: path_info = next else: path_info = r.env.path_info uri = '%s://%s%s' % (url_scheme, http_host, path_info) if r.get_vars and not next: uri += '?' + urlencode(r.get_vars) return uri def __build_url_opener(self, uri): """ Build the url opener for managing HTTP Basic Athentication """ # Create an OpenerDirector with support # for Basic HTTP Authentication... 
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() password_mgr.add_password(realm=None, uri=uri, user=self.client_id, passwd=self.client_secret) handler = urllib2.HTTPBasicAuthHandler(password_mgr) opener = urllib2.build_opener(handler) return opener def accessToken(self): """ Return the access token generated by the authenticating server. If token is already in the session that one will be used. Otherwise the token is fetched from the auth server. """ if current.session.token and 'expires' in current.session.token: expires = current.session.token['expires'] # reuse token until expiration if expires == 0 or expires > time.time(): return current.session.token['access_token'] code = current.request.vars.code if code: data = dict(client_id=self.client_id, client_secret=self.client_secret, redirect_uri=current.session.redirect_uri, code=code, grant_type='authorization_code' ) open_url = None opener = self.__build_url_opener(self.token_url) try: open_url = opener.open(self.token_url, urlencode(data), self.socket_timeout) except urllib2.HTTPError, e: tmp = e.read() raise Exception(tmp) finally: if current.session.code: del current.session.code # throw it away if open_url: try: data = open_url.read() resp_type = open_url.info().get('Content-Type') # try json style first if not resp_type or resp_type == 'application/json': try: tokendata = json.loads(data) current.session.token = tokendata except Exception, e: raise Exception("Cannot parse oauth server response %s %s" % (data, e)) else: # try facebook style first with x-www-form-encoded tokendata = cgi.parse_qs(data) current.session.token = \ dict([(k, v[-1]) for k, v in tokendata.items()]) if not tokendata: # parsing failed? raise Exception("Cannot parse oauth server response %s" % data) # set expiration absolute time try to avoid broken # implementations where "expires_in" becomes "expires" if 'expires_in' in current.session.token: exps = 'expires_in' elif 'expires' in current.session.token: exps = 'expires' else: exps = None current.session.token['expires'] = exps and \ int(current.session.token[exps]) + \ time.time() finally: opener.close() return current.session.token['access_token'] current.session.token = None return None def __init__(self, g=None, client_id=None, client_secret=None, auth_url=None, token_url=None, socket_timeout=60, **args): """ first argument is unused. Here only for legacy reasons. """ if [client_id, client_secret, auth_url, token_url].count(None) > 0: raise RuntimeError("""Following args are mandatory: client_id, client_secret, auth_url, token_url. """) self.client_id = client_id self.client_secret = client_secret self.auth_url = auth_url self.token_url = token_url self.args = args self.socket_timeout = socket_timeout def login_url(self, next="/"): self.__oauth_login(next) return next def logout_url(self, next="/"): del current.session.token return next def get_user(self): """ Override this method by sublcassing the class. """ if not current.session.token: return None return dict(first_name='Pinco', last_name='Pallino', username='pincopallino') raise NotImplementedError("Must override get_user()") # Following code is never executed. It can be used as example # for overriding in subclasses. 
if not self.accessToken(): return None if not self.graph: self.graph = GraphAPI((self.accessToken())) user = None try: user = self.graph.get_object("me") except GraphAPIError: current.session.token = None self.graph = None if user: return dict(first_name=user['first_name'], last_name=user['last_name'], username=user['id']) def __oauth_login(self, next): """ This method redirects the user to the authenticating form on authentication server if the authentication code and the authentication token are not available to the application yet. Once the authentication code has been received this method is called to set the access token into the session by calling accessToken() """ token = self.accessToken() if not token: current.session.redirect_uri = self.__redirect_uri(next) data = dict(redirect_uri=current.session.redirect_uri, response_type='code', client_id=self.client_id) if self.args: data.update(self.args) auth_request_url = self.auth_url + "?" + urlencode(data) raise HTTP(302, "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>", Location=auth_request_url) return
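
# Illustrative sketch (not part of the original module): a minimal subclass for
# a generic OAuth 2.0 provider. The endpoint URLs, the userinfo response fields
# and the scope value are placeholder assumptions; only accessToken() and the
# constructor come from the class above. urllib2 and json are already imported
# at the top of this module.
class ExampleOAuth2Login(OAuthAccount):
    def __init__(self, client_id, client_secret):
        OAuthAccount.__init__(self, None, client_id, client_secret,
                              auth_url="https://provider.example.com/oauth/authorize",
                              token_url="https://provider.example.com/oauth/token",
                              scope="profile email")   # extra args are forwarded to the provider

    def get_user(self):
        token = self.accessToken()
        if not token:
            return None
        req = urllib2.Request("https://provider.example.com/userinfo",
                              headers={'Authorization': 'Bearer ' + token})
        data = json.loads(urllib2.urlopen(req).read())
        return dict(first_name=data.get('given_name', ''),
                    last_name=data.get('family_name', ''),
                    username=data.get('sub', ''))

# Typical wiring in the model (placeholder credentials):
#   auth.settings.login_form = ExampleOAuth2Login(CLIENT_ID, CLIENT_SECRET)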
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Written by Michele Comitini <mcm@glisco.it> License: GPL v3 Adds support for OAuth1.0a authentication to web2py. Dependencies: - python-oauth2 (http://github.com/simplegeo/python-oauth2) """ import oauth2 as oauth import cgi from urllib import urlencode from gluon import current class OAuthAccount(object): """ Login will be done via OAuth Framework, instead of web2py's login form. Include in your model (eg db.py):: # define the auth_table before call to auth.define_tables() auth_table = db.define_table( auth.settings.table_user_name, Field('first_name', length=128, default=""), Field('last_name', length=128, default=""), Field('username', length=128, default="", unique=True), Field('password', 'password', length=256, readable=False, label='Password'), Field('registration_key', length=128, default= "", writable=False, readable=False)) auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) . . . auth.define_tables() . . . CLIENT_ID=\"<put your fb application id here>\" CLIENT_SECRET=\"<put your fb application secret here>\" AUTH_URL="..." TOKEN_URL="..." ACCESS_TOKEN_URL="..." from gluon.contrib.login_methods.oauth10a_account import OAuthAccount auth.settings.login_form=OAuthAccount(globals( ),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL) """ def __redirect_uri(self, next=None): """Build the uri used by the authenticating server to redirect the client back to the page originating the auth request. Appends the _next action to the generated url so the flows continues. """ r = self.request http_host = r.env.http_host url_scheme = r.env.wsgi_url_scheme if next: path_info = next else: path_info = r.env.path_info uri = '%s://%s%s' % (url_scheme, http_host, path_info) if r.get_vars and not next: uri += '?' + urlencode(r.get_vars) return uri def accessToken(self): """Return the access token generated by the authenticating server. If token is already in the session that one will be used. Otherwise the token is fetched from the auth server. """ if self.session.access_token: # return the token (TODO: does it expire?) return self.session.access_token if self.session.request_token: # Exchange the request token with an authorization token. token = self.session.request_token self.session.request_token = None # Build an authorized client # OAuth1.0a put the verifier! token.set_verifier(self.request.vars.oauth_verifier) client = oauth.Client(self.consumer, token) resp, content = client.request(self.access_token_url, "POST") if str(resp['status']) != '200': self.session.request_token = None self.globals['redirect'](self.globals[ 'URL'](f='user', args='logout')) self.session.access_token = oauth.Token.from_string(content) return self.session.access_token self.session.access_token = None return None def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url): self.globals = g self.client_id = client_id self.client_secret = client_secret self.code = None self.request = current.request self.session = current.session self.auth_url = auth_url self.token_url = token_url self.access_token_url = access_token_url # consumer init self.consumer = oauth.Consumer(self.client_id, self.client_secret) def login_url(self, next="/"): self.__oauth_login(next) return next def logout_url(self, next="/"): self.session.request_token = None self.session.access_token = None return next def get_user(self): '''Get user data. Since OAuth does not specify what a user is, this function must be implemented for the specific provider. 
''' raise NotImplementedError("Must override get_user()") def __oauth_login(self, next): '''This method redirects the user to the authenticating form on authentication server if the authentication code and the authentication token are not available to the application yet. Once the authentication code has been received this method is called to set the access token into the session by calling accessToken() ''' if not self.accessToken(): # setup the client client = oauth.Client(self.consumer, None) # Get a request token. # oauth_callback *is REQUIRED* for OAuth1.0a # putting it in the body seems to work. callback_url = self.__redirect_uri(next) data = urlencode(dict(oauth_callback=callback_url)) resp, content = client.request(self.token_url, "POST", body=data) if resp['status'] != '200': self.session.request_token = None self.globals['redirect'](self.globals[ 'URL'](f='user', args='logout')) # Store the request token in session. request_token = self.session.request_token = oauth.Token.from_string(content) # Redirect the user to the authentication URL and pass the callback url. data = urlencode(dict(oauth_token=request_token.key, oauth_callback=callback_url)) auth_request_url = self.auth_url + '?' + data HTTP = self.globals['HTTP'] raise HTTP(302, "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>", Location=auth_request_url) return None
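
# Illustrative sketch (not part of the original module): a subclass overriding
# get_user() for a provider whose "verify credentials" endpoint returns JSON,
# signed with the access token obtained by accessToken() above. The endpoint
# URL and the response field names are placeholder assumptions.
class ExampleOAuth10aLogin(OAuthAccount):
    VERIFY_URL = "https://api.provider.example.com/1.1/account/verify_credentials.json"

    def get_user(self):
        import json
        token = self.accessToken()
        if not token:
            return None
        client = oauth.Client(self.consumer, token)   # request signed with the access token
        resp, content = client.request(self.VERIFY_URL, "GET")
        if str(resp['status']) != '200':
            return None
        data = json.loads(content)
        return dict(username=data.get('screen_name', ''),
                    first_name=data.get('name', ''),
                    registration_id=str(data.get('id', '')))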
Python