code
stringlengths
1
1.72M
language
stringclasses
1 value
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Connector for Python (CGI and WSGI). """ import os try: # Windows needs stdio set for binary mode for file upload to work. import msvcrt msvcrt.setmode (0, os.O_BINARY) # stdin = 0 msvcrt.setmode (1, os.O_BINARY) # stdout = 1 except ImportError: pass from fckutil import * from fckoutput import * import config as Config class GetFoldersCommandMixin (object): def getFolders(self, resourceType, currentFolder): """ Purpose: command to recieve a list of folders """ # Map the virtual path to our local server serverPath = mapServerFolder(self.userFilesFolder,currentFolder) s = """<Folders>""" # Open the folders node for someObject in os.listdir(serverPath): someObjectPath = mapServerFolder(serverPath, someObject) if os.path.isdir(someObjectPath): s += """<Folder name="%s" />""" % ( convertToXmlAttribute(someObject) ) s += """</Folders>""" # Close the folders node return s class GetFoldersAndFilesCommandMixin (object): def getFoldersAndFiles(self, resourceType, currentFolder): """ Purpose: command to recieve a list of folders and files """ # Map the virtual path to our local server serverPath = mapServerFolder(self.userFilesFolder,currentFolder) # Open the folders / files node folders = """<Folders>""" files = """<Files>""" for someObject in os.listdir(serverPath): someObjectPath = mapServerFolder(serverPath, someObject) if os.path.isdir(someObjectPath): folders += """<Folder name="%s" />""" % ( 
convertToXmlAttribute(someObject) ) elif os.path.isfile(someObjectPath): size = os.path.getsize(someObjectPath) if size > 0: size = round(size/1024) if size < 1: size = 1 files += """<File name="%s" size="%d" />""" % ( convertToXmlAttribute(someObject), size ) # Close the folders / files node folders += """</Folders>""" files += """</Files>""" return folders + files class CreateFolderCommandMixin (object): def createFolder(self, resourceType, currentFolder): """ Purpose: command to create a new folder """ errorNo = 0; errorMsg =''; if self.request.has_key("NewFolderName"): newFolder = self.request.get("NewFolderName", None) newFolder = sanitizeFolderName (newFolder) try: newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder)) self.createServerFolder(newFolderPath) except Exception, e: errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!! if hasattr(e,'errno'): if e.errno==17: #file already exists errorNo=0 elif e.errno==13: # permission denied errorNo = 103 elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name errorNo = 102 else: errorNo = 110 else: errorNo = 102 return self.sendErrorNode ( errorNo, errorMsg ) def createServerFolder(self, folderPath): "Purpose: physically creates a folder on the server" # No need to check if the parent exists, just create all hierachy try: permissions = Config.ChmodOnFolderCreate if not permissions: os.makedirs(folderPath) except AttributeError: #ChmodOnFolderCreate undefined permissions = 0755 if permissions: oldumask = os.umask(0) os.makedirs(folderPath,mode=0755) os.umask( oldumask ) class UploadFileCommandMixin (object): def uploadFile(self, resourceType, currentFolder): """ Purpose: command to upload files to server (same as FileUpload) """ errorNo = 0 if self.request.has_key("NewFile"): # newFile has all the contents we need newFile = self.request.get("NewFile", "") # Get the file name newFileName = newFile.filename 
newFileName = sanitizeFileName( newFileName ) newFileNameOnly = removeExtension(newFileName) newFileExtension = getExtension(newFileName).lower() allowedExtensions = Config.AllowedExtensions[resourceType] deniedExtensions = Config.DeniedExtensions[resourceType] if (allowedExtensions): # Check for allowed isAllowed = False if (newFileExtension in allowedExtensions): isAllowed = True elif (deniedExtensions): # Check for denied isAllowed = True if (newFileExtension in deniedExtensions): isAllowed = False else: # No extension limitations isAllowed = True if (isAllowed): # Upload to operating system # Map the virtual path to the local server path currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder) i = 0 while (True): newFilePath = os.path.join (currentFolderPath,newFileName) if os.path.exists(newFilePath): i += 1 newFileName = "%s(%d).%s" % ( newFileNameOnly, i, newFileExtension ) errorNo= 201 # file renamed else: # Read file contents and write to the desired path (similar to php's move_uploaded_file) fout = file(newFilePath, 'wb') while (True): chunk = newFile.file.read(100000) if not chunk: break fout.write (chunk) fout.close() if os.path.exists ( newFilePath ): doChmod = False try: doChmod = Config.ChmodOnUpload permissions = Config.ChmodOnUpload except AttributeError: #ChmodOnUpload undefined doChmod = True permissions = 0755 if ( doChmod ): oldumask = os.umask(0) os.chmod( newFilePath, permissions ) os.umask( oldumask ) newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName return self.sendUploadResults( errorNo , newFileUrl, newFileName ) else: return self.sendUploadResults( errorNo = 202, customMsg = "" ) else: return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This is the "File Uploader" for Python """ import os from fckutil import * from fckcommands import * # default command's implementation from fckconnector import FCKeditorConnectorBase # import base connector import config as Config class FCKeditorQuickUpload( FCKeditorConnectorBase, UploadFileCommandMixin, BaseHttpMixin, BaseHtmlMixin): def doResponse(self): "Main function. Process the request, set headers and return a string as response." # Check if this connector is disabled if not(Config.Enabled): return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"") command = 'QuickUpload' # The file type (from the QueryString, by default 'File'). resourceType = self.request.get('Type','File') currentFolder = "/" # Check for invalid paths if currentFolder is None: return self.sendUploadResults(102, '', '', "") # Check if it is an allowed command if ( not command in Config.ConfigAllowedCommands ): return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command ) if ( not resourceType in Config.ConfigAllowedTypes ): return self.sendUploadResults( 1, '', '', 'Invalid type specified' ) # Setup paths self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType] self.webUserFilesFolder = Config.QuickUploadPath[resourceType] if not self.userFilesFolder: # no absolute path given (dangerous...) 
self.userFilesFolder = mapServerPath(self.environ, self.webUserFilesFolder) # Ensure that the directory exists. if not os.path.exists(self.userFilesFolder): try: self.createServerFoldercreateServerFolder( self.userFilesFolder ) except: return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ") # File upload doesn't have to return XML, so intercept here return self.uploadFile(resourceType, currentFolder) # Running from command line (plain old CGI) if __name__ == '__main__': try: # Create a Connector Instance conn = FCKeditorQuickUpload() data = conn.doResponse() for header in conn.headers: if not header is None: print '%s: %s' % header print print data except: print "Content-Type: text/plain" print import cgi cgi.print_exception()
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

Licensed under the terms of any of the following licenses at your choice:
GPL v2+ (http://www.gnu.org/licenses/gpl.html),
LGPL v2.1+ (http://www.gnu.org/licenses/lgpl.html) or
MPL 1.1+ (http://www.mozilla.org/MPL/MPL-1.1.html).

Base Connector for Python (CGI and WSGI).
See config.py for configuration settings.
"""

import cgi, os

from fckutil import *
from fckcommands import *	# default command's implementation
from fckoutput import *	# base http, xml and html output mixins
import config as Config


class FCKeditorConnectorBase(object):
	"""The base connector class. Subclass it to extend functionality
	(see the Zope connector for an example)."""

	def __init__(self, environ=None):
		"""Constructor: parse request fields, initialize variables, etc."""
		self.request = FCKeditorRequest(environ)  # Parse request
		self.headers = []  # Clean headers
		if environ:
			self.environ = environ
		else:
			self.environ = os.environ

	# local functions

	def setHeader(self, key, value):
		# Accumulate response headers; the entry point emits them later.
		self.headers.append((key, value))
		return


class FCKeditorRequest(object):
	"""A wrapper around the request object (WSGI environ or plain CGI)."""

	def __init__(self, environ):
		if environ:  # WSGI
			self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
					environ=environ,
					keep_blank_values=1)
			self.environ = environ
		else:  # plain old CGI
			self.environ = os.environ
			self.request = cgi.FieldStorage()
		# BUGFIX: always define get_request up front; previously it was
		# only assigned when both REQUEST_METHOD and QUERY_STRING were
		# present, so has_key()/get() could raise AttributeError.
		self.get_request = {}
		if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
			if self.environ['REQUEST_METHOD'].upper() == 'POST':
				# We are in a POST, but a GET query string exists too.
				# cgi parses the POST data by default, so parse the GET
				# QUERY_STRING separately.
				self.get_request = cgi.FieldStorage(fp=None,
						environ={
							'REQUEST_METHOD': 'GET',
							'QUERY_STRING': self.environ['QUERY_STRING'],
						},
				)

	def has_key(self, key):
		return self.request.has_key(key) or self.get_request.has_key(key)

	def get(self, key, default=None):
		if key in self.request.keys():
			field = self.request[key]
		elif key in self.get_request.keys():
			field = self.get_request[key]
		else:
			return default
		if hasattr(field, "filename") and field.filename:
			# File upload: do not convert; return the field itself.
			return field
		else:
			return field.value
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Connector for Python (CGI and WSGI). See config.py for configuration settings """ import os from fckutil import * from fckcommands import * # default command's implementation from fckoutput import * # base http, xml and html output mixins from fckconnector import FCKeditorConnectorBase # import base connector import config as Config class FCKeditorConnector( FCKeditorConnectorBase, GetFoldersCommandMixin, GetFoldersAndFilesCommandMixin, CreateFolderCommandMixin, UploadFileCommandMixin, BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ): "The Standard connector class." def doResponse(self): "Main function. Process the request, set headers and return a string as response." s = "" # Check if this connector is disabled if not(Config.Enabled): return self.sendError(1, "This connector is disabled. 
Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.") # Make sure we have valid inputs for key in ("Command","Type","CurrentFolder"): if not self.request.has_key (key): return # Get command, resource type and current folder command = self.request.get("Command") resourceType = self.request.get("Type") currentFolder = getCurrentFolder(self.request.get("CurrentFolder")) # Check for invalid paths if currentFolder is None: if (command == "FileUpload"): return self.sendUploadResults( errorNo = 102, customMsg = "" ) else: return self.sendError(102, "") # Check if it is an allowed command if ( not command in Config.ConfigAllowedCommands ): return self.sendError( 1, 'The %s command isn\'t allowed' % command ) if ( not resourceType in Config.ConfigAllowedTypes ): return self.sendError( 1, 'Invalid type specified' ) # Setup paths if command == "QuickUpload": self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType] self.webUserFilesFolder = Config.QuickUploadPath[resourceType] else: self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType] self.webUserFilesFolder = Config.FileTypesPath[resourceType] if not self.userFilesFolder: # no absolute path given (dangerous...) self.userFilesFolder = mapServerPath(self.environ, self.webUserFilesFolder) # Ensure that the directory exists. if not os.path.exists(self.userFilesFolder): try: self.createServerFolder( self.userFilesFolder ) except: return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. 
") # File upload doesn't have to return XML, so intercept here if (command == "FileUpload"): return self.uploadFile(resourceType, currentFolder) # Create Url url = combinePaths( self.webUserFilesFolder, currentFolder ) # Begin XML s += self.createXmlHeader(command, resourceType, currentFolder, url) # Execute the command selector = {"GetFolders": self.getFolders, "GetFoldersAndFiles": self.getFoldersAndFiles, "CreateFolder": self.createFolder, } s += selector[command](resourceType, currentFolder) s += self.createXmlFooter() return s # Running from command line (plain old CGI) if __name__ == '__main__': try: # Create a Connector Instance conn = FCKeditorConnector() data = conn.doResponse() for header in conn.headers: print '%s: %s' % header print print data except: print "Content-Type: text/plain" print import cgi cgi.print_exception()
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Connector for Python and Zope. This code was not tested at all. It just was ported from pre 2.5 release, so for further reference see \editor\filemanager\browser\default\connectors\py\connector.py in previous releases. """ from fckutil import * from connector import * import config as Config class FCKeditorConnectorZope(FCKeditorConnector): """ Zope versiof FCKeditorConnector """ # Allow access (Zope) __allow_access_to_unprotected_subobjects__ = 1 def __init__(self, context=None): """ Constructor """ FCKeditorConnector.__init__(self, environ=None) # call superclass constructor # Instance Attributes self.context = context self.request = FCKeditorRequest(context) def getZopeRootContext(self): if self.zopeRootContext is None: self.zopeRootContext = self.context.getPhysicalRoot() return self.zopeRootContext def getZopeUploadContext(self): if self.zopeUploadContext is None: folderNames = self.userFilesFolder.split("/") c = self.getZopeRootContext() for folderName in folderNames: if (folderName <> ""): c = c[folderName] self.zopeUploadContext = c return self.zopeUploadContext def setHeader(self, key, value): self.context.REQUEST.RESPONSE.setHeader(key, value) def getFolders(self, resourceType, currentFolder): # Open the folders node s = "" s += """<Folders>""" zopeFolder = self.findZopeFolder(resourceType, currentFolder) for (name, o) in zopeFolder.objectItems(["Folder"]): s += """<Folder name="%s" />""" % ( 
convertToXmlAttribute(name) ) # Close the folders node s += """</Folders>""" return s def getZopeFoldersAndFiles(self, resourceType, currentFolder): folders = self.getZopeFolders(resourceType, currentFolder) files = self.getZopeFiles(resourceType, currentFolder) s = folders + files return s def getZopeFiles(self, resourceType, currentFolder): # Open the files node s = "" s += """<Files>""" zopeFolder = self.findZopeFolder(resourceType, currentFolder) for (name, o) in zopeFolder.objectItems(["File","Image"]): s += """<File name="%s" size="%s" />""" % ( convertToXmlAttribute(name), ((o.get_size() / 1024) + 1) ) # Close the files node s += """</Files>""" return s def findZopeFolder(self, resourceType, folderName): # returns the context of the resource / folder zopeFolder = self.getZopeUploadContext() folderName = self.removeFromStart(folderName, "/") folderName = self.removeFromEnd(folderName, "/") if (resourceType <> ""): try: zopeFolder = zopeFolder[resourceType] except: zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType) zopeFolder = zopeFolder[resourceType] if (folderName <> ""): folderNames = folderName.split("/") for folderName in folderNames: zopeFolder = zopeFolder[folderName] return zopeFolder def createFolder(self, resourceType, currentFolder): # Find out where we are zopeFolder = self.findZopeFolder(resourceType, currentFolder) errorNo = 0 errorMsg = "" if self.request.has_key("NewFolderName"): newFolder = self.request.get("NewFolderName", None) zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder) else: errorNo = 102 return self.sendErrorNode ( errorNo, errorMsg ) def uploadFile(self, resourceType, currentFolder, count=None): zopeFolder = self.findZopeFolder(resourceType, currentFolder) file = self.request.get("NewFile", None) fileName = self.getFileName(file.filename) fileNameOnly = self.removeExtension(fileName) fileExtension = self.getExtension(fileName).lower() if (count): nid = 
"%s.%s.%s" % (fileNameOnly, count, fileExtension) else: nid = fileName title = nid try: zopeFolder.manage_addProduct['OFSP'].manage_addFile( id=nid, title=title, file=file.read() ) except: if (count): count += 1 else: count = 1 return self.zopeFileUpload(resourceType, currentFolder, count) return self.sendUploadResults( 0 ) class FCKeditorRequest(object): "A wrapper around the request object" def __init__(self, context=None): r = context.REQUEST self.request = r def has_key(self, key): return self.request.has_key(key) def get(self, key, default=None): return self.request.get(key, default) """ Running from zope, you will need to modify this connector. If you have uploaded the FCKeditor into Zope (like me), you need to move this connector out of Zope, and replace the "connector" with an alias as below. The key to it is to pass the Zope context in, as we then have a like to the Zope context. ## Script (Python) "connector.py" ##bind container=container ##bind context=context ##bind namespace= ##bind script=script ##bind subpath=traverse_subpath ##parameters=*args, **kws ##title=ALIAS ## import Products.zope as connector return connector.FCKeditorConnectorZope(context=context).doResponse() """
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

Licensed under the terms of any of the following licenses at your choice:
GPL v2+ (http://www.gnu.org/licenses/gpl.html),
LGPL v2.1+ (http://www.gnu.org/licenses/lgpl.html) or
MPL 1.1+ (http://www.mozilla.org/MPL/MPL-1.1.html).

Connector for Python (CGI and WSGI): base HTTP, XML and HTML output mixins.
"""

from time import gmtime, strftime
import string


def escape(text, replace=string.replace):
	"""
	Converts the special characters '<', '>' and '&'.

	RFC 1866 specifies that these characters be represented in HTML as
	&lt; &gt; and &amp; respectively. In Python 1.5 we use the new
	string.replace() function for speed.
	"""
	text = replace(text, '&', '&amp;')  # must be done first
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text


def convertToXmlAttribute(value):
	"""Escape a value for use inside an XML attribute; None maps to ''."""
	if value is None:
		value = ""
	return escape(value)


class BaseHttpMixin(object):

	def setHttpHeaders(self, content_type='text/xml'):
		"""Purpose: prepare the headers for the response."""
		# Prevent the browser from caching the result.
		# Date in the past:
		self.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')
		# Always modified:
		self.setHeader('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
		# HTTP/1.1:
		self.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate')
		self.setHeader('Cache-Control', 'post-check=0, pre-check=0')
		# HTTP/1.0:
		self.setHeader('Pragma', 'no-cache')
		# Set the response format.
		self.setHeader('Content-Type', content_type + '; charset=utf-8')
		return


class BaseXmlMixin(object):

	def createXmlHeader(self, command, resourceType, currentFolder, url):
		"""Purpose: returns the xml header."""
		self.setHttpHeaders()
		# Create the XML document header.
		s = """<?xml version="1.0" encoding="utf-8" ?>"""
		# Create the main connector node.
		s += """<Connector command="%s" resourceType="%s">""" % (
			command,
			resourceType
		)
		# Add the current folder node.
		s += """<CurrentFolder path="%s" url="%s" />""" % (
			convertToXmlAttribute(currentFolder),
			convertToXmlAttribute(url),
		)
		return s

	def createXmlFooter(self):
		"""Purpose: returns the xml footer."""
		return """</Connector>"""

	def sendError(self, number, text):
		"""Purpose: in the event of an error, return an xml based error."""
		self.setHttpHeaders()
		return ("""<?xml version="1.0" encoding="utf-8" ?>"""
				+ """<Connector>"""
				+ self.sendErrorNode(number, text)
				+ """</Connector>""")

	def sendErrorNode(self, number, text):
		# Only error number 1 carries a custom message.
		if number != 1:
			return """<Error number="%s" />""" % (number)
		else:
			return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))


class BaseHtmlMixin(object):

	def sendUploadResults(self, errorNo=0, fileUrl='', fileName='', customMsg=''):
		"""Send the results of the uploading process as an HTML page.

		BUGFIX (documentation only): the explanatory strings previously
		sat *after* the first statement of this method, where they were
		dead expression statements rather than a docstring.

		The inline script is a minified version of the document.domain
		automatic fix script (#1919); the original can be found at
		_dev/domain_fix_template.js.
		"""
		self.setHttpHeaders("text/html")
		return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
			'errorNumber': errorNo,
			'fileUrl': fileUrl.replace('"', '\\"'),
			'fileName': fileName.replace('"', '\\"'),
			'customMsg': customMsg.replace('"', '\\"'),
		}
Python
#!/usr/bin/env python """ * FCKeditor - The text editor for Internet - http://www.fckeditor.net * Copyright (C) 2003-2010 Frederico Caldeira Knabben * * == BEGIN LICENSE == * * Licensed under the terms of any of the following licenses at your * choice: * * - GNU General Public License Version 2 or later (the "GPL") * http://www.gnu.org/licenses/gpl.html * * - GNU Lesser General Public License Version 2.1 or later (the "LGPL") * http://www.gnu.org/licenses/lgpl.html * * - Mozilla Public License Version 1.1 or later (the "MPL") * http://www.mozilla.org/MPL/MPL-1.1.html * * == END LICENSE == * * Configuration file for the File Manager Connector for Python """ # INSTALLATION NOTE: You must set up your server environment accordingly to run # python scripts. This connector requires Python 2.4 or greater. # # Supported operation modes: # * WSGI (recommended): You'll need apache + mod_python + modpython_gateway # or any web server capable of the WSGI python standard # * Plain Old CGI: Any server capable of running standard python scripts # (although mod_python is recommended for performance) # This was the previous connector version operation mode # # If you're using Apache web server, replace the htaccess.txt to to .htaccess, # and set the proper options and paths. # For WSGI and mod_python, you may need to download modpython_gateway from: # http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this # directory. # SECURITY: You must explicitly enable this "connector". (Set it to "True"). # WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only # authenticated users can access this file or use some kind of session checking. Enabled = False # Path to user files relative to the document root. UserFilesPath = '/userfiles/' # Fill the following value it you prefer to specify the absolute path for the # user files directory. Useful if you are using a virtual directory, symbolic # link or alias. 
Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'. # Attention: The above 'UserFilesPath' must point to the same directory. # WARNING: GetRootPath may not work in virtual or mod_python configurations, and # may not be thread safe. Use this configuration parameter instead. UserFilesAbsolutePath = '' # Due to security issues with Apache modules, it is recommended to leave the # following setting enabled. ForceSingleExtension = True # What the user can do with this connector ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ] # Allowed Resource Types ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media'] # After file is uploaded, sometimes it is required to change its permissions # so that it was possible to access it at the later time. # If possible, it is recommended to set more restrictive permissions, like 0755. # Set to 0 to disable this feature. # Note: not needed on Windows-based servers. ChmodOnUpload = 0755 # See comments above. # Used when creating folders that does not exist. ChmodOnFolderCreate = 0755 # Do not touch this 3 lines, see "Configuration settings for each Resource Type" AllowedExtensions = {}; DeniedExtensions = {}; FileTypesPath = {}; FileTypesAbsolutePath = {}; QuickUploadPath = {}; QuickUploadAbsolutePath = {}; # Configuration settings for each Resource Type # # - AllowedExtensions: the possible extensions that can be allowed. # If it is empty then any file type can be uploaded. # - DeniedExtensions: The extensions that won't be allowed. # If it is empty then no restrictions are done here. # # For a file to be uploaded it has to fulfill both the AllowedExtensions # and DeniedExtensions (that's it: not being denied) conditions. # # - FileTypesPath: the virtual folder relative to the document root where # these resources will be located. # Attention: It must start and end with a slash: '/' # # - FileTypesAbsolutePath: the physical path to the above folder. 
It must be # an absolute path. # If it's an empty string then it will be autocalculated. # Useful if you are using a virtual directory, symbolic link or alias. # Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'. # Attention: The above 'FileTypesPath' must point to the same directory. # Attention: It must end with a slash: '/' # # # - QuickUploadPath: the virtual folder relative to the document root where # these resources will be uploaded using the Upload tab in the resources # dialogs. # Attention: It must start and end with a slash: '/' # # - QuickUploadAbsolutePath: the physical path to the above folder. It must be # an absolute path. # If it's an empty string then it will be autocalculated. # Useful if you are using a virtual directory, symbolic link or alias. # Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'. # Attention: The above 'QuickUploadPath' must point to the same directory. # Attention: It must end with a slash: '/' AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip'] DeniedExtensions['File'] = [] FileTypesPath['File'] = UserFilesPath + 'file/' FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or '' QuickUploadPath['File'] = FileTypesPath['File'] QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File'] AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png'] DeniedExtensions['Image'] = [] FileTypesPath['Image'] = UserFilesPath + 'image/' FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or '' QuickUploadPath['Image'] = FileTypesPath['Image'] QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image'] 
AllowedExtensions['Flash'] = ['swf','flv'] DeniedExtensions['Flash'] = [] FileTypesPath['Flash'] = UserFilesPath + 'flash/' FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or '' QuickUploadPath['Flash'] = FileTypesPath['Flash'] QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash'] AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv'] DeniedExtensions['Media'] = [] FileTypesPath['Media'] = UserFilesPath + 'media/' FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or '' QuickUploadPath['Media'] = FileTypesPath['Media'] QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

Licensed under the terms of any of the following licenses at your choice:
GPL v2+ (http://www.gnu.org/licenses/gpl.html),
LGPL v2.1+ (http://www.gnu.org/licenses/lgpl.html) or
MPL 1.1+ (http://www.mozilla.org/MPL/MPL-1.1.html).

Base Connector for Python (CGI and WSGI).
See config.py for configuration settings.
"""

import cgi, os

from fckutil import *
from fckcommands import *	# default command's implementation
from fckoutput import *	# base http, xml and html output mixins
import config as Config


class FCKeditorConnectorBase(object):
	"""The base connector class. Subclass it to extend functionality
	(see the Zope connector for an example)."""

	def __init__(self, environ=None):
		"""Constructor: parse request fields, initialize variables, etc."""
		self.request = FCKeditorRequest(environ)  # Parse request
		self.headers = []  # Clean headers
		if environ:
			self.environ = environ
		else:
			self.environ = os.environ

	# local functions

	def setHeader(self, key, value):
		# Accumulate response headers; the entry point emits them later.
		self.headers.append((key, value))
		return


class FCKeditorRequest(object):
	"""A wrapper around the request object (WSGI environ or plain CGI)."""

	def __init__(self, environ):
		if environ:  # WSGI
			self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
					environ=environ,
					keep_blank_values=1)
			self.environ = environ
		else:  # plain old CGI
			self.environ = os.environ
			self.request = cgi.FieldStorage()
		# BUGFIX: always define get_request up front; previously it was
		# only assigned when both REQUEST_METHOD and QUERY_STRING were
		# present, so has_key()/get() could raise AttributeError.
		self.get_request = {}
		if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
			if self.environ['REQUEST_METHOD'].upper() == 'POST':
				# We are in a POST, but a GET query string exists too.
				# cgi parses the POST data by default, so parse the GET
				# QUERY_STRING separately.
				self.get_request = cgi.FieldStorage(fp=None,
						environ={
							'REQUEST_METHOD': 'GET',
							'QUERY_STRING': self.environ['QUERY_STRING'],
						},
				)

	def has_key(self, key):
		return self.request.has_key(key) or self.get_request.has_key(key)

	def get(self, key, default=None):
		if key in self.request.keys():
			field = self.request[key]
		elif key in self.get_request.keys():
			field = self.get_request[key]
		else:
			return default
		if hasattr(field, "filename") and field.filename:
			# File upload: do not convert; return the field itself.
			return field
		else:
			return field.value
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

- GNU General Public License Version 2 or later (the "GPL")
  http://www.gnu.org/licenses/gpl.html

- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
  http://www.gnu.org/licenses/lgpl.html

- Mozilla Public License Version 1.1 or later (the "MPL")
  http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python (CGI and WSGI).

See config.py for configuration settings
"""
import os

from fckutil import *
from fckcommands import *	# default command's implementation
from fckoutput import *	# base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config

class FCKeditorConnector(	FCKeditorConnectorBase,
				GetFoldersCommandMixin,
				GetFoldersAndFilesCommandMixin,
				CreateFolderCommandMixin,
				UploadFileCommandMixin,
				BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
	"The Standard connector class."

	def doResponse(self):
		"Main function. Process the request, set headers and return a string as response."
		s = ""
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendError(1, "This connector is disabled.  Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
		# Make sure we have valid inputs
		# NOTE: silently returns None (empty response) when a required
		# field is missing.
		for key in ("Command","Type","CurrentFolder"):
			if not self.request.has_key (key):
				return
		# Get command, resource type and current folder
		command = self.request.get("Command")
		resourceType = self.request.get("Type")
		# getCurrentFolder() normalizes the path and returns None if it
		# looks like a traversal attempt or contains forbidden characters.
		currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
		# Check for invalid paths
		if currentFolder is None:
			if (command == "FileUpload"):
				# uploads answer with the JS callback snippet, not XML
				return self.sendUploadResults( errorNo = 102, customMsg = "" )
			else:
				return self.sendError(102, "")

		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendError( 1, 'The %s command isn\'t allowed' % command )

		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendError( 1, 'Invalid type specified' )

		# Setup paths: QuickUpload has its own target folders in config.
		if command == "QuickUpload":
			self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		else:
			self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.FileTypesPath[resourceType]

		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ, self.webUserFilesFolder)

		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories.  Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")

		# File upload doesn't have to return XML, so intercept here
		if (command == "FileUpload"):
			return self.uploadFile(resourceType, currentFolder)

		# Create Url
		url = combinePaths( self.webUserFilesFolder, currentFolder )

		# Begin XML
		s += self.createXmlHeader(command, resourceType, currentFolder, url)

		# Execute the command (commands were validated against
		# Config.ConfigAllowedCommands above, so the lookup cannot miss
		# except for FileUpload, which was intercepted earlier).
		selector = {"GetFolders": self.getFolders,
					"GetFoldersAndFiles": self.getFoldersAndFiles,
					"CreateFolder": self.createFolder,
					}
		s += selector[command](resourceType, currentFolder)

		s += self.createXmlFooter()
		return s

# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorConnector()
		data = conn.doResponse()
		for header in conn.headers:
			print '%s: %s' % header
		print
		print data
	except:
		# Dump the traceback as plain text so the browser shows it.
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

- GNU General Public License Version 2 or later (the "GPL")
  http://www.gnu.org/licenses/gpl.html

- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
  http://www.gnu.org/licenses/lgpl.html

- Mozilla Public License Version 1.1 or later (the "MPL")
  http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Utility functions for the File Manager Connector for Python
"""

import string, re
import os
import config as Config

# Generic manipulation functions

def removeExtension(fileName):
	"Return fileName without its last extension ('a.b.c' -> 'a.b'). Raises ValueError if there is no dot."
	index = fileName.rindex(".")
	newFileName = fileName[0:index]
	return newFileName

def getExtension(fileName):
	"Return the text after the last dot ('a.tar.gz' -> 'gz'). Raises ValueError if there is no dot."
	index = fileName.rindex(".") + 1
	fileExtension = fileName[index:]
	return fileExtension

def removeFromStart(string, char):
	"Strip all leading occurrences of char.  NOTE: the parameter name shadows the 'string' module inside this function; kept for signature compatibility."
	return string.lstrip(char)

def removeFromEnd(string, char):
	"Strip all trailing occurrences of char (same shadowing caveat as removeFromStart)."
	return string.rstrip(char)

# Path functions

def combinePaths( basePath, folder ):
	"Join two URL-style path fragments with exactly one slash between them."
	return removeFromEnd( basePath, '/' ) + '/' + removeFromStart( folder, '/' )

def getFileName(filename):
	" Purpose: helper function to extrapolate the filename "
	# Take the last component for both unix and windows separators.
	for splitChar in ["/", "\\"]:
		array = filename.split(splitChar)
		if (len(array) > 1):
			filename = array[-1]
	return filename

def sanitizeFolderName( newFolderName ):
	"Do a cleanup of the folder name to avoid possible problems"
	# Remove . \ / | : ? * " < > and control characters
	return re.sub( r'\.|\\|\/|\||\:|\?|\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFolderName )

def sanitizeFileName( newFileName ):
	"Do a cleanup of the file name to avoid possible problems"
	# Replace dots in the name with underscores (only one dot can be there... security issue).
	if ( Config.ForceSingleExtension ): # remove dots
		newFileName = re.sub ( r'\.(?![^.]*$)', '_', newFileName )
	newFileName = newFileName.replace('\\','/')	# convert windows to unix path
	newFileName = os.path.basename (newFileName)	# strip directories
	# Remove \ / | : ? * " < > and control characters.
	# FIX: the previous pattern ended with '[\x00-\x1f\x7f-\x9f]/', so a
	# control character was only replaced when followed by a slash.
	return re.sub ( r'\\|\/|\||\:|\?|\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )

def getCurrentFolder(currentFolder):
	"""Normalize and validate the 'CurrentFolder' request value.

	Returns the folder wrapped in single slashes, or None when the path
	looks invalid (traversal attempts, forbidden characters).
	"""
	if not currentFolder:
		currentFolder = '/'
	# Check the current folder syntax (must begin and end with a slash).
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder
	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')
	# Check for invalid folder paths (..)
	if '..' in currentFolder or '\\' in currentFolder:
		return None
	# Check for invalid folder paths (hidden entries, forbidden characters)
	if re.search( r'(/\.)|(//)|([\\:\*\?"\<\>\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder ):
		return None
	return currentFolder

def mapServerPath( environ, url):
	" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
	# This isn't correct but for the moment there's no other solution
	# If this script is under a virtual directory or symlink it will detect the problem and stop
	return combinePaths( getRootPath(environ), url )

def mapServerFolder(resourceTypePath, folderPath):
	"Join a resource-type base path with a folder path."
	return combinePaths ( resourceTypePath , folderPath )

def getRootPath(environ):
	"Purpose: returns the root path on the server"
	# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
	# Use Config.UserFilesAbsolutePath instead
	if 'DOCUMENT_ROOT' in environ:
		return environ['DOCUMENT_ROOT']
	else:
		realPath = os.path.realpath( './' )
		selfPath = environ['SCRIPT_FILENAME']
		selfPath = selfPath [ : selfPath.rfind( '/' ) ]
		selfPath = selfPath.replace( '/', os.path.sep)
		position = realPath.find(selfPath)
		# This can check only that this script isn't run from a virtual dir
		# But it avoids the problems that arise if it isn't checked
		# FIX: removed a stray debugging statement ('raise realPath') that
		# made this branch raise unconditionally before the check below.
		if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
			raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
		return realPath[ : position ]
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

- GNU General Public License Version 2 or later (the "GPL")
  http://www.gnu.org/licenses/gpl.html

- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
  http://www.gnu.org/licenses/lgpl.html

- Mozilla Public License Version 1.1 or later (the "MPL")
  http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python (CGI and WSGI).
"""

from time import gmtime, strftime
import string

def escape(text, replace=None):
	"""Convert the special characters '&', '<', '>' and '"' to XML entities.

	'&' must be replaced first so already-produced entities are not
	re-escaped.  *replace* keeps the hook of the historical signature
	(which defaulted to the Python 2-only string.replace function); when
	None, the str.replace method is used instead.
	"""
	if replace is None:
		replace = lambda s, old, new: s.replace(old, new)
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text

def convertToXmlAttribute(value):
	"Escape a value (None becomes '') so it is safe inside an XML attribute."
	if (value is None):
		value = ""
	return escape(value)

class BaseHttpMixin(object):
	def setHttpHeaders(self, content_type='text/xml'):
		"Purpose: to prepare the headers for the xml to return"
		# Prevent the browser from caching the result.
		# Date in the past
		self.setHeader('Expires','Mon, 26 Jul 1997 05:00:00 GMT')
		# always modified
		self.setHeader('Last-Modified',strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
		# HTTP/1.1
		self.setHeader('Cache-Control','no-store, no-cache, must-revalidate')
		self.setHeader('Cache-Control','post-check=0, pre-check=0')
		# HTTP/1.0
		self.setHeader('Pragma','no-cache')
		# Set the response format.
		self.setHeader( 'Content-Type', content_type + '; charset=utf-8' )
		return

class BaseXmlMixin(object):
	def createXmlHeader(self, command, resourceType, currentFolder, url):
		"Purpose: returns the xml header"
		self.setHttpHeaders()
		# Create the XML document header
		s = """<?xml version="1.0" encoding="utf-8" ?>"""
		# Create the main connector node
		s += """<Connector command="%s" resourceType="%s">""" % (
				command,
				resourceType
				)
		# Add the current folder node
		s += """<CurrentFolder path="%s" url="%s" />""" % (
				convertToXmlAttribute(currentFolder),
				convertToXmlAttribute(url),
				)
		return s

	def createXmlFooter(self):
		"Purpose: returns the xml footer"
		return """</Connector>"""

	def sendError(self, number, text):
		"Purpose: in the event of an error, return an xml based error"
		self.setHttpHeaders()
		return ("""<?xml version="1.0" encoding="utf-8" ?>"""
				+ """<Connector>"""
				+ self.sendErrorNode (number, text)
				+ """</Connector>""" )

	def sendErrorNode(self, number, text):
		"Render a single <Error> node; only error number 1 carries a custom message."
		if number != 1:
			return """<Error number="%s" />""" % (number)
		else:
			return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))

class BaseHtmlMixin(object):
	def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
		"""Send the JS snippet the upload iframe runs to notify the editor.

		Uses a minified version of the document.domain automatic fix
		script (#1919); the original script can be found at
		_dev/domain_fix_template.js
		"""
		self.setHttpHeaders("text/html")
		return """<script type="text/javascript">
	(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();

	window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
		'errorNumber': errorNo,
		'fileUrl': fileUrl.replace ('"', '\\"'),
		'fileName': fileName.replace ( '"', '\\"' ) ,
		'customMsg': customMsg.replace ( '"', '\\"' ),
		}
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Connector/QuickUpload for Python (WSGI wrapper). See config.py for configuration settings """ from connector import FCKeditorConnector from upload import FCKeditorQuickUpload import cgitb from cStringIO import StringIO # Running from WSGI capable server (recomended) def App(environ, start_response): "WSGI entry point. Run the connector" if environ['SCRIPT_NAME'].endswith("connector.py"): conn = FCKeditorConnector(environ) elif environ['SCRIPT_NAME'].endswith("upload.py"): conn = FCKeditorQuickUpload(environ) else: start_response ("200 Ok", [('Content-Type','text/html')]) yield "Unknown page requested: " yield environ['SCRIPT_NAME'] return try: # run the connector data = conn.doResponse() # Start WSGI response: start_response ("200 Ok", conn.headers) # Send response text yield data except: start_response("500 Internal Server Error",[("Content-type","text/html")]) file = StringIO() cgitb.Hook(file = file).handle() yield file.getvalue()
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This is the "File Uploader" for Python """ import os from fckutil import * from fckcommands import * # default command's implementation from fckconnector import FCKeditorConnectorBase # import base connector import config as Config class FCKeditorQuickUpload( FCKeditorConnectorBase, UploadFileCommandMixin, BaseHttpMixin, BaseHtmlMixin): def doResponse(self): "Main function. Process the request, set headers and return a string as response." # Check if this connector is disabled if not(Config.Enabled): return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"") command = 'QuickUpload' # The file type (from the QueryString, by default 'File'). resourceType = self.request.get('Type','File') currentFolder = "/" # Check for invalid paths if currentFolder is None: return self.sendUploadResults(102, '', '', "") # Check if it is an allowed command if ( not command in Config.ConfigAllowedCommands ): return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command ) if ( not resourceType in Config.ConfigAllowedTypes ): return self.sendUploadResults( 1, '', '', 'Invalid type specified' ) # Setup paths self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType] self.webUserFilesFolder = Config.QuickUploadPath[resourceType] if not self.userFilesFolder: # no absolute path given (dangerous...) 
self.userFilesFolder = mapServerPath(self.environ, self.webUserFilesFolder) # Ensure that the directory exists. if not os.path.exists(self.userFilesFolder): try: self.createServerFoldercreateServerFolder( self.userFilesFolder ) except: return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ") # File upload doesn't have to return XML, so intercept here return self.uploadFile(resourceType, currentFolder) # Running from command line (plain old CGI) if __name__ == '__main__': try: # Create a Connector Instance conn = FCKeditorQuickUpload() data = conn.doResponse() for header in conn.headers: if not header is None: print '%s: %s' % header print print data except: print "Content-Type: text/plain" print import cgi cgi.print_exception()
Python
#!/usr/bin/env python

"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

- GNU General Public License Version 2 or later (the "GPL")
  http://www.gnu.org/licenses/gpl.html

- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
  http://www.gnu.org/licenses/lgpl.html

- Mozilla Public License Version 1.1 or later (the "MPL")
  http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

Connector for Python and Zope.

This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py
in previous releases.
"""

from fckutil import *
from connector import *
import config as Config

class FCKeditorConnectorZope(FCKeditorConnector):
	"""
	Zope version of FCKeditorConnector: stores folders/files as Zope
	OFS objects instead of filesystem entries.
	"""
	# Allow access (Zope)
	__allow_access_to_unprotected_subobjects__ = 1

	def __init__(self, context=None):
		"""
		Constructor
		"""
		FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
		# Instance Attributes
		self.context = context
		self.request = FCKeditorRequest(context)
		# FIX: these caches were read by getZopeRootContext() /
		# getZopeUploadContext() but never initialized, which raised
		# AttributeError on first use.
		self.zopeRootContext = None
		self.zopeUploadContext = None

	def getZopeRootContext(self):
		"Lazily resolve and cache the Zope physical root."
		if self.zopeRootContext is None:
			self.zopeRootContext = self.context.getPhysicalRoot()
		return self.zopeRootContext

	def getZopeUploadContext(self):
		"Lazily walk userFilesFolder from the root and cache the upload folder object."
		if self.zopeUploadContext is None:
			folderNames = self.userFilesFolder.split("/")
			c = self.getZopeRootContext()
			for folderName in folderNames:
				if (folderName != ""):
					c = c[folderName]
			self.zopeUploadContext = c
		return self.zopeUploadContext

	def setHeader(self, key, value):
		# Headers go straight to the Zope response object.
		self.context.REQUEST.RESPONSE.setHeader(key, value)

	def getFolders(self, resourceType, currentFolder):
		"List sub-folders of the current folder as a <Folders> XML fragment."
		# Open the folders node
		s = ""
		s += """<Folders>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["Folder"]):
			s += """<Folder name="%s" />""" % (
					convertToXmlAttribute(name)
					)
		# Close the folders node
		s += """</Folders>"""
		return s

	def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		# FIX: was self.getZopeFolders(...), a method that does not exist;
		# the folder listing is produced by the getFolders() override above.
		folders = self.getFolders(resourceType, currentFolder)
		files = self.getZopeFiles(resourceType, currentFolder)
		s = folders + files
		return s

	def getFoldersAndFiles(self, resourceType, currentFolder):
		# FIX: the base class command selector dispatches to
		# getFoldersAndFiles(); route it to the Zope implementation instead
		# of the filesystem mixin inherited from FCKeditorConnector.
		return self.getZopeFoldersAndFiles(resourceType, currentFolder)

	def getZopeFiles(self, resourceType, currentFolder):
		"List File/Image objects of the current folder as a <Files> XML fragment."
		# Open the files node
		s = ""
		s += """<Files>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["File","Image"]):
			s += """<File name="%s" size="%s" />""" % (
					convertToXmlAttribute(name),
					((o.get_size() / 1024) + 1)
					)
		# Close the files node
		s += """</Files>"""
		return s

	def findZopeFolder(self, resourceType, folderName):
		# returns the context of the resource / folder
		zopeFolder = self.getZopeUploadContext()
		# FIX: removeFromStart/removeFromEnd are module-level helpers from
		# fckutil, not methods of this class.
		folderName = removeFromStart(folderName, "/")
		folderName = removeFromEnd(folderName, "/")
		if (resourceType != ""):
			try:
				zopeFolder = zopeFolder[resourceType]
			except:
				# Create the resource-type folder on first use.
				zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
				zopeFolder = zopeFolder[resourceType]
		if (folderName != ""):
			folderNames = folderName.split("/")
			for folderName in folderNames:
				zopeFolder = zopeFolder[folderName]
		return zopeFolder

	def createFolder(self, resourceType, currentFolder):
		"Create a new sub-folder named by the 'NewFolderName' request field."
		# Find out where we are
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
		else:
			errorNo = 102
		return self.sendErrorNode ( errorNo, errorMsg )

	def uploadFile(self, resourceType, currentFolder, count=None):
		"""Store the uploaded 'NewFile' as a Zope File object.

		On an id collision the call retries recursively with a numeric
		suffix in the name ('name.1.ext', 'name.2.ext', ...).
		"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		# FIX: getFileName/removeExtension/getExtension are fckutil module
		# helpers, not methods; also renamed local 'file' (shadowed builtin).
		upload = self.request.get("NewFile", None)
		fileName = getFileName(upload.filename)
		fileNameOnly = removeExtension(fileName)
		fileExtension = getExtension(fileName).lower()
		if (count):
			nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
		else:
			nid = fileName
		title = nid
		try:
			zopeFolder.manage_addProduct['OFSP'].manage_addFile(
					id=nid,
					title=title,
					file=upload.read()
					)
		except:
			if (count):
				count += 1
			else:
				count = 1
			# FIX: was self.zopeFileUpload(...), a method that does not
			# exist; retry this method with a numbered file name.
			return self.uploadFile(resourceType, currentFolder, count)
		return self.sendUploadResults( 0 )

class FCKeditorRequest(object):
	"A wrapper around the Zope request object"
	def __init__(self, context=None):
		r = context.REQUEST
		self.request = r
	def has_key(self, key):
		return self.request.has_key(key)
	def get(self, key, default=None):
		return self.request.get(key, default)

"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as we
then have a like to the Zope context.

## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Sample page. """ import cgi import os # Ensure that the fckeditor.py is included in your classpath import fckeditor # Tell the browser to render html print "Content-Type: text/html" print "" # Document header print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor - Sample</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="robots" content="noindex, nofollow"> <link href="../sample.css" rel="stylesheet" type="text/css" /> </head> <body> <h1>FCKeditor - Python - Sample 1</h1> This sample displays a normal HTML form with an FCKeditor with full features enabled. <hr> <form action="sampleposteddata.py" method="post" target="_blank"> """ # This is the real work try: sBasePath = os.environ.get("SCRIPT_NAME") sBasePath = sBasePath[0:sBasePath.find("_samples")] oFCKeditor = fckeditor.FCKeditor('FCKeditor1') oFCKeditor.BasePath = sBasePath oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>""" print oFCKeditor.Create() except Exception, e: print e print """ <br> <input type="submit" value="Submit"> </form> """ # For testing your environments #print "<hr>" #for key in os.environ.keys(): # print "%s: %s<br>" % (key, os.environ.get(key, "")) #print "<hr>" # Document footer print """ </body> </html> """
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This page lists the data posted by a form. """ import cgi import os # Tell the browser to render html print "Content-Type: text/html" print "" try: # Create a cgi object form = cgi.FieldStorage() except Exception, e: print e # Document header print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor - Samples - Posted Data</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="robots" content="noindex, nofollow"> <link href="../sample.css" rel="stylesheet" type="text/css" /> </head> <body> """ # This is the real work print """ <h1>FCKeditor - Samples - Posted Data</h1> This page lists all data posted by the form. <hr> <table border="1" cellspacing="0" id="outputSample"> <colgroup><col width="80"><col></colgroup> <thead> <tr> <th>Field Name</th> <th>Value</th> </tr> </thead> """ for key in form.keys(): try: value = form[key].value print """ <tr> <th>%s</th> <td><pre>%s</pre></td> </tr> """ % (cgi.escape(key), cgi.escape(value)) except Exception, e: print e print "</table>" # For testing your environments #print "<hr>" #for key in os.environ.keys(): # print "%s: %s<br>" % (key, os.environ.get(key, "")) #print "<hr>" # Document footer print """ </body> </html> """
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == Sample page. """ import cgi import os # Ensure that the fckeditor.py is included in your classpath import fckeditor # Tell the browser to render html print "Content-Type: text/html" print "" # Document header print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor - Sample</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="robots" content="noindex, nofollow"> <link href="../sample.css" rel="stylesheet" type="text/css" /> </head> <body> <h1>FCKeditor - Python - Sample 1</h1> This sample displays a normal HTML form with an FCKeditor with full features enabled. <hr> <form action="sampleposteddata.py" method="post" target="_blank"> """ # This is the real work try: sBasePath = os.environ.get("SCRIPT_NAME") sBasePath = sBasePath[0:sBasePath.find("_samples")] oFCKeditor = fckeditor.FCKeditor('FCKeditor1') oFCKeditor.BasePath = sBasePath oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>""" print oFCKeditor.Create() except Exception, e: print e print """ <br> <input type="submit" value="Submit"> </form> """ # For testing your environments #print "<hr>" #for key in os.environ.keys(): # print "%s: %s<br>" % (key, os.environ.get(key, "")) #print "<hr>" # Document footer print """ </body> </html> """
Python
#!/usr/bin/env python """ FCKeditor - The text editor for Internet - http://www.fckeditor.net Copyright (C) 2003-2010 Frederico Caldeira Knabben == BEGIN LICENSE == Licensed under the terms of any of the following licenses at your choice: - GNU General Public License Version 2 or later (the "GPL") http://www.gnu.org/licenses/gpl.html - GNU Lesser General Public License Version 2.1 or later (the "LGPL") http://www.gnu.org/licenses/lgpl.html - Mozilla Public License Version 1.1 or later (the "MPL") http://www.mozilla.org/MPL/MPL-1.1.html == END LICENSE == This page lists the data posted by a form. """ import cgi import os # Tell the browser to render html print "Content-Type: text/html" print "" try: # Create a cgi object form = cgi.FieldStorage() except Exception, e: print e # Document header print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>FCKeditor - Samples - Posted Data</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="robots" content="noindex, nofollow"> <link href="../sample.css" rel="stylesheet" type="text/css" /> </head> <body> """ # This is the real work print """ <h1>FCKeditor - Samples - Posted Data</h1> This page lists all data posted by the form. <hr> <table border="1" cellspacing="0" id="outputSample"> <colgroup><col width="80"><col></colgroup> <thead> <tr> <th>Field Name</th> <th>Value</th> </tr> </thead> """ for key in form.keys(): try: value = form[key].value print """ <tr> <th>%s</th> <td><pre>%s</pre></td> </tr> """ % (cgi.escape(key), cgi.escape(value)) except Exception, e: print e print "</table>" # For testing your environments #print "<hr>" #for key in os.environ.keys(): # print "%s: %s<br>" % (key, os.environ.get(key, "")) #print "<hr>" # Document footer print """ </body> </html> """
Python
#-*- coding: utf-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import RequestContext
from google.appengine.ext import db
from django.conf import settings
from models.main import App_user, Post, Comment, Tag
from smart_pager import Pager
from logics.main import get_user, set_user
from django.utils import simplejson
import re, datetime

def index(request, page = 1):
	"Front page: paginated list of posts, 10 per page."
	page = int(page)
	if page < 1:
		page = 1
	page_size = 10
	total = Post.count()
	post_list = Post.get_list(page, page_size)
	pager = Pager(total, page_size, page, page_list_num = 8)
	return render_to_response('index.html', {
		'post_list': post_list,
		'pager': pager,
	}, RequestContext(request))

def show_post(request, post_key):
	"Single post page (post_key is a datastore key), with prev/next navigation and comments."
	post = db.get(post_key)
	# previous and next post, ordered by the 'offset' property
	pre_post = Post.all().order('offset').filter('offset > ', post.offset).get()
	next_post = Post.all().order('-offset').filter('offset < ', post.offset).get()
	return render_to_response('show_post.html', {
		'post': post,
		'pre_post': pre_post,
		'next_post': next_post,
		'user': get_user(),
		'comment_list': post.get_comment_list(),
	}, RequestContext(request))

def add_comment(request):
	"AJAX endpoint: add a comment to a post; responds with a JSON {state, msg} object."
	post_key = request.POST['post_key'].strip()
	post = db.get(post_key)
	author = get_user()  # logged-in user, or None for anonymous comments
	nickname = request.POST['nickname'].strip()
	email = request.POST['email'].strip()
	link = request.POST['link'].strip()
	if not link:
		link = None
	content = request.POST['content'].strip()
	if not nickname or not email or not content:
		# msg: "please fill in nickname, e-mail and content completely"
		return HttpResponse(simplejson.dumps({'state': 'err', 'msg': u'请完整填写昵称、电子邮件和内容'}))
	if not re.match(r'^\w+((_-\w+)|(\.\w+))*\@[A-Za-z0-9]+((\.|-)[A-Za-z0-9]+)*\.[A-Za-z0-9]+$', email):
		# msg: "the e-mail address format is incorrect"
		return HttpResponse(simplejson.dumps({'state': 'err', 'msg': u'邮箱地址格式不正确'}))
	ip = request.META["REMOTE_ADDR"]
	if author:
		Comment.auth_add(post, content, ip, author_nickname = nickname, link = link)
	else:
		Comment.anonymous_add(post, content, ip, email, nickname, link = link)
	# msg: "comment posted successfully"
	return HttpResponse(simplejson.dumps({'state': 'ok', 'msg': u'评论发表成功'}))

def atom(request):
	"Atom feed of the 10 latest posts."
	post_list = Post.get_list(1, 10)
	return render_to_response('atom.xml', {
		'post_list': post_list,
		'blog_updated': datetime.datetime.utcnow(),
	}, RequestContext(request), mimetype = 'application/atom+xml')

def sitemap(request):
	"XML sitemap covering up to 200 posts."
	post_list = Post.get_list(1, 200)
	return render_to_response('sitemap.xml', {
		'post_list': post_list,
		'blog_updated': datetime.datetime.utcnow(),
	}, RequestContext(request), mimetype="text/xml; charset=utf-8")
Python
#-*- coding: utf-8 -*- from django.http import HttpResponse, HttpResponseRedirect from google.appengine.api import images from google.appengine.ext import db from models.main import App_user, Post, Comment, Tag from common.view_decorator import role_required from logics.main import get_user, set_user import datetime import django from django.conf import settings import ai def get_django_version(): #(1, 0, 'alpha') return django.VERSION[0] class Images(db.Model): uploader = db.ReferenceProperty(App_user) content = db.BlobProperty() image_type = db.StringProperty() add_time = db.DateTimeProperty() @role_required('admin') def upload_image(request): img = Images() img_file = request.FILES.get('img') if not img_file: return HttpResponse('{status:"Please select your image."}') img.uploader = get_user() img.add_time = datetime.datetime.utcnow() if get_django_version() >= 1: content = img_file.read() file_path = img_file.name else: content = img_file['content'] file_path = img_file['filename'] img.content = db.Blob(content) img.image_type = ai.get_content_type(file_path) key = img.put() return HttpResponse('{status:"UPLOADED",image_url: "/rpc/img/?img_id=%s"}' % (key)) def img(request): img = db.get(request.GET["img_id"]) if img and img.content: return HttpResponse(img.content, mimetype=img.image_type) else: return HttpResponse("No image")
Python
#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) if __name__ == "__main__": execute_manager(settings)
Python
import logging, os, sys # Google App Engine imports. from google.appengine.ext.webapp import util # Remove the standard version of Django. for k in [k for k in sys.modules if k.startswith('django')]: del sys.modules[k] # Force sys.path to have our own directory first, in case we want to import # from it. #sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) # Must set this env var *before* importing any part of Django os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' libs_path = os.path.abspath('libs') django_path = os.path.abspath('libs/django.zip') sys.path.insert(0, django_path) sys.path.insert(0, libs_path) import django.core.handlers.wsgi ####################################################3 import django.core.signals import django.db import django.dispatch.dispatcher def log_exception(*args, **kwds): logging.exception('Exception in request:') # Log errors. #django.dispatch.dispatcher.connect( # log_exception, django.core.signals.got_request_exception) # Unregister the rollback event handler. #django.dispatch.dispatcher.disconnect( # django.db._rollback_on_exception, # django.core.signals.got_request_exception) ########################################################3 def main(): if django_path not in sys.path: sys.path.insert(0, django_path) if libs_path not in sys.path: sys.path.insert(0, libs_path) # Create a Django application for WSGI. application = django.core.handlers.wsgi.WSGIHandler() # Run the WSGI CGI handler with that application. util.run_wsgi_app(application) if __name__ == '__main__': main()
Python
#-*- coding: utf-8 -*- import os, sys, tempfile has_set = False BASE_DIR = os.path.dirname(os.path.dirname(__file__)) import logging logging.basicConfig(format='%(asctime)s %(levelname)8s %(message)s', stream=sys.stdout) #logging.basicConfig(format='%(asctime)s %(levelname)8s %(message)s', # filename=BASE_DIR+'/logs/send_feeds.logs', # filemode='a') logging.getLogger().setLevel(logging.NOTSET) def set_app_evn(): global has_set if has_set: return has_set = True os.chdir(BASE_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(BASE_DIR, 'libs')) sys.path.insert(0, 'C:/Program Files/Google/google_appengine') #设置app engine环境变量 # from google.appengine.api import apiproxy_stub_map # from google.appengine.api import datastore_file_stub # # app_id = 'myspace-sexy-girls' # os.environ['APPLICATION_ID'] = app_id # datastore_path = os.path.join(tempfile.gettempdir(),'dev_appserver.datastore') # history_path = os.path.join(tempfile.gettempdir(),'dev_appserver.datastore.history') # require_indexes = False # # apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap() # datastore = datastore_file_stub.DatastoreFileStub( # app_id, datastore_path, history_path, require_indexes=require_indexes) # apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore) ####################33end app_id = '3kkk-me' os.environ['APPLICATION_ID'] = app_id ###############django环境变量 django_path = os.path.join(BASE_DIR, 'libs/django.zip') sys.path.insert(0, django_path) from django.core.management import setup_environ import settings setup_environ(settings) ################3end set_app_evn() if __name__ == '__main__': import code code.interact('App Engine interactive console', None, locals())
Python
#-*- coding: utf-8 -*- import set_app_evn from set_app_evn import BASE_DIR import ai import Image, ImageFilter, ImageEnhance def register(opener): reg_info = { 'user': 'aksdj990sddddf', 'passwd': 'wushiyito', 'passwdAgain': 'wushiyito', 'answer': 'asdfasdfa', 'question': 'sadfasdf', 'questionbak': 'sadfasdf', 'birthCityId': '2', 'birthCountyId': '5101', 'birthProvinceId': '1', 'birthdayDate': '6', 'birthdayMonth': '7', 'birthdayYear': '2002', 'bru': '', 'checkbox': 'checkbox', 'cityId': '2', 'countyId': '5101', 'from': '', 'mailDomain': '@sohu.com', 'provinceId': '1', 'sex': '0', 'textarea': '', 'vcode': 'dksh', 'vcodeEn': 'lwqIB%2BRTQVn4%2F6fzTSAGyPwTgbIrRlro', 'xpt': '', } { 'cfsk': 'RPDBwE5epxPukX0FRCFPnqfwEFV4lH3w', 'ycka': 'EyJ7uuzzod%2FujWBr377ZBvwTgbIrRlro', 'ueby': 'oWA7JqpHK%2F%2BlmU%2BQfVsCzoY8BZ60ctPB', 'xdyb': 'H77Sk8qEEjsrIza58YQvLPwTgbIrRlro', 'dksh': 'lwqIB%2BRTQVn4%2F6fzTSAGyPwTgbIrRlro', } #ai.fetch('http://blog.sohu.com/login/reg.do', opener = opener) reg_headers = [('Referer', 'http://blog.sohu.com/login/reg.do')] res = ai.fetch('http://blog.sohu.com/login/regnew.do', headers = reg_headers, \ opener = opener, fields = reg_info) print res['headers'], res['data'] #http://blog.sohu.com/login/regBlog.do?bru=&domain=asdfsdfsdfa&from=&name=title def vcode_hack(): image_name = 'd:/rand.jpg' im = Image.open(image_name) im = im.filter(ImageFilter.MedianFilter()) enhancer = ImageEnhance.Contrast(im) im = enhancer.enhance(2) im = im.convert('1') im.show() if __name__ == '__main__': #opener = ai.get_opener() #register(opener) vcode_hack()
Python
#-*- coding: utf-8 -*-
"""Per-thread storage for the currently authenticated user.

The auth middleware calls set_user() at the start of a request; models and
views read it back with get_user(). Thread-local storage keeps concurrent
requests from seeing each other's user.
"""
from threading import local

_thread_locals = local()


def get_user():
    """Return the user bound to the current thread, or None if none was set.

    Fixed: the original used a bare ``except:`` around the attribute read,
    which would also swallow SystemExit/KeyboardInterrupt. getattr with a
    default expresses the intent precisely.
    """
    return getattr(_thread_locals, 'user', None)


def set_user(u):
    """Bind user object `u` to the current thread."""
    _thread_locals.user = u
Python
#-*- coding: utf-8 -*-
"""Datastore models for the blog: users, posts, comments, tags, sidebar.

Posts and comments use soft deletion (is_delete flag) and keep denormalized
counters (post_count / comment_count) consistent inside datastore
transactions; entities are parented (post under user, comment under post) so
each transaction touches a single entity group.
"""
from google.appengine.ext import db
import datetime
import generalcounter
from logics.main import get_user, set_user
import re


class App_user(db.Model):
    """A registered user of the blog."""
    email = db.EmailProperty(required = True)
    nickname = db.StringProperty(required = True)
    # link: the user's homepage URL, shown with their comments
    link = db.StringProperty()
    level = db.IntegerProperty()
    password = db.StringProperty()
    is_verified = db.BooleanProperty(required = True)
    # Denormalized counters, maintained transactionally below.
    comment_count = db.IntegerProperty(default=0)
    post_count = db.IntegerProperty(default=0)
    add_time = db.DateTimeProperty(required = True)


class Post(db.Model):
    """A blog post. Soft-deleted via is_delete; ordered by `offset`."""
    # offset: monotonically increasing sequence number (from generalcounter)
    offset = db.IntegerProperty(required = True)
    title = db.StringProperty(required = True)
    content = db.TextProperty(required = True)
    author = db.ReferenceProperty(App_user, collection_name = 'add_post_set', required = True)
    # summary: text before the '<hr class="more">' separator, if any
    summary = db.TextProperty()
    add_time = db.DateTimeProperty(required = True)
    modify_time = db.DateTimeProperty()
    modify_user = db.ReferenceProperty(App_user, collection_name = 'modify_post_set')
    comment_count = db.IntegerProperty(default=0)
    tags = db.StringListProperty()
    is_delete = db.BooleanProperty(default = False)
    delete_user = db.ReferenceProperty(App_user, collection_name = 'delete_post_set')

    @classmethod
    def all(cls):
        """Query over non-deleted posts only (soft-delete filter)."""
        #now = datetime.datetime.utcnow()
        return super(Post, cls).all().filter('is_delete', False)

    @classmethod
    def count(cls):
        """Total post count, read from the sharded general counter."""
        return int(generalcounter.get_count('post'))

    @classmethod
    def add(cls, title, content, summary = None, add_time = None):
        """Create a post authored by the current thread-local user.

        The post and the author's post_count are written in one transaction
        (the post is parented under the author, so one entity group).
        """
        u = get_user()
        count = cls.count()
        if add_time is None:
            add_time = datetime.datetime.utcnow()
        p = cls(parent = u, offset = count+1, title = title, content = content, \
            author = u, add_time = add_time)
        if summary:
            p.summary = summary
        u.post_count += 1
        def txn():
            p.put()
            u.put()
        db.run_in_transaction(txn)
        generalcounter.increment('post')

    def modify(self, title, content, summary = None, add_time = None):
        """Update title/content/summary and record who modified it and when."""
        u = get_user()
        self.title = title
        self.content = content
        self.summary = summary
        if add_time:
            self.add_time = add_time
        self.modify_time = datetime.datetime.utcnow()
        self.modify_user = u
        self.put()

    def delete(self):
        """Soft-delete this post and decrement the author's post_count."""
        u = get_user()
        def txn():
            #super(Post, self).delete()
            self.is_delete = True
            self.delete_user = u
            self.author.post_count -= 1
            self.put()
            self.author.put()
        db.run_in_transaction(txn)
        generalcounter.decrement('post')

    @classmethod
    def get_list(cls, page, page_size):
        """Return one page of posts, newest (highest offset) first."""
        q = cls.all()
        q.order('-offset')
        return q.fetch(page_size, offset = page_size*(page-1))

    def get_comment_list(self):
        """Comments on this post, oldest first (capped at 100)."""
        return Comment.all().filter('post', self).order('add_time').fetch(100)

    def to_atom_xml(self):
        """Content escaped for the Atom feed: escape bare '&', drop the
        'more' separator marker."""
        sep = '<hr class="more">'
        return re.sub('&(?!amp;)', '&amp;', self.content).replace(sep, '')


class Comment(db.Model):
    """A comment on a post, by a logged-in user or an anonymous visitor."""
    email = db.EmailProperty()
    nickname = db.StringProperty()
    # author: set only for comments by logged-in users
    author = db.ReferenceProperty(App_user)
    post = db.ReferenceProperty(Post, required = True)
    link = db.StringProperty()
    content = db.TextProperty()
    ip = db.StringProperty()
    add_time = db.DateTimeProperty()
    is_delete = db.BooleanProperty(default = False)
    delete_user = db.ReferenceProperty(App_user, collection_name = 'delete_comment_set')

    @classmethod
    def all(cls):
        """Query over non-deleted comments only (soft-delete filter)."""
        return super(Comment, cls).all().filter('is_delete', False)

    @classmethod
    def auth_add(cls, post, content, ip, author_nickname = None, link = None):
        """Add a comment from the logged-in (thread-local) user."""
        return cls._add(post, content, ip, author_email = None, \
            author_nickname = author_nickname, link = link)

    @classmethod
    def anonymous_add(cls, post, content, ip, author_email, author_nickname, link = None):
        """Add a comment from an anonymous visitor (email/nickname supplied)."""
        return cls._add(post, content, ip, author_email = author_email, \
            author_nickname = author_nickname, link = link)

    @classmethod
    def _add(cls, post, content, ip, author_email = None, author_nickname = None, link = None):
        """Shared insert path: author_email is None for logged-in users.

        Writes the comment, the post's comment_count and (for logged-in
        users) the user's comment_count in a single transaction; the comment
        is parented under the post.
        """
        u = get_user()
        # A logged-in user is posting the comment.
        if author_email is None:
            c = cls(parent = post, email = u.email, nickname = u.nickname, author = u, post = post, \
                content = content, ip = ip, add_time = datetime.datetime.utcnow())
            # Let the user update their profile link/nickname via the form.
            if link and u.link != link:
                u.link = link
            if author_nickname and author_nickname != u.nickname:
                u.nickname = author_nickname
                c.nickname = author_nickname
        else:
            c = cls(parent = post, email = author_email, nickname = author_nickname, post = post, \
                content = content, ip = ip, add_time = datetime.datetime.utcnow())
            if link:
                c.link = link
        def txn():
            c.put()
            post.comment_count += 1
            if author_email is None:
                u.comment_count += 1
                u.put()
            post.put()
        db.run_in_transaction(txn)

    def delete(self):
        """Soft-delete this comment; fix post and author comment counters."""
        u = get_user()
        def txn():
            self.is_delete = True
            self.delete_user = u
            self.post.comment_count -= 1
            self.put()
            self.post.put()
            if self.author:
                self.author.comment_count -= 1
                self.author.put()
        db.run_in_transaction(txn)


class Tag(db.Model):
    """A post tag with a denormalized usage count."""
    tag = db.StringProperty()
    post_count = db.IntegerProperty(default=0)


class Sidebar(db.Model):
    """A block of HTML shown in the blog sidebar, sorted by `order`."""
    title = db.StringProperty(required = True)
    content = db.TextProperty(required = True)
    order = db.IntegerProperty(required = True, default= 0)
    add_time = db.DateTimeProperty(auto_now_add = True)
Python
#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) if __name__ == "__main__": execute_manager(settings)
Python
"""Root URLconf for the blog.

Fixes: all patterns are now raw strings (consistent with the r'' entries the
file already had), and the '.' in atom.xml / sitemap.xml is escaped -- the
unescaped dot matched any character (e.g. 'atomXxml').
"""
from django.conf.urls.defaults import *

# Public blog views.
urlpatterns = patterns('views.main',
    (r'^$', 'index'),
    (r'^(?P<page>\d+)/$', 'index'),
    (r'^p/(?P<post_key>.+)/$', 'show_post'),
    (r'^add_comment/$', 'add_comment'),
    (r'^atom\.xml$', 'atom'),
    (r'^sitemap\.xml$', 'sitemap'),
    (r'^admin/', include('admin.urls')),
)

# Auth and RPC endpoints, addressed by full dotted path.
urlpatterns += patterns('',
    (r'^logout/$', 'common.auth.logout'),
    (r'^rpc/upload_image/$', 'views.rpc.upload_image'),
    (r'^rpc/img/$', 'views.rpc.img'),
)
Python
#-*- coding: utf-8 -*-
"""Django settings for the blog, tuned for Google App Engine (no database
settings; persistence goes through the App Engine datastore models)."""
import os, sys, os.path

VERSION = '0.001'
# LOCAL toggles between the dev_appserver and the production domain.
LOCAL = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
APPEND_SLASH = True
ROOT_PATH = os.path.dirname(__file__)
if LOCAL:
    BASE_URL = 'http://localhost:8080'
    DOMAIN = 'localhost'
else:
    BASE_URL = 'http://www.3kkk.me'
    DOMAIN = 'www.3kkk.me'

# Site identity, surfaced in templates via the bind_settings context processor.
BLOG_AUTHOR = "3kkk-me"
BLOG_TITLE = "3kkk-me's self-inspection"
BLOG_DESC = u"我的心随风飘荡,抓不住你我"

ADMINS = (
    ('perol.chen', 'perol.chen@gmail.com'),
)

MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ROOT_PATH + "/static/"

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = BASE_URL + '/static'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '-nc*k(=13$ak$&sc_#2fu$9p_vwt(ckv=hy*0qr(4%jrr)ceap'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    # Injects the BLOG_* constants above into every template context.
    'common.context_processors.bind_settings',
)

MIDDLEWARE_CLASSES = (
    'common.domain_trans.Domain_transMiddleware',
    'django.middleware.common.CommonMiddleware',
    #'django.contrib.sessions.middleware.SessionMiddleware',
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.middleware.doc.XViewMiddleware',
    # NOTE(review): 'django-middleware' contains a hyphen, which is not a
    # valid Python identifier -- confirm the appengine_utilities package
    # actually ships a module importable under this dotted path.
    'appengine_utilities.django-middleware.middleware.SessionMiddleware',
    'common.auth.AuthMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ROOT_PATH + '/templates',
)

INSTALLED_APPS = (
    'common',
    'admin',
)
Python
"""URLconf for the admin section (mounted under /admin/ by the root URLconf).

Fixes: removed two unreachable entries from the original --
('^comment_list/$', 'comment_list') was listed twice, and
('^user_list/$', 'user_list') could never match because the earlier,
unanchored '^user_list/' pattern already matches every such URL.
Django uses the first matching pattern, so dropping the shadowed duplicates
does not change routing.
"""
from django.conf.urls.defaults import *

urlpatterns = patterns('admin.views',
    ('^$', 'index'),
    ('^left/$', 'left'),
    ('^post_list/$', 'post_list'),
    ('^add_post/', 'add_post'),
    ('^comment_list/$', 'comment_list'),
    ('^user_list/', 'user_list'),
    ('^add_post_post/', 'add_post_post'),
    ('^del_post/', 'del_post'),
    ('^edit_post/', 'edit_post'),
    ('^edit_post_post/', 'edit_post_post'),
    ('^del_comment/', 'del_comment'),
    ('^sidebar_list/$', 'sidebar_list'),
    ('^sidebar_add/$', 'sidebar_add'),
    ('^sidebar_add_post/$', 'sidebar_add_post'),
    ('^sidebar_edit/$', 'sidebar_edit'),
    ('^sidebar_edit_post/$', 'sidebar_edit_post'),
    ('^sidebar_delete/$', 'sidebar_delete'),
)
Python
#-*- coding: utf-8 -*-
"""Admin views: CRUD for posts, comments, users and sidebar blocks.
Every view requires the 'admin' role (enforced by @role_required)."""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import RequestContext
from google.appengine.ext import db
from common.view_decorator import role_required
from models.main import App_user, Post, Comment, Tag, Sidebar
from smart_pager import Pager
import datetime
from common.time_utils import parse_time


@role_required('admin')
def index(request):
    """Admin frameset entry page."""
    return render_to_response('admin/index.html', {
    }, RequestContext(request))


@role_required('admin')
def left(request):
    """Admin navigation (left frame)."""
    return render_to_response('admin/left.html', {
    }, RequestContext(request))


@role_required('admin')
def add_post(request):
    """Render the new-post form."""
    return render_to_response('admin/add_post.html', {
    }, RequestContext(request))


@role_required('admin')
def add_post_post(request):
    """Handle new-post submission.

    Everything before the '<hr class="more">' marker becomes the summary;
    an optional explicit publish time comes from add_time_date/add_time_time.
    """
    title = request.POST['title'].strip()
    content = request.POST['content'].strip()
    sep = '<hr class="more">'
    sep_index = content.find(sep)
    if sep_index != -1:
        summary = content[0:sep_index]
    else:
        summary = None
    add_time_date = request.POST.get('add_time_date').strip()
    add_time_time = request.POST.get('add_time_time').strip()
    if add_time_date and add_time_time:
        add_time = parse_time(add_time_date + ' '+ add_time_time)
    else:
        add_time = None
    Post.add(title, content, summary = summary, add_time = add_time)
    return HttpResponse('ok')


@role_required('admin')
def post_list(request):
    """Paginated list of posts (15 per page)."""
    page = int(request.GET.get('page', 1))
    if page < 1:
        page = 1
    page_size = 15
    total = Post.count()
    post_list = Post.get_list(page, page_size)
    pager = Pager(total, page_size, page, page_list_num = 10)
    return render_to_response('admin/post_list.html', {
        'post_list': post_list,
        'pager': pager,
    }, RequestContext(request))


@role_required('admin')
def del_post(request):
    """Soft-delete the post identified by POST['post_key']."""
    post_key = request.POST['post_key']
    post = db.get(post_key)
    post.delete()
    return HttpResponse('ok')


@role_required('admin')
def edit_post(request):
    """Render the edit form for one post (GET['post_key'])."""
    post_key = request.GET['post_key']
    post = db.get(post_key)
    return render_to_response('admin/edit_post.html', {
        'post': post,
    }, RequestContext(request))


@role_required('admin')
def edit_post_post(request):
    """Handle post-edit submission; summary/add_time logic mirrors
    add_post_post above."""
    post_key = request.GET['post_key']
    post = db.get(post_key)
    title = request.POST['title'].strip()
    content = request.POST['content'].strip()
    sep = '<hr class="more">'
    sep_index = content.find(sep)
    if sep_index != -1:
        summary = content[0:sep_index]
    else:
        summary = None
    add_time_date = request.POST.get('add_time_date').strip()
    add_time_time = request.POST.get('add_time_time').strip()
    if add_time_date and add_time_time:
        add_time = parse_time(add_time_date + ' '+ add_time_time)
    else:
        add_time = None
    post.modify(title, content, summary = summary, add_time = add_time)
    return HttpResponse('ok')


@role_required('admin')
def comment_list(request):
    """List the 150 most recent comments.

    NOTE(review): `page` is computed but never used -- the listing is not
    actually paginated.
    """
    page = int(request.GET.get('page', 1))
    if page < 1:
        page = 1
    comment_list = Comment.all().order('-add_time').fetch(150)
    return render_to_response('admin/comment_list.html', {
        'comment_list': comment_list,
    }, RequestContext(request))


@role_required('admin')
def del_comment(request):
    """Soft-delete one comment.

    NOTE(review): the POST parameter name 'commnet_key' is misspelled, but
    the admin template presumably posts this exact name -- renaming it here
    would break the form, so it is kept as-is.
    """
    commnet_key = request.POST['commnet_key']
    comment = db.get(commnet_key)
    comment.delete()
    return HttpResponse('ok')


@role_required('admin')
def user_list(request):
    """List the 150 most recent users (same unused-`page` caveat as
    comment_list)."""
    page = int(request.GET.get('page', 1))
    if page < 1:
        page = 1
    user_list = App_user.all().order('-add_time').fetch(150)
    return render_to_response('admin/user_list.html', {
        'user_list': user_list,
    }, RequestContext(request))


@role_required('admin')
def sidebar_list(request):
    """List sidebar blocks, newest first."""
    sidebar_list = Sidebar.all().order('-add_time').fetch(100)
    return render_to_response('admin/sidebar_list.html', {
        'sidebar_list': sidebar_list,
    }, RequestContext(request))


@role_required('admin')
def sidebar_add(request):
    """Render the new-sidebar-block form."""
    return render_to_response('admin/sidebar_add.html', {
    }, RequestContext(request))


@role_required('admin')
def sidebar_edit(request):
    """Render the edit form for one sidebar block (GET['item_key'])."""
    item_key = request.GET['item_key']
    sidebar = db.get(item_key)
    return render_to_response('admin/sidebar_edit.html', {
        'sidebar': sidebar
    }, RequestContext(request))


@role_required('admin')
def sidebar_edit_post(request):
    """Handle sidebar-block edit submission; redirects back to the list."""
    title = request.POST['title']
    content = request.POST['content']
    order = int(request.POST['order'])
    item_key = request.GET['item_key']
    sidebar = db.get(item_key)
    sidebar.title = title
    sidebar.content = content
    sidebar.order = order
    sidebar.put()
    return HttpResponseRedirect('/admin/sidebar_list/')


@role_required('admin')
def sidebar_delete(request):
    """Hard-delete one sidebar block (POST['item_key'])."""
    item_key = request.POST['item_key']
    sidebar = db.get(item_key)
    sidebar.delete()
    return HttpResponse('ok')


@role_required('admin')
def sidebar_add_post(request):
    """Handle new-sidebar-block submission; redirects back to the list."""
    title = request.POST['title']
    content = request.POST['content']
    order = int(request.POST['order'])
    s = Sidebar(title = title, content = content, order = order)
    s.put()
    return HttpResponseRedirect('/admin/sidebar_list/')
Python
''' Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''
# Web UI for the appengine-utilities cron scheduler: lists stored cron
# entries and lets the user add or delete them.
import os, cgi, __main__
from google.appengine.ext.webapp import template
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.ext import db
from appengine_utilities import cron


class MainPage(webapp.RequestHandler):
    """Handler for /gaeutilities/: view, add and delete cron entries."""

    def get(self):
        """Render the scheduler form with all stored cron entries."""
        # Instantiating Cron presumably runs any due cron jobs as a side
        # effect -- TODO confirm against appengine_utilities.cron.
        c = cron.Cron()
        query = cron._AppEngineUtilities_Cron.all()
        results = query.fetch(1000)
        template_values = {"cron_entries" : results}
        path = os.path.join(os.path.dirname(__file__), 'templates/scheduler_form.html')
        self.response.out.write(template.render(path, template_values))

    def post(self):
        """Add or delete one cron entry, then re-render the full list."""
        if str(self.request.get('action')) == 'Add':
            cron.Cron().add_cron(str(self.request.get('cron_entry')))
        elif str(self.request.get('action')) == 'Delete':
            entry = db.get(db.Key(str(self.request.get('key'))))
            entry.delete()
        query = cron._AppEngineUtilities_Cron.all()
        results = query.fetch(1000)
        template_values = {"cron_entries" : results}
        path = os.path.join(os.path.dirname(__file__), 'templates/scheduler_form.html')
        self.response.out.write(template.render(path, template_values))


def main():
    """Run the scheduler UI as a CGI webapp application."""
    application = webapp.WSGIApplication(
        [('/gaeutilities/', MainPage)],
        debug=True)
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == "__main__":
    main()
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" # main python imports import os import time import datetime import random import md5 import Cookie import pickle import __main__ from time import strftime # google appengine imports from google.appengine.ext import db from google.appengine.api import memcache #django simplejson import, used for flash from django.utils import simplejson from rotmodel import ROTModel # settings, if you have these set elsewhere, such as your django settings file, # you'll need to adjust the values to pull from there. class _AppEngineUtilities_Session(ROTModel): """ Model for the sessions in the datastore. This contains the identifier and validation information for the session. """ sid = db.StringListProperty() ip = db.StringProperty() ua = db.StringProperty() last_activity = db.DateTimeProperty(auto_now=True) class _AppEngineUtilities_SessionData(ROTModel): """ Model for the session data in the datastore. """ session = db.ReferenceProperty(_AppEngineUtilities_Session) keyname = db.StringProperty() content = db.BlobProperty() class _DatastoreWriter(object): def put(self, keyname, value, session): """ Insert a keyname/value pair into the datastore for the session. Args: keyname: The keyname of the mapping. value: The value of the mapping. """ keyname = session._validate_key(keyname) if value is None: raise ValueError('You must pass a value to put.') # datestore write trumps cookie. If there is a cookie value # with this keyname, delete it so we don't have conflicting # entries. 
if session.cookie_vals.has_key(keyname): del(session.cookie_vals[keyname]) session.output_cookie[session.cookie_name + '_data'] = \ simplejson.dumps(session.cookie_vals) print session.output_cookie.output() sessdata = session._get(keyname=keyname) if sessdata is None: sessdata = _AppEngineUtilities_SessionData() sessdata.session = session.session sessdata.keyname = keyname sessdata.content = pickle.dumps(value) # UNPICKLING CACHE session.cache[keyname] = pickle.dumps(value) session.cache[keyname] = value sessdata.put() session._set_memcache() class _CookieWriter(object): def put(self, keyname, value, session): """ Insert a keyname/value pair into the datastore for the session. Args: keyname: The keyname of the mapping. value: The value of the mapping. """ keyname = session._validate_key(keyname) if value is None: raise ValueError('You must pass a value to put.') # Use simplejson for cookies instead of pickle. session.cookie_vals[keyname] = value # update the requests session cache as well. session.cache[keyname] = value session.output_cookie[session.cookie_name + '_data'] = \ simplejson.dumps(session.cookie_vals) print session.output_cookie.output() class Session(object): """ Sessions used to maintain user presence between requests. Sessions store a unique id as a cookie in the browser and referenced in a datastore object. This maintains user presence by validating requests as visits from the same browser. You can add extra data to the session object by using it as a dictionary object. Values can be any python object that can be pickled. For extra performance, session objects are also store in memcache and kept consistent with the datastore. This increases the performance of read requests to session data. 
""" COOKIE_NAME = 'appengine-utilities-session-sid' # session token DEFAULT_COOKIE_PATH = '/' SESSION_EXPIRE_TIME = 7200 # sessions are valid for 7200 seconds (2 hours) CLEAN_CHECK_PERCENT = 50 # By default, 50% of all requests will clean the database INTEGRATE_FLASH = True # integrate functionality from flash module? CHECK_IP = True # validate sessions by IP CHECK_USER_AGENT = True # validate sessions by user agent SET_COOKIE_EXPIRES = True # Set to True to add expiration field to cookie SESSION_TOKEN_TTL = 5 # Number of seconds a session token is valid for. UPDATE_LAST_ACTIVITY = 60 # Number of seconds that may pass before # last_activity is updated WRITER = "datastore" # Use the datastore writer by default. cookie is the # other option. def __init__(self, cookie_path=DEFAULT_COOKIE_PATH, cookie_name=COOKIE_NAME, session_expire_time=SESSION_EXPIRE_TIME, clean_check_percent=CLEAN_CHECK_PERCENT, integrate_flash=INTEGRATE_FLASH, check_ip=CHECK_IP, check_user_agent=CHECK_USER_AGENT, set_cookie_expires=SET_COOKIE_EXPIRES, session_token_ttl=SESSION_TOKEN_TTL, last_activity_update=UPDATE_LAST_ACTIVITY, writer=WRITER): """ Initializer Args: cookie_name: The name for the session cookie stored in the browser. session_expire_time: The amount of time between requests before the session expires. clean_check_percent: The percentage of requests the will fire off a cleaning routine that deletes stale session data. integrate_flash: If appengine-utilities flash utility should be integrated into the session object. check_ip: If browser IP should be used for session validation check_user_agent: If the browser user agent should be used for sessoin validation. set_cookie_expires: True adds an expires field to the cookie so it saves even if the browser is closed. session_token_ttl: Number of sessions a session token is valid for before it should be regenerated. 
""" self.cookie_path = cookie_path self.cookie_name = cookie_name self.session_expire_time = session_expire_time self.integrate_flash = integrate_flash self.check_user_agent = check_user_agent self.check_ip = check_ip self.set_cookie_expires = set_cookie_expires self.session_token_ttl = session_token_ttl self.last_activity_update = last_activity_update self.writer = writer # make sure the page is not cached in the browser self.no_cache_headers() # Check the cookie and, if necessary, create a new one. self.cache = {} string_cookie = os.environ.get('HTTP_COOKIE', '') self.cookie = Cookie.SimpleCookie() self.output_cookie = Cookie.SimpleCookie() self.cookie.load(string_cookie) try: self.cookie_vals = \ simplejson.loads(self.cookie[self.cookie_name + '_data'].value) # sync self.cache and self.cookie_vals which will make those # values available for all gets immediately. for k in self.cookie_vals: self.cache[k] = self.cookie_vals[k] self.output_cookie[self.cookie_name + '_data'] = self.cookie[self.cookie_name + '_data'] # sync the input cookie with the output cookie except: self.cookie_vals = {} if writer == "cookie": pass else: self.sid = None new_session = True # do_put is used to determine if a datastore write should # happen on this request. 
do_put = False # check for existing cookie if self.cookie.get(cookie_name): self.sid = self.cookie[cookie_name].value self.session = self._get_session() # will return None if # sid expired if self.session: new_session = False if new_session: # start a new session self.session = _AppEngineUtilities_Session() self.session.put() self.sid = self.new_sid() if 'HTTP_USER_AGENT' in os.environ: self.session.ua = os.environ['HTTP_USER_AGENT'] else: self.session.ua = None if 'REMOTE_ADDR' in os.environ: self.session.ip = os.environ['REMOTE_ADDR'] else: self.session.ip = None self.session.sid = [self.sid] # do put() here to get the session key key = self.session.put() else: # check the age of the token to determine if a new one # is required duration = datetime.timedelta(seconds=self.session_token_ttl) session_age_limit = datetime.datetime.now() - duration if self.session.last_activity < session_age_limit: self.sid = self.new_sid() if len(self.session.sid) > 2: self.session.sid.remove(self.session.sid[0]) self.session.sid.append(self.sid) do_put = True else: self.sid = self.session.sid[-1] # check if last_activity needs updated ula = datetime.timedelta(seconds=self.last_activity_update) if datetime.datetime.now() > self.session.last_activity + ula: do_put = True self.output_cookie[cookie_name] = self.sid self.output_cookie[cookie_name]['path'] = cookie_path # UNPICKLING CACHE self.cache['sid'] = pickle.dumps(self.sid) self.cache['sid'] = self.sid if do_put: if self.sid != None or self.sid != "": self.session.put() if self.set_cookie_expires: if not self.output_cookie.has_key(cookie_name + '_data'): self.output_cookie[cookie_name + '_data'] = "" self.output_cookie[cookie_name + '_data']['expires'] = \ self.session_expire_time print self.output_cookie.output() # fire up a Flash object if integration is enabled if self.integrate_flash: import flash self.flash = flash.Flash(cookie=self.cookie) # randomly delete old stale sessions in the datastore (see # CLEAN_CHECK_PERCENT 
variable) if random.randint(1, 100) < clean_check_percent: self._clean_old_sessions() def new_sid(self): """ Create a new session id. """ sid = str(self.session.key()) + md5.new(repr(time.time()) + \ str(random.random())).hexdigest() return sid def _get_session(self): """ Get the user's session from the datastore """ query = _AppEngineUtilities_Session.all() query.filter('sid', self.sid) if self.check_user_agent: query.filter('ua', os.environ['HTTP_USER_AGENT']) if self.check_ip: query.filter('ip', os.environ['REMOTE_ADDR']) results = query.fetch(1) if len(results) is 0: return None else: sessionAge = datetime.datetime.now() - results[0].last_activity if sessionAge.seconds > self.session_expire_time: results[0].delete() return None return results[0] def _get(self, keyname=None): """ Return all of the SessionData object data from the datastore onlye, unless keyname is specified, in which case only that instance of SessionData is returned. Important: This does not interact with memcache and pulls directly from the datastore. This also does not get items from the cookie store. Args: keyname: The keyname of the value you are trying to retrieve. """ query = _AppEngineUtilities_SessionData.all() query.filter('session', self.session) if keyname != None: query.filter('keyname =', keyname) results = query.fetch(1000) if len(results) is 0: return None if keyname != None: return results[0] return results def _validate_key(self, keyname): """ Validate the keyname, making sure it is set and not a reserved name. """ if keyname is None: raise ValueError('You must pass a keyname for the session' + \ ' data content.') elif keyname in ('sid', 'flash'): raise ValueError(keyname + ' is a reserved keyname.') if type(keyname) != type([str, unicode]): return str(keyname) return keyname def _put(self, keyname, value): """ Insert a keyname/value pair into the datastore for the session. Args: keyname: The keyname of the mapping. value: The value of the mapping. 
""" if self.writer == "datastore": writer = _DatastoreWriter() else: writer = _CookieWriter() writer.put(keyname, value, self) def _delete_session(self): """ Delete the session and all session data. """ if hasattr(self, "session"): sessiondata = self._get() # delete from datastore if sessiondata is not None: for sd in sessiondata: sd.delete() # delete from memcache memcache.delete('sid-'+str(self.session.key())) # delete the session now that all items that reference it are deleted. self.session.delete() # unset any cookie values that may exist self.cookie_vals = {} self.cache = {} self.output_cookie[self.cookie_name + '_data'] = \ simplejson.dumps(self.cookie_vals) print self.output_cookie.output() # if the event class has been loaded, fire off the sessionDeleted event if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('sessionDelete') def delete(self): """ Delete the current session and start a new one. This is useful for when you need to get rid of all data tied to a current session, such as when you are logging out a user. """ self._delete_session() @classmethod def delete_all_sessions(cls): """ Deletes all sessions and session data from the data store and memcache: NOTE: This is not fully developed. It also will not delete any cookie data as this does not work for each incoming request. Keep this in mind if you are using the cookie writer. """ all_sessions_deleted = False all_data_deleted = False while not all_sessions_deleted: query = _AppEngineUtilities_Session.all() results = query.fetch(75) if len(results) is 0: all_sessions_deleted = True else: for result in results: memcache.delete('sid-' + str(result.key())) result.delete() while not all_data_deleted: query = _AppEngineUtilities_SessionData.all() results = query.fetch(75) if len(results) is 0: all_data_deleted = True else: for result in results: result.delete() def _clean_old_sessions(self): """ Delete expired sessions from the datastore. 
This is only called for CLEAN_CHECK_PERCENT percent of requests because it could be rather intensive. """ duration = datetime.timedelta(seconds=self.session_expire_time) session_age = datetime.datetime.now() - duration query = _AppEngineUtilities_Session.all() query.filter('last_activity <', session_age) results = query.fetch(50) for result in results: data_query = _AppEngineUtilities_SessionData.all() data_query.filter('session', result) data_results = data_query.fetch(1000) for data_result in data_results: data_result.delete() memcache.delete('sid-'+str(result.key())) result.delete() # Implement Python container methods def __getitem__(self, keyname): """ Get item from session data. keyname: The keyname of the mapping. """ # flash messages don't go in the datastore if self.integrate_flash and (keyname == 'flash'): return self.flash.msg if keyname in self.cache: # UNPICKLING CACHE return pickle.loads(str(self.cache[keyname])) return self.cache[keyname] if keyname in self.cookie_vals: return self.cookie_vals[keyname] if hasattr(self, "session"): mc = memcache.get('sid-'+str(self.session.key())) if mc is not None: if keyname in mc: return mc[keyname] data = self._get(keyname) if data: #UNPICKLING CACHE self.cache[keyname] = data.content self.cache[keyname] = pickle.loads(data.content) self._set_memcache() return pickle.loads(data.content) else: raise KeyError(str(keyname)) raise KeyError(str(keyname)) def __setitem__(self, keyname, value): """ Set item in session data. Args: keyname: They keyname of the mapping. value: The value of mapping. """ if self.integrate_flash and (keyname == 'flash'): self.flash.msg = value else: keyname = self._validate_key(keyname) self.cache[keyname] = value # self._set_memcache() # commented out because this is done in the datestore put return self._put(keyname, value) def __delitem__(self, keyname): """ Delete item from session data. Args: keyname: The keyname of the object to delete. 
""" bad_key = False sessdata = self._get(keyname = keyname) if sessdata is None: bad_key = True else: sessdata.delete() if keyname in self.cookie_vals: del self.cookie_vals[keyname] bad_key = False self.output_cookie[self.cookie_name + '_data'] = \ simplejson.dumps(self.cookie_vals) print self.output_cookie.output() if bad_key: raise KeyError(str(keyname)) if keyname in self.cache: del self.cache[keyname] self._set_memcache() def __len__(self): """ Return size of session. """ # check memcache first if hasattr(self, "session"): mc = memcache.get('sid-'+str(self.session.key())) if mc is not None: return len(mc) + len(self.cookie_vals) results = self._get() if results is not None: return len(results) + len(self.cookie_vals) else: return 0 return len(self.cookie_vals) def __contains__(self, keyname): """ Check if an item is in the session data. Args: keyname: The keyname being searched. """ try: r = self.__getitem__(keyname) except KeyError: return False return True def __iter__(self): """ Iterate over the keys in the session data. """ # try memcache first if hasattr(self, "session"): mc = memcache.get('sid-'+str(self.session.key())) if mc is not None: for k in mc: yield k else: for k in self._get(): yield k.keyname for k in self.cookie_vals: yield k def __str__(self): """ Return string representation. """ #if self._get(): return '{' + ', '.join(['"%s" = "%s"' % (k, self[k]) for k in self]) + '}' #else: # return [] def _set_memcache(self): """ Set a memcache object with all the session data. Optionally you can add a key and value to the memcache for put operations. """ # Pull directly from the datastore in order to ensure that the # information is as up to date as possible. if self.writer == "datastore": data = {} sessiondata = self._get() if sessiondata is not None: for sd in sessiondata: data[sd.keyname] = pickle.loads(sd.content) memcache.set('sid-'+str(self.session.key()), data, \ self.session_expire_time) def cycle_key(self): """ Changes the session id. 
""" self.sid = self.new_sid() if len(self.session.sid) > 2: self.session.sid.remove(self.session.sid[0]) self.session.sid.append(self.sid) def flush(self): """ Delete's the current session, creating a new one. """ self._delete_session() self.__init__() def no_cache_headers(self): """ Adds headers, avoiding any page caching in the browser. Useful for highly dynamic sites. """ print "Expires: Tue, 03 Jul 2001 06:00:00 GMT" print strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z") print "Cache-Control: no-store, no-cache, must-revalidate, max-age=0" print "Cache-Control: post-check=0, pre-check=0" print "Pragma: no-cache" def clear(self): """ Remove all items """ sessiondata = self._get() # delete from datastore if sessiondata is not None: for sd in sessiondata: sd.delete() # delete from memcache memcache.delete('sid-'+str(self.session.key())) self.cache = {} self.cookie_vals = {} self.output_cookie[self.cookie_name + '_data'] = \ simplejson.dumps(self.cookie_vals) print self.output_cookie.output() def has_key(self, keyname): """ Equivalent to k in a, use that form in new code """ return self.__contains__(keyname) def items(self): """ A copy of list of (key, value) pairs """ op = {} for k in self: op[k] = self[k] return op def keys(self): """ List of keys. """ l = [] for k in self: l.append(k) return l def update(*dicts): """ Updates with key/value pairs from b, overwriting existing keys, returns None """ for dict in dicts: for k in dict: self._put(k, dict[k]) return None def values(self): """ A copy list of values. 
""" v = [] for k in self: v.append(self[k]) return v def get(self, keyname, default = None): """ a[k] if k in a, else x """ try: return self.__getitem__(keyname) except KeyError: if default is not None: return default return None def setdefault(self, keyname, default = None): """ a[k] if k in a, else x (also setting it) """ try: return self.__getitem__(keyname) except KeyError: if default is not None: self.__setitem__(keyname, default) return default return None @classmethod def check_token(cls, cookie_name=COOKIE_NAME, delete_invalid=True): """ Retrieves the token from a cookie and validates that it is a valid token for an existing cookie. Cookie validation is based on the token existing on a session that has not expired. This is useful for determining if datastore or cookie writer should be used in hybrid implementations. Args: cookie_name: Name of the cookie to check for a token. delete_invalid: If the token is not valid, delete the session cookie, to avoid datastore queries on future requests. Returns True/False """ string_cookie = os.environ.get('HTTP_COOKIE', '') cookie = Cookie.SimpleCookie() cookie.load(string_cookie) if cookie.has_key(cookie_name): query = _AppEngineUtilities_Session.all() query.filter('sid', cookie[cookie_name].value) results = query.fetch(1) if len(results) > 0: return True else: if delete_invalid: output_cookie = Cookie.SimpleCookie() output_cookie[cookie_name] = cookie[cookie_name] output_cookie[cookie_name]['expires'] = 0 print output_cookie.output() return False
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import os import cgi import re import datetime import pickle from google.appengine.ext import db from google.appengine.api import urlfetch from google.appengine.api import memcache APPLICATION_PORT = '8080' CRON_PORT = '8081' class _AppEngineUtilities_Cron(db.Model): """ Model for the tasks in the datastore. This contains the scheduling and url information, as well as a field that sets the next time the instance should run. 
""" cron_entry = db.StringProperty() next_run = db.DateTimeProperty() cron_compiled = db.BlobProperty() url = db.LinkProperty() class Cron(object): """ Cron is a scheduling utility built for appengine, modeled after crontab for unix systems. While true scheduled tasks are not possible within the Appengine environment currently, this is an attmempt to provide a request based alternate. You configure the tasks in an included interface, and the import the class on any request you want capable of running tasks. On each request where Cron is imported, the list of tasks that need to be run will be pulled and run. A task is a url within your application. It's important to make sure that these requests fun quickly, or you could risk timing out the actual request. See the documentation for more information on configuring your application to support Cron and setting up tasks. """ def __init__(self): # Check if any tasks need to be run query = _AppEngineUtilities_Cron.all() query.filter('next_run <= ', datetime.datetime.now()) results = query.fetch(1000) if len(results) > 0: one_second = datetime.timedelta(seconds = 1) before = datetime.datetime.now() for r in results: if re.search(':' + APPLICATION_PORT, r.url): r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url) #result = urlfetch.fetch(r.url) diff = datetime.datetime.now() - before if int(diff.seconds) < 1: if memcache.add(str(r.key), "running"): result = urlfetch.fetch(r.url) r.next_run = self._get_next_run(pickle.loads(r.cron_compiled)) r.put() memcache.delete(str(r.key)) else: break def add_cron(self, cron_string): cron = cron_string.split(" ") if len(cron) is not 6: raise ValueError, 'Invalid cron string. 
Format: * * * * * url' cron = { 'min': cron[0], 'hour': cron[1], 'day': cron[2], 'mon': cron[3], 'dow': cron[4], 'url': cron[5], } cron_compiled = self._validate_cron(cron) next_run = self._get_next_run(cron_compiled) cron_entry = _AppEngineUtilities_Cron() cron_entry.cron_entry = cron_string cron_entry.next_run = next_run cron_entry.cron_compiled = pickle.dumps(cron_compiled) cron_entry.url = cron["url"] cron_entry.put() def _validate_cron(self, cron): """ Parse the field to determine whether it is an integer or lists, also converting strings to integers where necessary. If passed bad values, raises a ValueError. """ parsers = { 'dow': self._validate_dow, 'mon': self._validate_mon, 'day': self._validate_day, 'hour': self._validate_hour, 'min': self._validate_min, 'url': self. _validate_url, } for el in cron: parse = parsers[el] cron[el] = parse(cron[el]) return cron def _validate_type(self, v, t): """ Validates that the number (v) passed is in the correct range for the type (t). Raise ValueError, if validation fails. Valid ranges: day of week = 0-7 month = 1-12 day = 1-31 hour = 0-23 minute = 0-59 All can * which will then return the range for that entire type. """ if t == "dow": if v >= 0 and v <= 7: return [v] elif v == "*": return "*" else: raise ValueError, "Invalid day of week." elif t == "mon": if v >= 1 and v <= 12: return [v] elif v == "*": return range(1, 12) else: raise ValueError, "Invalid month." elif t == "day": if v >= 1 and v <= 31: return [v] elif v == "*": return range(1, 31) else: raise ValueError, "Invalid day." elif t == "hour": if v >= 0 and v <= 23: return [v] elif v == "*": return range(0, 23) else: raise ValueError, "Invalid hour." elif t == "min": if v >= 0 and v <= 59: return [v] elif v == "*": return range(0, 59) else: raise ValueError, "Invalid minute." def _validate_list(self, l, t): """ Validates a crontab list. Lists are numerical values seperated by a comma with no spaces. 
Ex: 0,5,10,15 Arguments: l: comma seperated list of numbers t: type used for validation, valid values are dow, mon, day, hour, min """ elements = l.split(",") return_list = [] # we have a list, validate all of them for e in elements: if "-" in e: return_list.extend(self._validate_range(e, t)) else: try: v = int(e) self._validate_type(v, t) return_list.append(v) except: raise ValueError, "Names are not allowed in lists." # return a list of integers return return_list def _validate_range(self, r, t): """ Validates a crontab range. Ranges are 2 numerical values seperated by a dash with no spaces. Ex: 0-10 Arguments: r: dash seperated list of 2 numbers t: type used for validation, valid values are dow, mon, day, hour, min """ elements = r.split('-') # a range should be 2 elements if len(elements) is not 2: raise ValueError, "Invalid range passed: " + str(r) # validate the minimum and maximum are valid for the type for e in elements: self._validate_type(int(e), t) # return a list of the numbers in the range. # +1 makes sure the end point is included in the return value return range(int(elements[0]), int(elements[1]) + 1) def _validate_step(self, s, t): """ Validates a crontab step. Steps are complicated. They can be based on a range 1-10/2 or just step through all valid */2. When parsing times you should always check for step first and see if it has a range or not, before checking for ranges because this will handle steps of ranges returning the final list. Steps of lists is not supported. 
Arguments: s: slash seperated string t: type used for validation, valid values are dow, mon, day, hour, min """ elements = s.split('/') # a range should be 2 elements if len(elements) is not 2: raise ValueError, "Invalid step passed: " + str(s) try: step = int(elements[1]) except: raise ValueError, "Invalid step provided " + str(s) r_list = [] # if the first element is *, use all valid numbers if elements[0] is "*" or elements[0] is "": r_list.extend(self._validate_type('*', t)) # check and see if there is a list of ranges elif "," in elements[0]: ranges = elements[0].split(",") for r in ranges: # if it's a range, we need to manage that if "-" in r: r_list.extend(self._validate_range(r, t)) else: try: r_list.extend(int(r)) except: raise ValueError, "Invalid step provided " + str(s) elif "-" in elements[0]: r_list.extend(self._validate_range(elements[0], t)) return range(r_list[0], r_list[-1] + 1, step) def _validate_dow(self, dow): """ """ # if dow is * return it. This is for date parsing where * does not mean # every day for crontab entries. if dow is "*": return dow days = { 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6, # per man crontab sunday can be 0 or 7. 'sun': [0, 7], } if dow in days: dow = days[dow] return [dow] # if dow is * return it. This is for date parsing where * does not mean # every day for crontab entries. 
elif dow is "*": return dow elif "/" in dow: return(self._validate_step(dow, "dow")) elif "," in dow: return(self._validate_list(dow, "dow")) elif "-" in dow: return(self._validate_range(dow, "dow")) else: valid_numbers = range(0, 8) if not int(dow) in valid_numbers: raise ValueError, "Invalid day of week " + str(dow) else: return [int(dow)] def _validate_mon(self, mon): months = { 'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, } if mon in months: mon = months[mon] return [mon] elif mon is "*": return range(1, 13) elif "/" in mon: return(self._validate_step(mon, "mon")) elif "," in mon: return(self._validate_list(mon, "mon")) elif "-" in mon: return(self._validate_range(mon, "mon")) else: valid_numbers = range(1, 13) if not int(mon) in valid_numbers: raise ValueError, "Invalid month " + str(mon) else: return [int(mon)] def _validate_day(self, day): if day is "*": return range(1, 32) elif "/" in day: return(self._validate_step(day, "day")) elif "," in day: return(self._validate_list(day, "day")) elif "-" in day: return(self._validate_range(day, "day")) else: valid_numbers = range(1, 31) if not int(day) in valid_numbers: raise ValueError, "Invalid day " + str(day) else: return [int(day)] def _validate_hour(self, hour): if hour is "*": return range(0, 24) elif "/" in hour: return(self._validate_step(hour, "hour")) elif "," in hour: return(self._validate_list(hour, "hour")) elif "-" in hour: return(self._validate_range(hour, "hour")) else: valid_numbers = range(0, 23) if not int(hour) in valid_numbers: raise ValueError, "Invalid hour " + str(hour) else: return [int(hour)] def _validate_min(self, min): if min is "*": return range(0, 60) elif "/" in min: return(self._validate_step(min, "min")) elif "," in min: return(self._validate_list(min, "min")) elif "-" in min: return(self._validate_range(min, "min")) else: valid_numbers = range(0, 59) if not int(min) in valid_numbers: raise ValueError, 
"Invalid min " + str(min) else: return [int(min)] def _validate_url(self, url): # kludge for issue 842, right now we use request headers # to set the host. if url[0] is not "/": url = "/" + url url = 'http://' + str(os.environ['HTTP_HOST']) + url return url # content below is for when that issue gets fixed #regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE) #if regex.match(url) is not None: # return url #else: # raise ValueError, "Invalid url " + url def _calc_month(self, next_run, cron): while True: if cron["mon"][-1] < next_run.month: next_run = next_run.replace(year=next_run.year+1, \ month=cron["mon"][0], \ day=1,hour=0,minute=0) else: if next_run.month in cron["mon"]: return next_run else: one_month = datetime.timedelta(months=1) next_run = next_run + one_month def _calc_day(self, next_run, cron): # start with dow as per cron if dow and day are set # then dow is used if it comes before day. If dow # is *, then ignore it. if str(cron["dow"]) != str("*"): # convert any integers to lists in order to easily compare values m = next_run.month while True: if next_run.month is not m: next_run = next_run.replace(hour=0, minute=0) next_run = self._calc_month(next_run, cron) if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]: return next_run else: one_day = datetime.timedelta(days=1) next_run = next_run + one_day else: m = next_run.month while True: if next_run.month is not m: next_run = next_run.replace(hour=0, minute=0) next_run = self._calc_month(next_run, cron) # if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day: if next_run.day in cron["day"]: return next_run else: one_day = datetime.timedelta(days=1) next_run = next_run + one_day def _calc_hour(self, next_run, cron): m = next_run.month d = next_run.day while True: if next_run.month is not m: next_run = next_run.replace(hour=0, minute=0) next_run = self._calc_month(next_run, cron) if next_run.day is not d: next_run = next_run.replace(hour=0) next_run = 
self._calc_day(next_run, cron) if next_run.hour in cron["hour"]: return next_run else: m = next_run.month d = next_run.day one_hour = datetime.timedelta(hours=1) next_run = next_run + one_hour def _calc_minute(self, next_run, cron): one_minute = datetime.timedelta(minutes=1) m = next_run.month d = next_run.day h = next_run.hour while True: if next_run.month is not m: next_run = next_run.replace(minute=0) next_run = self._calc_month(next_run, cron) if next_run.day is not d: next_run = next_run.replace(minute=0) next_run = self._calc_day(next_run, cron) if next_run.hour is not h: next_run = next_run.replace(minute=0) next_run = self._calc_day(next_run, cron) if next_run.minute in cron["min"]: return next_run else: m = next_run.month d = next_run.day h = next_run.hour next_run = next_run + one_minute def _get_next_run(self, cron): one_minute = datetime.timedelta(minutes=1) # go up 1 minute because it shouldn't happen right when added now = datetime.datetime.now() + one_minute next_run = now.replace(second=0, microsecond=0) # start with month, which will also help calculate year next_run = self._calc_month(next_run, cron) next_run = self._calc_day(next_run, cron) next_run = self._calc_hour(next_run, cron) next_run = self._calc_minute(next_run, cron) return next_run
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from google.appengine.ext import db from cache import Cache class Paginator(object): """ This class is used for maintaining pagination objects. """ @classmethod def get(cls, count=10, q_filters={}, search=None, start=None, model=None, \ order='ASC', order_by='__key__'): """ get queries the database on model, starting with key, ordered by order. It receives count + 1 items, returning count and setting a next field to the count + 1 item key. 
It then reverses the sort, and grabs count objects, returning the last as a the previous. Arguments: count: The amount of entries to pull on query q_filter: The filter value (optional) search: Search is used for SearchableModel searches start: The key to start the page from model: The Model object to query against. This is not a string, it must be a Model derived object. order: The order in which to pull the values. order_by: The attribute to order results by. This defaults to __key__ Returns a dict: { 'next': next_key, 'prev': prev_key, 'items': entities_pulled } """ # argument validation if model == None: raise ValueError('You must pass a model to query') # a valid model object will have a gql method. if callable(model.gql) == False: raise TypeError('model must be a valid model object.') # cache check cache_string = "gae_paginator_" for q_filter in q_filters: cache_string = cache_string + q_filter + "_" + q_filters[q_filter] + "_" cache_string = cache_string + "index" c = Cache() if c.has_key(cache_string): return c[cache_string] # build query query = model.all() if len(q_filters) > 0: for q_filter in q_filters: query.filter(q_filter + " = ", q_filters[q_filter]) if start: if order.lower() == "DESC".lower(): query.filter(order_by + " <", start) else: query.filter(order_by + " >", start) if search: query.search(search) if order.lower() == "DESC".lower(): query.order("-" + order_by) else: query.order(order_by) results = query.fetch(count + 1) if len(results) == count + 1: next = getattr(results[count - 1], order_by) # reverse the query to get the value for previous if start is not None: rquery = model.all() for q_filter in q_filters: rquery.filter(q_filter + " = ", q_filters[q_filter]) if search: query.search(search) if order.lower() == "DESC".lower(): rquery.order(order_by) else: rquery.order("-" + order_by) rresults = rquery.fetch(count) previous = getattr(results[0], order_by) else: previous = None else: next = None return { "results": results, "next": next, 
"previous": previous }
Python
import Cookie import os from appengine_utilities import sessions class SessionMiddleware(object): TEST_COOKIE_NAME = 'testcookie' TEST_COOKIE_VALUE = 'worked' def process_request(self, request): """ Check to see if a valid session token exists, if not, then use a cookie only session. It's up to the application to convert the session to a datastore session. Once this has been done, the session will continue to use the datastore unless the writer is set to "cookie". Setting the session to use the datastore is as easy as resetting request.session anywhere if your application. Example: from common.appengine_utilities import sessions request.session = sessions.Session() """ self.request = request if sessions.Session.check_token(): request.session = sessions.Session() else: request.session = sessions.Session(writer="cookie") request.session.set_test_cookie = self.set_test_cookie request.session.test_cookie_worked = self.test_cookie_worked request.session.delete_test_cookie = self.delete_test_cookie request.session.save = self.save def set_test_cookie(self): string_cookie = os.environ.get('HTTP_COOKIE', '') self.cookie = Cookie.SimpleCookie() self.cookie.load(string_cookie) self.cookie[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE print self.cookie def test_cookie_worked(self): string_cookie = os.environ.get('HTTP_COOKIE', '') self.cookie = Cookie.SimpleCookie() self.cookie.load(string_cookie) return self.cookie.get(self.TEST_COOKIE_NAME) def delete_test_cookie(self): string_cookie = os.environ.get('HTTP_COOKIE', '') self.cookie = Cookie.SimpleCookie() self.cookie.load(string_cookie) self.cookie[self.TEST_COOKIE_NAME] = '' self.cookie[self.TEST_COOKIE_NAME]['path'] = '/' self.cookie[self.TEST_COOKIE_NAME]['expires'] = 0 def save(self): self.request.session = sessions.Session()
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import __main__ class Event(object): """ Event is a simple publish/subscribe based event dispatcher It sets itself to the __main__ function. In order to use it, you must import it and __main__ """ def __init__(self): self.events = [] def subscribe(self, event, callback, args = None): """ This method will subscribe a callback function to an event name. 
""" if not {"event": event, "callback": callback, "args": args, } \ in self.events: self.events.append({"event": event, "callback": callback, \ "args": args, }) def unsubscribe(self, event, callback, args = None): """ This method will unsubscribe a callback from an event. """ if {"event": event, "callback": callback, "args": args, }\ in self.events: self.events.remove({"event": event, "callback": callback,\ "args": args, }) def fire_event(self, event = None): """ This method is what a method uses to fire an event, initiating all registered callbacks """ for e in self.events: if e["event"] == event: if type(e["args"]) == type([]): e["callback"](*e["args"]) elif type(e["args"]) == type({}): e["callback"](**e["args"]) elif e["args"] == None: e["callback"]() else: e["callback"](e["args"]) """ Assign to the event class to __main__ """ __main__.AEU_Events = Event()
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import os import sys import Cookie import pickle from time import strftime from django.utils import simplejson COOKIE_NAME = 'appengine-utilities-flash' class Flash(object): """ Send messages to the user between pages. When you instantiate the class, the attribute 'msg' will be set from the cookie, and the cookie will be deleted. If there is no flash cookie, 'msg' will default to None. To set a flash message for the next page, simply set the 'msg' attribute. 
Example psuedocode: if new_entity.put(): flash = Flash() flash.msg = 'Your new entity has been created!' return redirect_to_entity_list() Then in the template on the next page: {% if flash.msg %} <div class="flash-msg">{{ flash.msg }}</div> {% endif %} """ def __init__(self, cookie=None): """ Load the flash message and clear the cookie. """ self.no_cache_headers() # load cookie if cookie is None: browser_cookie = os.environ.get('HTTP_COOKIE', '') self.cookie = Cookie.SimpleCookie() self.cookie.load(browser_cookie) else: self.cookie = cookie # check for flash data if self.cookie.get(COOKIE_NAME): # set 'msg' attribute cookie_val = self.cookie[COOKIE_NAME].value # we don't want to trigger __setattr__(), which creates a cookie try: self.__dict__['msg'] = simplejson.loads(cookie_val) except: # not able to load the json, so do not set message. This should # catch for when the browser doesn't delete the cookie in time for # the next request, and only blanks out the content. pass # clear the cookie self.cookie[COOKIE_NAME] = '' self.cookie[COOKIE_NAME]['path'] = '/' self.cookie[COOKIE_NAME]['expires'] = 0 print self.cookie[COOKIE_NAME] else: # default 'msg' attribute to None self.__dict__['msg'] = None def __setattr__(self, name, value): """ Create a cookie when setting the 'msg' attribute. """ if name == 'cookie': self.__dict__['cookie'] = value elif name == 'msg': self.__dict__['msg'] = value self.__dict__['cookie'][COOKIE_NAME] = simplejson.dumps(value) self.__dict__['cookie'][COOKIE_NAME]['path'] = '/' print self.cookie else: raise ValueError('You can only set the "msg" attribute.') def no_cache_headers(self): """ Adds headers, avoiding any page caching in the browser. Useful for highly dynamic sites. """ print "Expires: Tue, 03 Jul 2001 06:00:00 GMT" print strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z") print "Cache-Control: no-store, no-cache, must-revalidate, max-age=0" print "Cache-Control: post-check=0, pre-check=0" print "Pragma: no-cache"
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from google.appengine.ext import db class ROTModel(db.Model): """ ROTModel overrides the db.Model put function, having it retry up to 3 times when it encounters a datastore timeout. This is to try an maximize the chance the data makes it into the datastore when attempted. If it fails, it raises the db.Timeout error and the calling application will need to handle that. 
""" def put(self): count = 0 while count < 3: try: return db.Model.put(self) except db.Timeout: count += 1 else: raise db.Timeout()
Python
""" Copyright (c) 2008, appengine-utilities project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the appengine-utilities project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" # main python imports import datetime import pickle import random import __main__ # google appengine import from google.appengine.ext import db from google.appengine.api import memcache # settings DEFAULT_TIMEOUT = 3600 # cache expires after one hour (3600 sec) CLEAN_CHECK_PERCENT = 50 # 15% of all requests will clean the database MAX_HITS_TO_CLEAN = 100 # the maximum number of cache hits to clean on attempt class _AppEngineUtilities_Cache(db.Model): # It's up to the application to determine the format of their keys cachekey = db.StringProperty() createTime = db.DateTimeProperty(auto_now_add=True) timeout = db.DateTimeProperty() value = db.BlobProperty() class Cache(object): """ Cache is used for storing pregenerated output and/or objects in the Big Table datastore to minimize the amount of queries needed for page displays. The idea is that complex queries that generate the same results really should only be run once. Cache can be used to store pregenerated value made from queries (or other calls such as urlFetch()), or the query objects themselves. """ def __init__(self, clean_check_percent = CLEAN_CHECK_PERCENT, max_hits_to_clean = MAX_HITS_TO_CLEAN, default_timeout = DEFAULT_TIMEOUT): """ Initializer Args: clean_check_percent: how often cache initialization should run the cache cleanup max_hits_to_clean: maximum number of stale hits to clean default_timeout: default length a cache item is good for """ self.clean_check_percent = clean_check_percent self.max_hits_to_clean = max_hits_to_clean self.default_timeout = default_timeout if random.randint(1, 100) < self.clean_check_percent: self._clean_cache() if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheInitialized') def _clean_cache(self): """ _clean_cache is a routine that is run to find and delete cache items that are old. This helps keep the size of your over all datastore down. 
""" query = _AppEngineUtilities_Cache.all() query.filter('timeout < ', datetime.datetime.now()) results = query.fetch(self.max_hits_to_clean) db.delete(results) #for result in results: # result.delete() def _validate_key(self, key): if key == None: raise KeyError def _validate_value(self, value): if value == None: raise ValueError def _validate_timeout(self, timeout): if timeout == None: timeout = datetime.datetime.now() +\ datetime.timedelta(seconds=DEFAULT_TIMEOUT) if type(timeout) == type(1): timeout = datetime.datetime.now() + \ datetime.timedelta(seconds = timeout) if type(timeout) != datetime.datetime: raise TypeError if timeout < datetime.datetime.now(): raise ValueError return timeout def add(self, key = None, value = None, timeout = None): """ add adds an entry to the cache, if one does not already exist. """ self._validate_key(key) self._validate_value(value) timeout = self._validate_timeout(timeout) if key in self: raise KeyError cacheEntry = _AppEngineUtilities_Cache() cacheEntry.cachekey = key cacheEntry.value = pickle.dumps(value) cacheEntry.timeout = timeout cacheEntry.put() memcache_timeout = timeout - datetime.datetime.now() memcache.set('cache-'+key, value, int(memcache_timeout.seconds)) if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheAdded') def set(self, key = None, value = None, timeout = None): """ add adds an entry to the cache, overwriting an existing value if one already exists. 
""" self._validate_key(key) self._validate_value(value) timeout = self._validate_timeout(timeout) cacheEntry = self._read(key) if not cacheEntry: cacheEntry = _AppEngineUtilities_Cache() cacheEntry.cachekey = key cacheEntry.value = pickle.dumps(value) cacheEntry.timeout = timeout cacheEntry.put() memcache_timeout = timeout - datetime.datetime.now() memcache.set('cache-'+key, value, int(memcache_timeout.seconds)) if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheSet') def _read(self, key = None): """ _read returns a cache object determined by the key. It's set to private because it returns a db.Model object, and also does not handle the unpickling of objects making it not the best candidate for use. The special method __getitem__ is the preferred access method for cache data. """ query = _AppEngineUtilities_Cache.all() query.filter('cachekey', key) query.filter('timeout > ', datetime.datetime.now()) results = query.fetch(1) if len(results) is 0: return None return results[0] if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheReadFromDatastore') if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheRead') def delete(self, key = None): """ Deletes a cache object determined by the key. """ memcache.delete('cache-'+key) result = self._read(key) if result: if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheDeleted') result.delete() def get(self, key): """ get is used to return the cache value associated with the key passed. 
""" mc = memcache.get('cache-'+key) if mc: if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheReadFromMemcache') if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheRead') return mc result = self._read(key) if result: timeout = result.timeout - datetime.datetime.now() # print timeout.seconds memcache.set('cache-'+key, pickle.loads(result.value), int(timeout.seconds)) return pickle.loads(result.value) else: raise KeyError def get_many(self, keys): """ Returns a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict. """ dict = {} for key in keys: value = self.get(key) if value is not None: dict[key] = val return dict def __getitem__(self, key): """ __getitem__ is necessary for this object to emulate a container. """ return self.get(key) def __setitem__(self, key, value): """ __setitem__ is necessary for this object to emulate a container. """ return self.set(key, value) def __delitem__(self, key): """ Implement the 'del' keyword """ return self.delete(key) def __contains__(self, key): """ Implements "in" operator """ try: r = self.__getitem__(key) except KeyError: return False return True def has_key(self, keyname): """ Equivalent to k in a, use that form in new code """ return self.__contains__(keyname)
Python
#-*- coding: utf-8 -*-
from google.appengine.api import users
from models.main import App_user
from django.http import HttpResponse, HttpResponseRedirect
import datetime
from logics.main import get_user, set_user


def logout(request):
    """Clear the session e-mail and redirect through the GAE logout URL."""
    url = request.GET.get('url', '/')
    request.session['email'] = ''
    return HttpResponseRedirect(users.create_logout_url(url))


class AuthMiddleware(object):
    """Binds the App_user matching the current visitor onto the request."""

    def process_request(self, request):
        # Check the session first.
        app_user = None
        user_email = request.session.get('email')
        if user_email:
            app_user = App_user.all().filter('email', user_email).get()
        else:
            # No session: fall back to the Google Accounts user, creating
            # an App_user record on first sight.
            gae_user = users.get_current_user()
            if gae_user:
                user_email = gae_user.email()
                nickname = gae_user.nickname()
                if users.is_current_user_admin():
                    level = 10
                else:
                    level = 1
                app_user = App_user.all().filter('email', user_email).get()
                if app_user is None:
                    app_user = App_user(
                        email=user_email,
                        nickname=nickname,
                        is_verified=True,
                        add_time=datetime.datetime.utcnow())
                app_user.level = level
                app_user.put()
                request.session['email'] = user_email
        request.user = app_user
        set_user(app_user)
Python
from django.conf import settings


def bind_settings(request):
    """Template context processor exposing Django settings as ``settings``."""
    return {'settings': settings}
Python
#-*- coding: utf-8 -*-
from django.http import HttpResponsePermanentRedirect
from django.conf import settings


class Domain_transMiddleware(object):
    """301-redirect requests on a foreign host to the canonical domain."""

    def process_request(self, request):
        host = request.META['SERVER_NAME']
        # Leave localhost (development) and the canonical domain untouched.
        if host == 'localhost' or host == settings.DOMAIN:
            return None
        target = settings.BASE_URL + request.path
        query = request.META['QUERY_STRING']
        if query:
            target = target + '?' + query
        return HttpResponsePermanentRedirect(target)
Python
#-*- coding: utf-8 -*-
from django import template
from models.main import App_user, Post, Comment, Tag, Sidebar
from django.template import Node, NodeList, Template, Context, Variable

register = template.Library()


class Sidebar_c(template.Node):
    """Renders the enclosed template nodes once per Sidebar entity."""

    def __init__(self, nodelist_loop):
        self.nodelist_loop = nodelist_loop

    def render(self, context):
        rendered = NodeList()
        sidebars = Sidebar.all().order('order').fetch(100)
        context.push()
        for sidebar in sidebars:
            # Fresh context frame per sidebar so 'sidebar' does not leak.
            context.push()
            context['sidebar'] = sidebar
            for node in self.nodelist_loop:
                rendered.append(node.render(context))
            context.pop()
        context.pop()
        return rendered.render(context)


@register.tag(name="sidebar_list")
def do_sidebar(parser, token):
    """
    {% sidebar_list %} ... {% endsidebar_list %}
    """
    inner_nodes = parser.parse(('endsidebar_list',))
    parser.delete_first_token()
    return Sidebar_c(inner_nodes)


class Space_line_lessNode(Node):
    """Strips inter-tag whitespace and newlines from its rendered contents."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        from django.utils.html import strip_spaces_between_tags
        text = strip_spaces_between_tags(self.nodelist.render(context).strip())
        return text.replace('\r\n', '').replace('\n', '')


@register.tag(name="space_line_less")
def space_line_less(parser, token):
    """{% space_line_less %} ... {% endspace_line_less %}"""
    inner_nodes = parser.parse(('endspace_line_less',))
    parser.delete_first_token()
    return Space_line_lessNode(inner_nodes)
Python
#coding=utf-8
from django import template
from django.template.defaultfilters import stringfilter
import datetime

register = template.Library()

from common import time_utils
from common.utils import process_html
import hashlib
from google.appengine.api import users


def make_list(start, end):
    """
    {{ start|make_list:end }} -> [start, start+1, ..., end-1]
    """
    return range(start, end)
register.filter('make_list', make_list)


def human_time(target_time):
    """Relative-time rendering (delegates to time_utils)."""
    return time_utils.human_time(target_time)
register.filter('human_time', human_time)


def format_time(target_time):
    """Full local-time string (delegates to time_utils)."""
    return time_utils.time_str(target_time)
register.filter('format_time', format_time)


def rfc3339_time_str(dt):
    """RFC 3339 UTC timestamp (delegates to time_utils)."""
    return time_utils.rfc3339_time_str(dt)
register.filter('rfc3339_time_str', rfc3339_time_str)


def date_str(target_time):
    """Local date string (delegates to time_utils)."""
    return time_utils.date_str(target_time)
register.filter('date_str', date_str)


def only_time_str(target_time):
    """Local time-of-day string (delegates to time_utils)."""
    return time_utils.only_time_str(target_time)
register.filter('only_time_str', only_time_str)


def mod(value, divisor=2):
    """
    {{ value|mod:divisor }} -> True when value is divisible by divisor.

    FIX: the docstring previously showed "{{ value|make_list:divisor}}",
    a copy-paste from make_list above.
    """
    return (value % divisor == 0)
register.filter('mod', mod)


def logout_url(url):
    """Build the application logout URL that bounces back to ``url``."""
    return '/logout/?url=%s' % url
register.filter('logout_url', logout_url)


def login_url(url):
    """Google Accounts login URL that bounces back to ``url``."""
    return users.create_login_url(url)
register.filter('login_url', login_url)


def process_html_show(html):
    """Re-expand code blocks for display (delegates to common.utils)."""
    return process_html(html)
register.filter('process_html_show', process_html_show)


def md5(s):
    """Hex MD5 digest of ``s`` (e.g. for Gravatar URLs)."""
    return hashlib.md5(s).hexdigest()
register.filter('md5', md5)


def cover_space(s, autoescape=None):
    """Replace spaces with &nbsp; entities; output is marked safe."""
    return s.replace(' ', '&nbsp;')
cover_space.is_safe = True
cover_space.needs_autoescape = True
register.filter('cover_space', cover_space)
Python
#-*- coding: utf-8 -*-
import traceback, sys, cStringIO
import datetime, time, cgi
import pytz
from django.conf import settings


def get_utc_time(dt=None):
    """Return ``dt`` (default: now) as a timezone-aware UTC datetime."""
    if dt is None:
        dt = datetime.datetime.utcnow()
    if dt.tzinfo is None:
        # Naive datetimes are assumed to already be UTC.
        dt = dt.replace(tzinfo=pytz.utc)
    if dt.tzname() != 'UTC':
        dt = dt.astimezone(pytz.utc)
    return dt


def get_local_time(dt=None):
    """Return ``dt`` (default: now) converted to settings.TIME_ZONE."""
    utc_dt = get_utc_time(dt)
    return utc_dt.astimezone(pytz.timezone(settings.TIME_ZONE))


def date_str(dt=None):
    """Local 'YYYY-MM-DD' string; accepts a datetime or a plain date."""
    if dt is None:
        dt = datetime.datetime.utcnow()
    if isinstance(dt, datetime.datetime):
        dt = get_local_time(dt)
    return dt.strftime('%Y-%m-%d')


def time_str(dt=None):
    """Local 'YYYY-MM-DD HH:MM:SS +zzzz' string."""
    dt = get_local_time(dt)
    return dt.strftime('%Y-%m-%d %H:%M:%S %z')


def rfc3339_time_str(dt=None):
    """UTC RFC 3339 timestamp, e.g. '2008-01-01T12:00:00Z'."""
    dt = get_utc_time(dt)
    return dt.strftime('%Y-%m-%dT%H:%M:%SZ')


def only_time_str(dt=None):
    """Local 'HH:MM:SS' string."""
    dt = get_local_time(dt)
    return dt.strftime('%H:%M:%S')


def get_time(dt=None):
    """Seconds since the Unix epoch as a float for ``dt`` (default: now)."""
    utc_dt = get_utc_time(dt)
    # FIX: the original called datetime(1970, ...) -- but ``datetime`` here
    # is the *module*, so this line always raised TypeError. Use the class.
    epoch = datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc)
    t = utc_dt - epoch
    return float(t.days * 86400 + t.seconds + t.microseconds / 1000000.0)


def parse_date(d):
    """Parse 'YYYY-MM-DD' into a datetime.date."""
    return datetime.date(*map(int, d.split('-')))


def parse_time(d):
    """Parse a local 'YYYY-MM-DD HH:MM:SS' string into a UTC datetime."""
    t = time.strptime(d, '%Y-%m-%d %H:%M:%S')
    dt = datetime.datetime(*(t[0:6]), **({'tzinfo': get_local_time().tzinfo}))
    return get_utc_time(dt)


def human_time(target_time):
    """Render a UTC datetime as a rough relative-time string (Chinese)."""
    now = datetime.datetime.utcnow()
    tdelta = now - target_time
    if tdelta.days < 0:
        # Future timestamp: treat as "just now".
        return u"就在刚才"
    if tdelta.days > 0:
        return u"%d天前" % tdelta.days
    # FIX: use floor division. Identical on Python 2 ints, but "/" becomes
    # true division under Python 3 and would mis-bucket sub-hour deltas.
    hours = tdelta.seconds // (60 * 60)
    if hours > 0:
        return u"%d小时前" % hours
    minutes = tdelta.seconds // 60
    if minutes > 0:
        return u"%d分钟前" % minutes
    return u"就在刚才"
Python
#-*- coding: utf-8 -*-
import traceback, sys, cStringIO
import datetime, time, cgi
import pytz, re
from django.conf import settings
from BeautifulSoup import BeautifulSoup


def get_err():
    """Return the current exception's traceback as a string."""
    buf = cStringIO.StringIO()
    traceback.print_exc(file=buf)
    return buf.getvalue()


def print_err():
    """Write a timestamped traceback of the current exception to stderr."""
    sys.stderr.write('err time: ' + str(datetime.datetime.utcnow()))
    traceback.print_exc(file=sys.stderr)


def gbk2utf8(s):
    """Convert a GB18030 byte string (or unicode) to UTF-8 bytes."""
    if type(s) == unicode:
        return s.encode('utf8')
    return s.decode('gb18030').encode('utf8')


def utf82gbk(s):
    """Convert a UTF-8 byte string (or unicode) to GB18030 bytes."""
    if type(s) == unicode:
        return s.encode('gb18030')
    return s.decode('utf8').encode('gb18030')


def get_mod(module_name):
    """Import and return the module object for a dotted module path."""
    mod = __import__(module_name)
    for part in module_name.split('.')[1:]:
        mod = getattr(mod, part)
    return mod


def escape_dict(d):
    """HTML-escape every plain-str value in ``d`` (mutates and returns it)."""
    for key in d:
        if isinstance(d[key], str):
            d[key] = cgi.escape(d[key])
    return d


def ipdumps(ip):
    """Pack a dotted-quad IP string into one integer; None for empty input."""
    if not ip:
        return None
    octets = [int(part) for part in ip.split('.')]
    return (octets[3] + octets[2] * 256 + octets[1] * 256 * 256 +
            octets[0] * 256 * 256 * 256)


def process_html(html):
    """Re-expand <br /> into real newlines inside <pre name="code"> blocks."""
    code_tag = re.compile('\s*<pre name="code" class="([^"]+)">', re.MULTILINE)
    soup = BeautifulSoup(html)
    pieces = []
    for section in soup.contents:
        txt = unicode(section)
        if re.match(code_tag, txt):
            # Code block: restore line breaks for the highlighter.
            pieces.append(re.sub(r'<br />', "\n", txt))
        else:
            pieces.append(txt)
    return ''.join(pieces)
Python
from google.appengine.api import users
from logics.main import get_user, set_user
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden


def role_required(role):
    """
    Decorator factory requiring a signed-in user for a view.

    Args:
        role: "user" (any signed-in user) or "admin" (user.level == 10).

    Anonymous GET requests are redirected to the login page; any other
    anonymous request, or a signed-in user lacking the role, receives 403.
    """
    def wrapper(handler_method):
        def check_login(request, *args, **kwargs):
            user = get_user()
            if not user:
                if request.method == 'GET':
                    return HttpResponseRedirect(
                        users.create_login_url(request.path))
                # Non-GET requests cannot be bounced through a login
                # redirect without losing the request body.
                return HttpResponseForbidden()
            if role == "user" or (role == "admin" and user.level == 10):
                return handler_method(request, *args, **kwargs)
            # FIX: the original branched on request.method here but returned
            # HttpResponseForbidden() in both branches -- dead duplication.
            # User didn't meet role.
            return HttpResponseForbidden()
        return check_login
    return wrapper
Python
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2008, Leonard Richardson All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.7a" __copyright__ = "Copyright (c) 2004-2008 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. 
myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. 
position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, 
attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. 
r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. 
.""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents 
    a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        # Executed at class-definition time to build the reverse entity
        # table below; values are assumed unique.
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    # The five XML-predefined entities and their literal characters.
    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            return u'&amp;%s;' % x
        else:
            return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs == None:
            attrs = []
        # attrs is a list of (name, value) pairs, not a dict, so that
        # duplicate attributes and original order are preserved.
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        # Entity-handling policy is copied from the parser so the tag
        # can render itself after extraction.
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        # Convert any HTML, XML, or numeric entities in the attribute values.
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def has_key(self, key):
        return self._getAttrMap().has_key(key)

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        # Keep both representations (the attrs list and the cached
        # attrMap dict) in sync.
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg.
        tag('a') returns a list of all the A tags found within this tag."""
        return apply(self.findAll, args, kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # soup.fooTag is shorthand for soup.find('foo'); soup.foo works
        # too as long as the name doesn't start with a dunder prefix.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        return self.__str__(None)

    # Matches angle brackets, and ampersands that are not already part
    # of a numeric or named entity reference.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isString(val):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            # NOTE(review): "&squot;" is not a standard
                            # XML entity ("&apos;" is) — see TODO above.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the synthetic root) render only their
            # contents, not their own open/close markup.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        # Copy first: extract() mutates self.contents while we iterate.
        contents = [i for i in self.contents]
        for i in contents:
            if isinstance(i, Tag):
                i.decompose()
            else:
                i.extract()
        self.extract()

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding.
If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration # Next, a couple classes to represent queries and their results. 
class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isString(attrs): kwargs['class'] = attrs attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if isList(markup) and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isString(markup): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst == True and type(matchAgainst) == types.BooleanType: result = markup != None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isString(markup): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif isList(matchAgainst): result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isString(markup): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return hasattr(l, '__iter__') \ or (type(l) in (types.ListType, types.TupleType)) def isString(s): """Convenience method that works with all 2.x versions of Python to determine whether or not something is stringlike.""" try: return isinstance(s, unicode) or isinstance(s, basestring) except NameError: return isinstance(s, str) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. 
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for k,v in portion.items():
                built[k] = v
        elif isList(portion):
            #It's a list. Map each item to the default.
            for k in portion:
                built[k] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built

# Now, the parser classes.

class BeautifulStoneSoup(Tag, SGMLParser):

    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # The tag-behavior tables are all empty here; HTML-aware subclasses
    # (BeautifulSoup and friends) override them via buildTagMap.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Default input-sanitizing regexes: fix "<br/>" (no space before the
    # slash) and "<! --Comment-->" (whitespace inside a declaration),
    # the two constructs that most commonly choke sgmllib.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised by start_meta when it restarts the parse with a
            # newly discovered document encoding.
            pass
        self.markup = None                 # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # UnicodeDammit sniffs the encoding and hands back unicode.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
               or methodName.find('do_') == 0:
            return SGMLParser.__getattr__(self, methodName)
        elif methodName.find('__') != 0:
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        # The soup object itself doubles as the root Tag of the tree.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        # Flushes accumulated character data into a single text node of
        # the given class and attaches it to the current tag.
        if self.currentData:
            currentData = u''.join(self.currentData)
            if (currentData.translate(self.STRIP_ASCII_SPACES) == ''
                and not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                # Whitespace-only data collapses to a single newline or
                # space, unless an enclosing tag preserves whitespace.
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instqance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):

        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.

         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """

        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk down the open-tag stack looking for where to pop to.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):

                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            # Inside a QUOTE_TAGS element (e.g. <script>): re-serialize
            # the tag and treat it as literal character data.
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!"
            # (continued) ... % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Character data is buffered and flushed by endData().
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&amp;%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j

class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily.
      For instance, the occurance of a <blockquote>
      tag should _not_ implicitly close the previous <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable, try
    ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # HTML input defaults to HTML smart-quote handling.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS,
                                     'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            # Mark the tag so __str__ substitutes the real output
            # encoding for the %SOUP-ENCODING% placeholder.
            tag.containsSubstitutions = True

class StopParsing(Exception):
    # Control-flow exception: raised by start_meta to abort the current
    # parse when the document must be re-parsed with a new encoding.
    pass

class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case.
This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. 
Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. 
# cjkcodecs and iconv_codec teach Python about extra CJK character
# encodings; both are optional, so import failures are silently ignored.
try:
    import cjkcodecs.aliases
except ImportError:
    pass
try:
    import iconv_codec
except ImportError:
    pass

class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # Try candidate encodings in priority order: caller overrides,
        # then the encoding declared in the document, then the encoding
        # sniffed from BOM/byte patterns, then chardet (if installed),
        # then utf-8 and windows-1252 as last resorts. The first codec
        # that decodes the whole document wins.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already Unicode (or trivially empty): nothing to detect.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        # self.unicode is None if every candidate failed.
        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if type(sub) == types.TupleType:
            # Tuple entries are (html_entity_name, hex_codepoint).
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        """Tries to decode self.markup with the proposed codec; returns
        the Unicode text on success, None on failure. Each codec is
        attempted at most once (tracked in self.triedEncodings)."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # Decode failure just means this candidate loses; the caller
            # moves on to the next one.
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into
        Unicode. %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        # First sniff the encoding from BOMs / characteristic byte
        # patterns of the '<' character, transcoding the data to utf-8
        # so the declaration regexes below can run on it; then look for
        # an explicit encoding in the XML declaration or an HTML META
        # charset attribute.
        xml_encoding = sniffed_xml_encoding = None
        try:
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # NOTE(review): bare except keeps detection best-effort; any
            # transcode failure simply leaves the data as-is.
            xml_encoding_match = None
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
        if sniffed_xml_encoding and \
           (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                             'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                             'utf-16', 'utf-32', 'utf_16', 'utf_32',
                             'utf16', 'u16')):
            # The declared encoding is a generic UTF family name; the
            # sniffed endianness-specific codec is more precise.
            xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        # Resolve a charset label to a codec Python actually knows,
        # trying the alias table and dash-stripped/underscored variants.
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        """Returns charset if Python has a codec for it, else None."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Lazily-built translation table for EBCDIC input; see
    # _ebcdic_to_ascii below.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translates an EBCDIC byte string to its ASCII equivalent."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" punctuation bytes mapped to
    # (html_entity_name, hex_codepoint) tuples, or to a plain
    # replacement string when no entity applies.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}

#######################################################################

#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
Python
#!/usr/bin/python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ An interactive, stateful AJAX shell that runs Python code on the server. Part of http://code.google.com/p/google-app-engine-samples/. May be run as a standalone app or in an existing app as an admin-only handler. Can be used for system administration tasks, as an interactive way to try out APIs, or as a debugging aid during development. The logging, os, sys, db, and users modules are imported automatically. Interpreter state is stored in the datastore so that variables, function definitions, and other values in the global and local namespaces can be used across commands. To use the shell in your app, copy shell.py, static/*, and templates/* into your app's source directory. Then, copy the URL handlers from app.yaml into your app.yaml. TODO: unit tests! """ import logging import new import os import pickle import sys import traceback import types import wsgiref.handlers from google.appengine.api import users from google.appengine.ext import db from google.appengine.ext import webapp from google.appengine.ext.webapp import template # Set to True if stack traces should be shown in the browser, etc. _DEBUG = True # The entity kind for shell sessions. Feel free to rename to suit your app. _SESSION_KIND = '_Shell_Session' # Types that can't be pickled. 
# Types that can never be pickled and must instead be recreated by
# re-executing the statement that produced them (see Session docstring).
UNPICKLABLE_TYPES = (
  types.ModuleType,
  types.TypeType,
  types.ClassType,
  types.FunctionType,
  )

# Unpicklable statements to seed new sessions with.
INITIAL_UNPICKLABLES = [
  'import logging',
  'import os',
  'import sys',
  'from google.appengine.ext import db',
  'from google.appengine.api import users',
  ]


class Session(db.Model):
  """A shell session. Stores the session's globals.

  Each session globals is stored in one of two places:

  If the global is picklable, it's stored in the parallel globals and
  global_names list properties. (They're parallel lists to work around the
  unfortunate fact that the datastore can't store dictionaries natively.)

  If the global is not picklable (e.g. modules, classes, and functions), or if
  it was created by the same statement that created an unpicklable global,
  it's not stored directly. Instead, the statement is stored in the
  unpicklables list property. On each request, before executing the current
  statement, the unpicklable statements are evaluated to recreate the
  unpicklable globals.

  The unpicklable_names property stores all of the names of globals that were
  added by unpicklable statements. When we pickle and store the globals after
  executing a statement, we skip the ones in unpicklable_names.

  Using Text instead of string is an optimization. We don't query on any of
  these properties, so they don't need to be indexed.
  """
  global_names = db.ListProperty(db.Text)
  globals = db.ListProperty(db.Blob)
  unpicklable_names = db.ListProperty(db.Text)
  unpicklables = db.ListProperty(db.Text)

  def set_global(self, name, value):
    """Adds a global, or updates it if it already exists.

    Also removes the global from the list of unpicklable names.

    Args:
      name: the name of the global to set
      value: any picklable value
    """
    blob = db.Blob(pickle.dumps(value))

    if name in self.global_names:
      index = self.global_names.index(name)
      self.globals[index] = blob
    else:
      self.global_names.append(db.Text(name))
      self.globals.append(blob)

    self.remove_unpicklable_name(name)

  def remove_global(self, name):
    """Removes a global, if it exists.

    Args:
      name: string, the name of the global to remove
    """
    if name in self.global_names:
      index = self.global_names.index(name)
      del self.global_names[index]
      del self.globals[index]

  def globals_dict(self):
    """Returns a dictionary view of the globals.
    """
    return dict((name, pickle.loads(val))
                for name, val in zip(self.global_names, self.globals))

  def add_unpicklable(self, statement, names):
    """Adds a statement and list of names to the unpicklables.

    Also removes the names from the globals.

    Args:
      statement: string, the statement that created new unpicklable global(s).
      names: list of strings; the names of the globals created by the
        statement.
    """
    self.unpicklables.append(db.Text(statement))

    for name in names:
      self.remove_global(name)
      if name not in self.unpicklable_names:
        self.unpicklable_names.append(db.Text(name))

  def remove_unpicklable_name(self, name):
    """Removes a name from the list of unpicklable names, if it exists.

    Args:
      name: string, the name of the unpicklable global to remove
    """
    if name in self.unpicklable_names:
      self.unpicklable_names.remove(name)


class FrontPageHandler(webapp.RequestHandler):
  """Creates a new session and renders the shell.html template.
  """

  def get(self):
    # set up the session. TODO: garbage collect old shell sessions
    session_key = self.request.get('session')
    if session_key:
      session = Session.get(session_key)
    else:
      # create a new session
      session = Session()
      session.unpicklables = [db.Text(line) for line in INITIAL_UNPICKLABLES]
      session_key = session.put()

    template_file = os.path.join(os.path.dirname(__file__), '../templates',
                                 'shell.html')
    session_url = '/?session=%s' % session_key
    vars = { 'server_software': os.environ['SERVER_SOFTWARE'],
             'python_version': sys.version,
             'session': str(session_key),
             'user': users.get_current_user(),
             'login_url': users.create_login_url(session_url),
             'logout_url': users.create_logout_url(session_url),
             }
    rendered = webapp.template.render(template_file, vars, debug=_DEBUG)
    self.response.out.write(rendered)


class StatementHandler(webapp.RequestHandler):
  """Evaluates a python statement in a given session and returns the result.
  """

  def get(self):
    self.response.headers['Content-Type'] = 'text/plain'

    # extract the statement to be run
    statement = self.request.get('statement')
    if not statement:
      return

    # the python compiler doesn't like network line endings
    statement = statement.replace('\r\n', '\n')

    # add a couple newlines at the end of the statement. this makes
    # single-line expressions such as 'class Foo: pass' evaluate happily.
    statement += '\n\n'

    # log and compile the statement up front
    try:
      logging.info('Compiling and evaluating:\n%s' % statement)
      compiled = compile(statement, '<string>', 'single')
    except:
      # bare except is deliberate here: any compile error, whatever its
      # type, is rendered back to the user as a traceback.
      self.response.out.write(traceback.format_exc())
      return

    # create a dedicated module to be used as this statement's __main__
    statement_module = new.module('__main__')

    # use this request's __builtin__, since it changes on each request.
    # this is needed for import statements, among other things.
    import __builtin__
    statement_module.__builtins__ = __builtin__

    # load the session from the datastore
    session = Session.get(self.request.get('session'))

    # swap in our custom module for __main__. then unpickle the session
    # globals, run the statement, and re-pickle the session globals, all
    # inside it.
    old_main = sys.modules.get('__main__')
    try:
      sys.modules['__main__'] = statement_module
      statement_module.__name__ = '__main__'

      # re-evaluate the unpicklables
      for code in session.unpicklables:
        exec code in statement_module.__dict__

      # re-initialize the globals
      for name, val in session.globals_dict().items():
        try:
          statement_module.__dict__[name] = val
        except:
          msg = 'Dropping %s since it could not be unpickled.\n' % name
          self.response.out.write(msg)
          logging.warning(msg + traceback.format_exc())
          session.remove_global(name)

      # run! redirect stdout/stderr into the HTTP response while the
      # user statement executes, and always restore them afterwards.
      old_globals = dict(statement_module.__dict__)
      try:
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        try:
          sys.stdout = self.response.out
          sys.stderr = self.response.out
          exec compiled in statement_module.__dict__
        finally:
          sys.stdout = old_stdout
          sys.stderr = old_stderr
      except:
        self.response.out.write(traceback.format_exc())
        return

      # extract the new globals that this statement added
      new_globals = {}
      for name, val in statement_module.__dict__.items():
        if name not in old_globals or val != old_globals[name]:
          new_globals[name] = val

      if True in [isinstance(val, UNPICKLABLE_TYPES)
                  for val in new_globals.values()]:
        # this statement added an unpicklable global. store the statement and
        # the names of all of the globals it added in the unpicklables.
        session.add_unpicklable(statement, new_globals.keys())
        logging.debug('Storing this statement as an unpicklable.')

      else:
        # this statement didn't add any unpicklables. pickle and store the
        # new globals back into the datastore.
        for name, val in new_globals.items():
          if not name.startswith('__'):
            session.set_global(name, val)

    finally:
      sys.modules['__main__'] = old_main

    session.put()


def main():
  # make this app's bundled libraries and django.zip importable before
  # the WSGI application starts handling requests.
  libs_path = os.path.abspath('./')
  django_path = os.path.abspath('django.zip')
  if django_path not in sys.path:
    sys.path.insert(0, django_path)
  if libs_path not in sys.path:
    sys.path.insert(0, libs_path)
  application = webapp.WSGIApplication(
    [('/shell/', FrontPageHandler),
     ('/shell/shell/', StatementHandler)], debug=_DEBUG)
  wsgiref.handlers.CGIHandler().run(application)


if __name__ == '__main__':
  main()
Python
#-*- encoding: utf-8 -*- __all__ = ['fetch', 'get_opener', 'get_content_type'] """ One fetch web page tool with cookie and encode. History: 2008-05-20 16:00: ADD upload file provide 2008-06-04 17:12: add headers the fetch result 2008-06-06 14:17: add socks proxy provide 2009-04-29 15:58: remove common_refer """ import urllib2, cookielib, urllib, mimetypes, socket, httplib #from socks_proxy_inject import ProxyProcessor #import socks_proxy as socks def get_opener(): #socks_proxy=ProxyProcessor('127.0.0.1', 7070, socks.PROXY_TYPE_SOCKS5) #proxy_handler=urllib2.ProxyHandler({'http': '12.24.45.100:80'}) cookie_handler = urllib2.HTTPCookieProcessor(cookielib.CookieJar()) return urllib2.build_opener(cookie_handler) common_opener = get_opener() def open_request(url,data=None,headers=None,opener=None): if headers is None: headers = [] #if data:data=urllib.urlencode(data) request=urllib2.Request(url,data) request.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)') request.add_header("Accept-Encoding","gzip") #request.add_header('Connection','Keep-Alive') for k,v in headers: request.add_header(k,v) #if option.has_key('referer'): #request.add_header('Referer',option['referer']) #print 'start open server...' res=(opener or common_opener).open(request) #print 'open server end' return res def get_content_type(filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream' def encode_multipart_formdata(fields, files): """ refer from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/572202 fields is a sequence of (name, value) elements for regular form fields. 
files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(urllib.quote_plus(str(value))) for (key, filename, value) in files: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, urllib.quote_plus(filename))) L.append('Content-Type: %s' % get_content_type(filename)) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body def fetch(url,encode=False,headers=None,opener=None,fields=None, files=None): """ 这样设置 headers=[] 的默认值有很大的问题,因为默认值只在函数建立的时候建立,以后都不会变化,那么header就会越来越长了 """ if headers is None: headers = [] data=None if files: t, d=encode_multipart_formdata(fields, files) headers.append(('Content-Type', t)) headers.append(('Content-Length', str(len(d)))) data=d elif fields: data=urllib.urlencode(fields) f=open_request(url,data,headers,opener) result={} #print 'start read data...' 
result['data'] = f.read() #print 'read data end' if hasattr(f, 'headers'): if f.headers.get('content-encoding', '') == 'gzip': import StringIO,gzip result['data'] = gzip.GzipFile(fileobj=StringIO.StringIO(result['data'])).read() #check encoding if encode: charset=None if hasattr(f, 'headers'): ct=f.headers.get('content-type') #print ct if ct: ct=ct.lower() i=ct.find('charset=') if i!=-1: charset=ct[i+len('charset='):] if charset=="gb2312": charset='gb18030' if charset: try: result['data']=result['data'].decode(charset) except: result['data']=deal_invalid_character(result['data'],charset) result['data']=result['data'].decode(charset) else: result['data']=encode_by_html(result['data']) if hasattr(f, 'url'): result['url'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status f.close() result['headers']=f.headers return result def encode_by_html(html): #<meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> import re mt=re.search(r'''<meta[^<>]+http-equiv[\s="']+Content-Type[^<>]*>''',html,re.I) charset=None if mt: mt=mt.group().lower() mmt=re.search(r'''charset\s*=\s*([^'"\s<>]+)''',mt) if mmt: charset=mmt.group(1) if charset=='gb2312': charset='gb18030' #print charset if not charset: return html.decode('us-ascii') else: try: return html.decode(charset) except Exception,err: html=deal_invalid_character(html,charset) return html.decode(charset) def deal_invalid_character(html,charset): import re while 1: try: html.decode(charset) except Exception,err: err_str=str(err) if err_str.find("codec can't decode bytes in position"): err_mt=re.search(r'(\d+)-(\d+)',err_str) if err_mt: html=html[0:int(err_mt.group(1))]+html[int(err_mt.group(1))+1:] else: return html else: return html else: return html def getHeaders(url,data=None,headers={}): f=open_request(url,data,headers) if hasattr(f, 'headers'): ct=f.headers.get('content-type') #'text/html; charset=UTF-8' f.close() else: ct=None return ct if __name__=='__main__': 
fields=[('albumId', '3364')] fdata=file("D:\IMG_0022.JPG","rb").read() files=[('Filedata','IMG_0022.JPG',fdata)] d=fetch('http://2.com:8000/album/uploadPic.do',fields=fields, files=files) print d
Python
#-*- coding: utf-8 -*- import socks_proxy as socks import browser_agent as ai import urllib2, os, threading, time def get_one_proxy_opener(p): #proxy=urllib2.ProxyHandler({'http':p[0]}) cookie=urllib2.HTTPCookieProcessor() #opener=urllib2.build_opener(proxy, cookie) opener=urllib2.build_opener(cookie) return opener def get_proxy_process(p): # ( #['23', '124.128.224.2', '1080', 'SOCKS4', '\xc5\xb7\xd6\xde', #'ProxyCN', '05-13', '20:00 #', '1.003', 'whois'], #4.812000036239624) p=p[0] if p[3]=='SOCKS5': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_SOCKS5) elif p[3]=='SOCKS4': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_SOCKS4) elif p[3]=='HTTP': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_HTTP) return proxy def test_proxy(s, p, success_proxys, thread_list): print 'test : ', p p=p.split() try: if p[3]=='SOCKS5': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_SOCKS5) elif p[3]=='SOCKS4': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_SOCKS4) elif p[3]=='HTTP': proxy=ai.ProxyProcessor(p[1], int(p[2]), socks.PROXY_TYPE_HTTP) opener=urllib2.build_opener(proxy) st=time.time() #res=ai.fetch('http://union.rekoo.com/test_proxy.do', fields=[('a','a')], opener=opener) res=ai.fetch('http://kf.51.com/help/contact.php', opener=opener) et=time.time() #print res['data'] #if res['data']=='HTTP_X_FORWARDED_FOR: ': if res['data'].find(u'联系方式'.encode('gbk'))!=-1: success_proxys.append((p, et-st)) #return res['data']=='HTTP_X_FORWARDED_FOR: ' except Exception,e: print e finally: s.release() thread_list.remove(threading.currentThread()) print -1 def get_proxys(): BASE_PATH=os.path.dirname(__file__).replace('\\','/') if BASE_PATH: BASE_PATH+='/' proxys=open(BASE_PATH+'proxy.txt').readlines() s=threading.Semaphore(50) success_proxys=[] #成功的代理 thread_list=[] for p in proxys: s.acquire(True) thr=threading.Thread(target=test_proxy, args=(s, p, success_proxys, thread_list)) thr.start() thread_list.append(thr) total_wait_time=0 while 
1: print 'test proxy : '+'#'*20, len(thread_list) if len(thread_list)==0: break alive=False for thr in thread_list: if thr.isAlive(): alive=True if alive: time.sleep(2) total_wait_time+=2 if total_wait_time>30: break else: break #success_proxys.sort((lambda a,b:cmp(a[1], b[1]))) #print success_proxys return success_proxys if __name__=='__main__': get_proxys()
Python
_CS_IDLE = 'Idle'

import urllib2, httplib
from urllib2 import URLError, addinfourl
import socket
import socks_proxy as socks


def _httplib_connect(self):
    """Connect to the host and port specified in __init__."""
    # Replacement for httplib.HTTPConnection.connect: identical to the
    # stdlib version except for the "hacked" branch, which reuses a
    # pre-built SOCKS socket attached by ProxyProcessor (see below)
    # instead of creating a fresh plain socket.
    msg = "getaddrinfo returns an empty list"
    for res in socket.getaddrinfo(self.host, self.port, 0,
                                  socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        try:
            #hacked
            if hasattr(self, '_socks'):
                self.sock = self._socks
            else:
                self.sock = socket.socket(af, socktype, proto)
            if self.debuglevel > 0:
                print "connect: (%s, %s)" % (self.host, self.port)
            #print 'start connect server ...'
            self.sock.connect(sa)
            #print 'connect server end'
        except socket.error, msg:
            if self.debuglevel > 0:
                print 'connect fail:', (self.host, self.port)
            if self.sock:
                self.sock.close()
            self.sock = None
            continue
        break
    if not self.sock:
        raise socket.error, msg

# Monkey-patch: every HTTPConnection in this process now honours an
# injected _socks socket.
httplib.HTTPConnection.connect = _httplib_connect


def _do_open(self, http_class, req):
    # Replacement for urllib2.AbstractHTTPHandler.do_open. The only
    # functional additions over the stdlib version are the marked
    # "#add ... #end" block, which forwards the request's _socks
    # attribute onto the connection so _httplib_connect can use it,
    # and the forced "Connection: close" header.
    host = req.get_host()
    if not host:
        raise URLError('no host given')

    h = http_class(host) # will parse host:port
    #add
    if hasattr(req, '_socks'):
        h._socks = req._socks
    #end
    h.set_debuglevel(self._debuglevel)

    headers = dict(req.headers)
    headers.update(req.unredirected_hdrs)
    headers["Connection"] = "close"
    headers = dict(
        (name.title(), val) for name, val in headers.items())
    try:
        h.request(req.get_method(), req.get_selector(), req.data, headers)
        r = h.getresponse()
    except socket.error, err: # XXX what error?
        raise URLError(err)

    # Wrap the raw httplib response in a file-like object so callers
    # can treat it like any urllib2 response.
    r.recv = r.read
    fp = socket._fileobject(r)

    resp = addinfourl(fp, r.msg, req.get_full_url())
    resp.code = r.status
    resp.msg = r.reason
    return resp

# Monkey-patch all urllib2 HTTP(S) handlers to use the version above.
urllib2.AbstractHTTPHandler.do_open = _do_open


class ProxyProcessor(urllib2.BaseHandler):
    """urllib2 handler that routes requests through a SOCKS4/SOCKS5/HTTP
    proxy by attaching a pre-configured socks socket to each request;
    the monkey-patched connect() above then uses that socket."""

    def __init__(self, host, port, p_type=None):
        # host/port of the proxy server; p_type defaults to SOCKS5.
        self._host = host
        self._port = port
        self._p_type = (p_type or socks.PROXY_TYPE_SOCKS5)

    def http_request(self, request):
        # A fresh proxified socket per request (connections are not
        # reused: do_open forces "Connection: close").
        s = socks.socksocket(family=socket.AF_INET,
                             type=socket.SOCK_STREAM, proto=0)
        s.setproxy(self._p_type, self._host, self._port)
        request._socks = s
        return request

    def http_response(self, request, response):
        return response

    https_request = http_request
    https_response = http_response
Python
from browser_agent import *
Python
"""SocksiPy - Python SOCKS module. Version 1.00 Copyright 2006 Dan-Haim. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Dan Haim nor the names of his contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. This module provides a standard socket-like interface for Python for tunneling connections through SOCKS proxies. 
"""

import socket
import struct
import time

# Supported proxy protocol identifiers (values are part of the public API).
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3

# Module-wide default proxy tuple set via setdefaultproxy(); None = direct.
_defaultproxy = None
# Keep a reference to the real socket class so methods can reach the
# un-proxied implementations even if socket.socket gets monkey-patched.
_orgsocket = socket.socket


class ProxyError(Exception):
    """Base class for all errors raised by this module."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class GeneralProxyError(ProxyError):
    """Protocol-agnostic proxy failure; value is (code, _generalerrors[code])."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class Socks5AuthError(ProxyError):
    """SOCKS5 authentication failure."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class Socks5Error(ProxyError):
    """SOCKS5 server-reported connection failure."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class Socks4Error(ProxyError):
    """SOCKS4 server-reported failure."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class HTTPError(ProxyError):
    """HTTP CONNECT proxy returned a non-200 status."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)

class SockTimeoutError(ProxyError):
    """Raised by __recvall when its soft receive deadline expires
    (local addition to stock SocksiPy)."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)


# Human-readable error tables indexed by the protocol error codes.
_generalerrors = ("success",
           "invalid data",
           "not connected",
           "not available",
           "bad proxy type",
           "bad input")

_socks5errors = ("succeeded",
          "general SOCKS server failure",
          "connection not allowed by ruleset",
          "Network unreachable",
          "Host unreachable",
          "Connection refused",
          "TTL expired",
          "Command not supported",
          "Address type not supported",
          "Unknown error")

_socks5autherrors = ("succeeded",
              "authentication is required",
              "all offered authentication methods were rejected",
              "unknown username or invalid password",
              "unknown error")

_socks4errors = ("request granted",
          "request rejected or failed",
          "request rejected because SOCKS server cannot connect to identd on the client",
          "request rejected because the client program and identd report different user-ids",
          "unknown error")


def setdefaultproxy(proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    _defaultproxy = (proxytype,addr,port,rdns,username,password)


class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self,family,type,proto,_sock)
        # Proxy config tuple: (type, addr, port, rdns, username, password).
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        # Filled in after a successful negotiation.
        self.__proxysockname = None
        self.__proxypeername = None

    def __recvall(self, bytes):
        """__recvall(bytes) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.

        Local modification: enforces a soft deadline scaled by the
        requested size (10s per KiB, minimum 15s) and raises
        SockTimeoutError when it elapses, instead of blocking forever.
        """
        data = ""
        # hack by perol.chen
        # add timeout
        st=time.time()
        timeout=(bytes/1024.0)*10
        if timeout<15:
            timeout=15
        #pt=None
        while len(data) < bytes:
            data = data + self.recv(bytes-len(data))
            et=time.time()
            #if not pt or et-pt>5:
            #    pt=et
            #    print et-st, len(data), data
            if et-st>timeout:
                raise SockTimeoutError('rev data: '+str(bytes)+': '+data+'|')
                #print 'time out...'
                #break
        return data

    def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
               servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be preformed on the remote side
               (rather than the local side). The default is True.
               Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
               The default is no authentication.
        password - Password to authenticate with to the server.
               Only relevant when username is also provided.
        """
        self.__proxy = (proxytype,addr,port,rdns,username,password)

    def __negotiatesocks5(self,destaddr,destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall("\x05\x02\x00\x02")
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall("\x05\x01\x00")
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0] != "\x05":
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1] == "\x00":
            # No authentication is required
            pass
        elif chosenauth[1] == "\x02":
            # Okay, we need to perform a basic username/password
            # authentication.
            # NOTE(review): `self.proxy[5]` below looks like a typo for
            # `self.__proxy[5]` (known upstream SocksiPy bug) — the
            # attribute `proxy` does not exist, so password auth would
            # raise AttributeError.  Left as-is; confirm before fixing.
            self.sendall("\x01" + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0] != "\x01":
                # Bad response
                self.close()
                raise GeneralProxyError((1,_generalerrors[1]))
            if authstat[1] != "\x00":
                # Authentication failed
                self.close()
                raise Socks5AuthError,((3,_socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            if chosenauth[1] == "\xFF":
                raise Socks5AuthError((2,_socks5autherrors[2]))
            else:
                raise GeneralProxyError((1,_generalerrors[1]))
        # Now we can request the actual connection
        req = "\x05\x01\x00"
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + "\x01" + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]==True:
                # Resolve remotely
                ipaddr = None
                req = req + "\x03" + chr(len(destaddr)) + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + "\x01" + ipaddr
        req = req + struct.pack(">H",destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0] != "\x05":
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        elif resp[1] != "\x00":
            # Connection failed
            self.close()
            # NOTE(review): `_generalerrors` here is presumably meant to be
            # `_socks5errors` (known upstream bug) — the SOCKS5 reply code
            # is being mapped through the wrong message table and codes 6-8
            # would raise IndexError.  Left as-is; verify before fixing.
            if ord(resp[1])<=8:
                raise Socks5Error(ord(resp[1]),_generalerrors[ord(resp[1])])
            else:
                raise Socks5Error(9,_generalerrors[9])
        # Get the bound address/port
        elif resp[3] == "\x01":
            boundaddr = self.__recvall(4)
        elif resp[3] == "\x03":
            # Domain-name bound address: next byte is the name length.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(resp[4])
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H",self.__recvall(2))[0]
        self.__proxysockname = (boundaddr,boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr),destport)
        else:
            self.__proxypeername = (destaddr,destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername

    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]==True:
                # SOCKS4A marker address 0.0.0.x tells the server to
                # resolve the hostname appended after the userid.
                ipaddr = "\x00\x00\x00\x01"
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = "\x04\x01" + struct.pack(">H",destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + "\x00"
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv==True:
            req = req + destaddr + "\x00"
        #print 'start send all, ',req
        self.sendall(req)
        #print 'send end'
        # Get the response from the server
        #print 'start rec...'
        resp = self.__recvall(8)
        #print resp
        if resp[0] != "\x00":
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1] != "\x5A":
            # Server returned an error
            self.close()
            if ord(resp[1]) in (91,92,93):
                self.close()
                raise Socks4Error((ord(resp[1]),_socks4errors[ord(resp[1])-90]))
            else:
                raise Socks4Error((94,_socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]),struct.unpack(">H",resp[2:4])[0])
        # NOTE(review): `rmtrslv != None` is always true (rmtrslv is a bool),
        # so the else branch is dead; presumably `rmtrslv == False` was
        # intended (known upstream quirk).  Left as-is.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr),destport)
        else:
            self.__proxypeername = (destaddr,destport)

    def __negotiatehttp(self,destaddr,destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if self.__proxy[3] == False:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n")
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n")==-1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ",2)
        if statusline[0] not in ("HTTP/1.0","HTTP/1.1"):
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode,statusline[2]))
        self.__proxysockname = ("0.0.0.0",0)
        self.__proxypeername = (addr,destport)

    def connect(self,destpair):
        """connect(self,despair)
        Connects to the specified destination through a proxy.
        destpar - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        # NOTE(review): `type(destpair) in (list,tuple)==False` chains as
        # `type(...) in (list,tuple) and (list,tuple)==False`, which is
        # always False — the intended `not isinstance` guard never fires
        # (known upstream SocksiPy bug).  Left as-is.
        if (type(destpair) in (list,tuple)==False) or (len(destpair)<2) or (type(destpair[0])!=str) or (type(destpair[1])!=int):
            raise GeneralProxyError((5,_generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1],portnum))
            self.__negotiatesocks5(destpair[0],destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            #print 'org connect start ...'
            _orgsocket.connect(self,(self.__proxy[1],portnum))
            #print 'org connect end, __negotiatesocks4 start...'
            self.__negotiatesocks4(destpair[0],destpair[1])
            #print '__negotiatesocks4 end'
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1],portnum))
            self.__negotiatehttp(destpair[0],destpair[1])
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            _orgsocket.connect(self,(destpair[0],destpair[1]))
        else:
            raise GeneralProxyError((4,_generalerrors[4]))
Python
#! /usr/bin/env python
# coding=utf-8
#############################################################################
#                                                                           #
#   File: fetch.py                                                          #
#                                                                           #
#   Copyright (C) 2008-2009 Du XiaoGang <dugang@188.com>                    #
#                                                                           #
#   Home: http://gappproxy.googlecode.com                                   #
#                                                                           #
#   This file is part of GAppProxy.                                         #
#                                                                           #
#   GAppProxy is free software: you can redistribute it and/or modify       #
#   it under the terms of the GNU General Public License as                 #
#   published by the Free Software Foundation, either version 3 of the      #
#   License, or (at your option) any later version.                         #
#                                                                           #
#   GAppProxy is distributed in the hope that it will be useful,            #
#   but WITHOUT ANY WARRANTY; without even the implied warranty of          #
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the           #
#   GNU General Public License for more details.                            #
#                                                                           #
#   You should have received a copy of the GNU General Public License       #
#   along with GAppProxy.  If not, see <http://www.gnu.org/licenses/>.      #
#                                                                           #
#############################################################################

# Google App Engine (Python 2 runtime) fetch server for GAppProxy: the
# local proxy POSTs a serialized HTTP request here; this handler replays
# it via urlfetch and streams the raw HTTP response back, optionally
# base64- or zlib-encoded.

import wsgiref.handlers, urlparse, StringIO, logging, base64, zlib
from google.appengine.ext import webapp
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
# from accesslog import logAccess


class MainHandler(webapp.RequestHandler):
    Software = 'GAppProxy/1.0.0 beta'
    # hop to hop header should not be forwarded
    HtohHdrs = ['connection', 'keep-alive', 'proxy-authenticate',
                'proxy-authorization', 'te', 'trailers',
                'transfer-encoding', 'upgrade']

    def myError(self, status, description, encodeResponse):
        """Write a synthetic raw-HTTP error response (status/description
        chosen by the caller), encoded per encodeResponse
        ('base64' | 'compress' | anything-else = plain)."""
        # header
        self.response.out.write('HTTP/1.1 %d %s\r\n' % (status, description))
        self.response.out.write('Server: %s\r\n' % self.Software)
        self.response.out.write('Content-Type: text/html\r\n')
        self.response.out.write('\r\n')
        # body
        content = '<h1>Fetch Server Error</h1><p>Error Code: %d<p>Message: %s' % (status, description)
        if encodeResponse == 'base64':
            self.response.out.write(base64.b64encode(content))
        elif encodeResponse == 'compress':
            self.response.out.write(zlib.compress(content))
        else:
            self.response.out.write(content)

    def post(self):
        """Replay the client's serialized request via urlfetch and relay
        the raw response.  Expected form fields: method, path (or
        base64 'encoded_path'), headers, encodeResponse, postdata."""
        try:
            # get post data
            origMethod = self.request.get('method')
            origPath = self.request.get('encoded_path')
            if origPath != '':
                origPath = base64.b64decode(origPath)
            else:
                origPath = self.request.get('path')
            origHeaders = self.request.get('headers')
            encodeResponse = self.request.get('encodeResponse')
            origPostData = self.request.get('postdata')

            # check method
            if origMethod != 'GET' and origMethod != 'HEAD' \
               and origMethod != 'POST':
                # forbid
                self.myError(590, 'Invalid local proxy, Method not allowed.', encodeResponse)
                return
            if origMethod == 'GET':
                method = urlfetch.GET
            elif origMethod == 'HEAD':
                method = urlfetch.HEAD
            elif origMethod == 'POST':
                method = urlfetch.POST

            # check path
            (scm, netloc, path, params, query, _) = urlparse.urlparse(origPath)
            if (scm.lower() != 'http' and scm.lower() != 'https') or not netloc:
                self.myError(590, 'Invalid local proxy, Unsupported Scheme.', encodeResponse)
                return
            # create new path (strip any fragment)
            newPath = urlparse.urlunparse((scm, netloc, path, params, query, ''))

            # make new headers
            newHeaders = {}
            contentLength = 0
            si = StringIO.StringIO(origHeaders)
            while True:
                line = si.readline()
                line = line.strip()
                if line == '':
                    break
                # parse line
                (name, _, value) = line.partition(':')
                name = name.strip()
                value = value.strip()
                if name.lower() in self.HtohHdrs:
                    # don't forward
                    continue
                newHeaders[name] = value
                if name.lower() == 'content-length':
                    contentLength = int(value)
            # predined header
            newHeaders['Connection'] = 'close'

            # check post data
            if contentLength != 0:
                if contentLength != len(origPostData):
                    self.myError(590, 'Invalid local proxy, Wrong length of post data.', encodeResponse)
                    return
            else:
                origPostData = ''
            if origPostData != '' and origMethod != 'POST':
                self.myError(590, 'Invalid local proxy, Inconsistent method and data.', encodeResponse)
                return
        except Exception, e:
            # NOTE(review): if the exception fires before encodeResponse is
            # assigned above, this raises NameError inside the handler —
            # confirm whether a default of '' should be set first.
            self.myError(591, 'Fetch server error, %s.' % str(e), encodeResponse)
            return

        # fetch, try 3 times
        for _ in range(3):
            try:
                # follow_redirects=False, allow_truncated=False
                resp = urlfetch.fetch(newPath, origPostData, method, newHeaders, False, False)
                break
            except urlfetch_errors.ResponseTooLargeError:
                self.myError(591, 'Fetch server error, Sorry, Google\'s limit, file size up to 1MB.', encodeResponse)
                return
            except Exception:
                continue
        else:
            # all three attempts failed
            self.myError(591, 'Fetch server error, The target server may be down or not exist. Another possibility: try to request the URL directly.', encodeResponse)
            return

        # forward
        self.response.headers['Content-Type'] = 'application/octet-stream'
        # status line
        self.response.out.write('HTTP/1.1 %d %s\r\n' % (resp.status_code, \
                self.response.http_status_message(resp.status_code)))
        # headers
        # default Content-Type is text
        textContent = True
        for header in resp.headers:
            if header.strip().lower() in self.HtohHdrs:
                # don't forward
                continue
            ## there may have some problems on multi-cookie process in urlfetch.
            #if header.lower() == 'set-cookie':
            #    logging.info('O %s: %s' % (header, resp.headers[header]))
            #    scs = resp.headers[header].split(',')
            #    for sc in scs:
            #        logging.info('N %s: %s' % (header, sc.strip()))
            #        self.response.out.write('%s: %s\r\n' % (header, sc.strip()))
            #    continue
            # other
            self.response.out.write('%s: %s\r\n' % (header, resp.headers[header]))
            # check Content-Type
            if header.lower() == 'content-type':
                if resp.headers[header].lower().find('text') == -1:
                    # not text
                    textContent = False
        self.response.out.write('\r\n')
        # need encode?
        if encodeResponse == 'base64':
            self.response.out.write(base64.b64encode(resp.content))
        elif encodeResponse == 'compress':
            # only compress when Content-Type is text/xxx
            if textContent:
                self.response.out.write(zlib.compress(resp.content))
            else:
                self.response.out.write(resp.content)
        else:
            self.response.out.write(resp.content)
        # log
        #logAccess(netloc, self.request.remote_addr)

    def get(self):
        # Plain GET: serve a status page saying the fetch server is up.
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        self.response.out.write( \
'''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>GAppProxy已经在工作了</title>
</head>
<body>
<table width="800" border="0" align="center">
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
<b><h1>%s 已经在工作了</h1></b>
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
GAppProxy是一个开源的HTTP Proxy软件,使用Python编写,运行于Google App Engine平台上.
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
更多相关介绍,请参考<a href="http://gappproxy.googlecode.com/">GAppProxy项目主页</a>.
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
<img src="http://code.google.com/appengine/images/appengine-silver-120x30.gif" alt="Powered by Google App Engine" />
</td></tr>
<tr><td align="center"><hr></td></tr>
</table>
</body>
</html>
''' % self.Software)


def main():
    """CGI entry point: route /fetch/ to MainHandler."""
    application = webapp.WSGIApplication([('/fetch/', MainHandler)])
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
Python
'''Base classes and helpers for building zone specific tzinfo classes'''

from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
    set
except NameError:
    # Python < 2.4 fallback
    from sets import Set as set

import pytz

__all__ = []

# Interning caches: each distinct timedelta/datetime/ttinfo tuple is
# created once and shared, keeping memory down across many zones.
_timedelta_cache = {}
def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    try:
        return _timedelta_cache[seconds]
    except KeyError:
        delta = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = delta
        return delta

_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    try:
        return _datetime_cache[seconds]
    except KeyError:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096)
        dt = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = dt
        return dt

_ttinfo_cache = {}
def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    # args = (utcoffset_seconds, dstoffset_seconds, tzname)
    try:
        return _ttinfo_cache[args]
    except KeyError:
        ttinfo = (
                memorized_timedelta(args[0]),
                memorized_timedelta(args[1]),
                args[2]
                )
        _ttinfo_cache[args] = ttinfo
        return ttinfo

# Shared zero offset.
_notime = memorized_timedelta(0)

def _to_seconds(td):
    '''Convert a timedelta to seconds'''
    return td.seconds + td.days * 24 * 60 * 60


class BaseTzInfo(tzinfo):
    # Overridden in subclass
    _utcoffset = None
    _tzname = None
    zone = None

    def __str__(self):
        return self.zone


class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most regions have changed their
    offset from UTC at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self,dt):
        '''See datetime.tzinfo.utcoffset'''
        return self._utcoffset

    def dst(self,dt):
        '''See datetime.tzinfo.dst'''
        return _notime

    def tzname(self,dt):
        '''See datetime.tzinfo.tzname'''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        return dt.replace(tzinfo=self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)


class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight savings time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass
    _utc_transition_times = None  # Sorted list of DST transition times in UTC
    _transition_info = None  # [(utcoffset, dstoffset, tzname)] corresponding
                             # to _utc_transition_times entries
    zone = None

    # Set in __init__
    _tzinfos = None
    _dst = None  # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        # Two construction modes: with _inf, build the variant tzinfo for
        # one particular (utcoffset, dst, tzname) state; without, build
        # the "root" instance and populate _tzinfos with every variant.
        if _inf:
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = self._transition_info[0]
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                if not _tzinfos.has_key(inf):
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        dt = dt.replace(tzinfo=None)
        # Locate the transition interval containing dt.
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'
        '''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambigous
        period at the end of daylight savings time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt  = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight savings

        >>> loc_dt1 = amdam.localize(dt, is_dst=None)
        Traceback (most recent call last):
            [...]
        AmbiguousTimeError: 2004-10-31 02:00:00

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight savings time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> loc_dt1 = pacific.localize(dt, is_dst=None)
        Traceback (most recent call last):
            [...]
        NonExistentTimeError: 2008-03-09 02:00:00
        '''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'

        # Find the possibly correct timezones. We probably just have one,
        # but we might end up with two if we are in the end-of-DST
        # transition period. Or possibly more in some particularly confused
        # location...
        possible_loc_dt = set()
        for tzinfo in self._tzinfos.values():
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)

            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occuring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possiblilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt
                if bool(p.tzinfo._dst) == is_dst
            ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we have in a wierd timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (eg. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone.
        def mycmp(a,b):
            return cmp(
                    a.replace(tzinfo=None) - a.tzinfo._utcoffset,
                    b.replace(tzinfo=None) - b.tzinfo._utcoffset,
                    )
        filtered_possible_loc_dt.sort(mycmp)
        return filtered_possible_loc_dt[0]

    def utcoffset(self, dt):
        '''See datetime.tzinfo.utcoffset'''
        return self._utcoffset

    def dst(self, dt):
        '''See datetime.tzinfo.dst'''
        return self._dst

    def tzname(self, dt):
        '''See datetime.tzinfo.tzname'''
        return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                    self.zone, self._tzname, self._utcoffset, dst
                )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                    self.zone, self._tzname, self._utcoffset, dst
                )

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
                self.zone,
                _to_seconds(self._utcoffset),
                _to_seconds(self._dst),
                self._tzname
                )


class InvalidTimeError(Exception):
    '''Base class for invalid time exceptions.'''


class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.

    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.

    See DstTzInfo.normalize() for more info
    '''


class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.

    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''


def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to
    # determine which of the list of tzinfo instances for this zone
    # to use in order to restore the state of any datetime instances using
    # it correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The particular state requested in this timezone no longer exists.
        # This indicates a corrupt pickle, or the timezone database has been
        # corrected violently enough to make this particular
        # (utcoffset,dstoffset) no longer exist in the zone, or the
        # abbreviation has been changed.
        pass

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever been needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
Python
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''

from cStringIO import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize

from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta


def build_tzinfo(zone, fp):
    """Build a tzinfo instance for ``zone`` from an open tzfile(5) stream.

    zone -- Olson zone name (becomes the generated class name).
    fp   -- binary file object positioned at the start of a TZif file.

    Returns an instance of a dynamically-created StaticTzInfo subclass
    (zones with no transitions) or DstTzInfo subclass (all others).
    Raises AssertionError if the stream lacks the 'TZif' magic.
    """
    # Header: 4-byte magic, 16 reserved bytes, then six big-endian counts.
    head_fmt = '>4s 16x 6l'
    head_size = calcsize(head_fmt)
    (magic, ttisgmtcnt, ttisstdcnt, leapcnt,
     timecnt, typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))

    # Make sure it is a tzinfo(5) file
    assert magic == 'TZif'

    # Read out the transition times, localtime indices and ttinfo structures.
    data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
        timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
    data_size = calcsize(data_fmt)
    data = unpack(data_fmt, fp.read(data_size))

    # make sure we unpacked the right number of values
    assert len(data) == 2 * timecnt + 3 * typecnt + 1
    transitions = [memorized_datetime(trans) for trans in data[:timecnt]]
    lindexes = list(data[timecnt:2 * timecnt])
    ttinfo_raw = data[2 * timecnt:-1]
    tznames_raw = data[-1]
    del data

    # Process ttinfo into separate structs:
    # each entry is (utcoffset_seconds, isdst_flag, abbreviation).
    ttinfo = []
    tznames = {}
    i = 0
    while i < len(ttinfo_raw):
        # have we looked up this timezone name yet?
        tzname_offset = ttinfo_raw[i + 2]
        if tzname_offset not in tznames:
            # Abbreviations are NUL-terminated strings packed in one blob.
            nul = tznames_raw.find('\0', tzname_offset)
            if nul < 0:
                nul = len(tznames_raw)
            tznames[tzname_offset] = tznames_raw[tzname_offset:nul]
        ttinfo.append((ttinfo_raw[i],
                       bool(ttinfo_raw[i + 1]),
                       tznames[tzname_offset]))
        i += 3

    # Now build the timezone object
    if len(transitions) == 0:
        # NOTE(review): the next line is a no-op expression statement — it
        # builds a tuple and discards it. Looks like leftover code.
        ttinfo[0][0], ttinfo[0][2]
        cls = type(zone, (StaticTzInfo,), dict(
            zone=zone,
            _utcoffset=memorized_timedelta(ttinfo[0][0]),
            _tzname=ttinfo[0][2]))
    else:
        # Early dates use the first standard time ttinfo
        i = 0
        while ttinfo[i][1]:
            i += 1
        if ttinfo[i] == ttinfo[lindexes[0]]:
            transitions[0] = datetime.min
        else:
            transitions.insert(0, datetime.min)
            lindexes.insert(0, i)

        # calculate transition info
        transition_info = []
        for i in range(len(transitions)):
            inf = ttinfo[lindexes[i]]
            utcoffset = inf[0]
            if not inf[1]:
                dst = 0
            else:
                # DST offset = this offset minus the most recent standard
                # time offset before this transition.
                for j in range(i - 1, -1, -1):
                    prev_inf = ttinfo[lindexes[j]]
                    if not prev_inf[1]:
                        break
                dst = inf[0] - prev_inf[0]  # dst offset

            tzname = inf[2]

            # Round utcoffset and dst to the nearest minute or the
            # datetime library will complain. Conversions to these timezones
            # might be up to plus or minus 30 seconds out, but it is
            # the best we can do.
            # NOTE(review): relies on Python 2 integer division semantics
            # for ints; behavior differs for negative offsets under
            # true division.
            utcoffset = int((utcoffset + 30) / 60) * 60
            dst = int((dst + 30) / 60) * 60
            transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))

        cls = type(zone, (DstTzInfo,), dict(
            zone=zone,
            _utc_transition_times=transitions,
            _transition_info=transition_info))

    return cls()


if __name__ == '__main__':
    # Ad-hoc smoke test: parse two bundled zone files and dump transitions.
    import os.path
    from pprint import pprint
    base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    tz = build_tzinfo('Australia/Melbourne',
                      open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
    tz = build_tzinfo('US/Eastern',
                      open(os.path.join(base, 'US', 'Eastern'), 'rb'))
    pprint(tz._utc_transition_times)
    #print tz.asPython(4)
    #print tz.transitions_mapping
Python
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''

from cStringIO import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize

from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta


def build_tzinfo(zone, fp):
    """Build a tzinfo instance for ``zone`` from an open tzfile(5) stream.

    zone -- Olson zone name (becomes the generated class name).
    fp   -- binary file object positioned at the start of a TZif file.

    Returns an instance of a dynamically-created StaticTzInfo subclass
    (zones with no transitions) or DstTzInfo subclass (all others).
    Raises AssertionError if the stream lacks the 'TZif' magic.
    """
    # Header: 4-byte magic, 16 reserved bytes, then six big-endian counts.
    head_fmt = '>4s 16x 6l'
    head_size = calcsize(head_fmt)
    (magic, ttisgmtcnt, ttisstdcnt, leapcnt,
     timecnt, typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))

    # Make sure it is a tzinfo(5) file
    assert magic == 'TZif'

    # Read out the transition times, localtime indices and ttinfo structures.
    data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
        timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
    data_size = calcsize(data_fmt)
    data = unpack(data_fmt, fp.read(data_size))

    # make sure we unpacked the right number of values
    assert len(data) == 2 * timecnt + 3 * typecnt + 1
    transitions = [memorized_datetime(trans) for trans in data[:timecnt]]
    lindexes = list(data[timecnt:2 * timecnt])
    ttinfo_raw = data[2 * timecnt:-1]
    tznames_raw = data[-1]
    del data

    # Process ttinfo into separate structs:
    # each entry is (utcoffset_seconds, isdst_flag, abbreviation).
    ttinfo = []
    tznames = {}
    i = 0
    while i < len(ttinfo_raw):
        # have we looked up this timezone name yet?
        tzname_offset = ttinfo_raw[i + 2]
        if tzname_offset not in tznames:
            # Abbreviations are NUL-terminated strings packed in one blob.
            nul = tznames_raw.find('\0', tzname_offset)
            if nul < 0:
                nul = len(tznames_raw)
            tznames[tzname_offset] = tznames_raw[tzname_offset:nul]
        ttinfo.append((ttinfo_raw[i],
                       bool(ttinfo_raw[i + 1]),
                       tznames[tzname_offset]))
        i += 3

    # Now build the timezone object
    if len(transitions) == 0:
        # Zone never changes: a StaticTzInfo with the single ttinfo entry.
        # (Fix: removed the original no-op expression statement
        # ``ttinfo[0][0], ttinfo[0][2]`` that built and discarded a tuple.)
        cls = type(zone, (StaticTzInfo,), dict(
            zone=zone,
            _utcoffset=memorized_timedelta(ttinfo[0][0]),
            _tzname=ttinfo[0][2]))
    else:
        # Early dates use the first standard time ttinfo
        i = 0
        while ttinfo[i][1]:
            i += 1
        if ttinfo[i] == ttinfo[lindexes[0]]:
            transitions[0] = datetime.min
        else:
            transitions.insert(0, datetime.min)
            lindexes.insert(0, i)

        # calculate transition info
        transition_info = []
        for i in range(len(transitions)):
            inf = ttinfo[lindexes[i]]
            utcoffset = inf[0]
            if not inf[1]:
                dst = 0
            else:
                # DST offset = this offset minus the most recent standard
                # time offset before this transition.
                for j in range(i - 1, -1, -1):
                    prev_inf = ttinfo[lindexes[j]]
                    if not prev_inf[1]:
                        break
                dst = inf[0] - prev_inf[0]  # dst offset

            tzname = inf[2]

            # Round utcoffset and dst to the nearest minute or the
            # datetime library will complain. Conversions to these timezones
            # might be up to plus or minus 30 seconds out, but it is
            # the best we can do.
            utcoffset = int((utcoffset + 30) / 60) * 60
            dst = int((dst + 30) / 60) * 60
            transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))

        cls = type(zone, (DstTzInfo,), dict(
            zone=zone,
            _utc_transition_times=transitions,
            _transition_info=transition_info))

    return cls()


if __name__ == '__main__':
    # Ad-hoc smoke test: parse two bundled zone files and dump transitions.
    import os.path
    from pprint import pprint
    base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
    tz = build_tzinfo('Australia/Melbourne',
                      open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
    tz = build_tzinfo('US/Eastern',
                      open(os.path.join(base, 'US', 'Eastern'), 'rb'))
    pprint(tz._utc_transition_times)
    #print tz.asPython(4)
    #print tz.transitions_mapping
Python
''' Reference tzinfo implementations from the Python docs. Used for testing against as they are only correct for the years 1987 to 2006. Do not use these for real code. ''' from datetime import tzinfo, timedelta, datetime from pytz import utc, UTC, HOUR, ZERO # A class building tzinfo objects for fixed-offset time zones. # Note that FixedOffset(0, "UTC") is a different way to build a # UTC tzinfo object. class FixedOffset(tzinfo): """Fixed offset in minutes east from UTC.""" def __init__(self, offset, name): self.__offset = timedelta(minutes = offset) self.__name = name def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return ZERO # A class capturing the platform's idea of local time. import time as _time STDOFFSET = timedelta(seconds = -_time.timezone) if _time.daylight: DSTOFFSET = timedelta(seconds = -_time.altzone) else: DSTOFFSET = STDOFFSET DSTDIFF = DSTOFFSET - STDOFFSET class LocalTimezone(tzinfo): def utcoffset(self, dt): if self._isdst(dt): return DSTOFFSET else: return STDOFFSET def dst(self, dt): if self._isdst(dt): return DSTDIFF else: return ZERO def tzname(self, dt): return _time.tzname[self._isdst(dt)] def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1) stamp = _time.mktime(tt) tt = _time.localtime(stamp) return tt.tm_isdst > 0 Local = LocalTimezone() # A complete implementation of current DST rules for major US time zones. def first_sunday_on_or_after(dt): days_to_go = 6 - dt.weekday() if days_to_go: dt += timedelta(days_to_go) return dt # In the US, DST starts at 2am (standard time) on the first Sunday in April. DSTSTART = datetime(1, 4, 1, 2) # and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct. # which is the first Sunday on or after Oct 25. 
DSTEND = datetime(1, 10, 25, 1) class USTimeZone(tzinfo): def __init__(self, hours, reprname, stdname, dstname): self.stdoffset = timedelta(hours=hours) self.reprname = reprname self.stdname = stdname self.dstname = dstname def __repr__(self): return self.reprname def tzname(self, dt): if self.dst(dt): return self.dstname else: return self.stdname def utcoffset(self, dt): return self.stdoffset + self.dst(dt) def dst(self, dt): if dt is None or dt.tzinfo is None: # An exception may be sensible here, in one or both cases. # It depends on how you want to treat them. The default # fromutc() implementation (called by the default astimezone() # implementation) passes a datetime with dt.tzinfo is self. return ZERO assert dt.tzinfo is self # Find first Sunday in April & the last in October. start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year)) end = first_sunday_on_or_after(DSTEND.replace(year=dt.year)) # Can't compare naive to aware objects, so strip the timezone from # dt first. if start <= dt.replace(tzinfo=None) < end: return HOUR else: return ZERO Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") Central = USTimeZone(-6, "Central", "CST", "CDT") Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
Python
''' datetime.tzinfo timezone definitions generated from the Olson timezone database: ftp://elsie.nci.nih.gov/pub/tz*.tar.gz See the datetime section of the Python Library Reference for information on how to use these modules. ''' # The Olson database has historically been updated about 4 times a year OLSON_VERSION = '2009e' VERSION = OLSON_VERSION # Version format for a patch release - only one so far. #VERSION = OLSON_VERSION + '.2' __version__ = OLSON_VERSION OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling __all__ = [ 'timezone', 'utc', 'country_timezones', 'country_names', 'AmbiguousTimeError', 'InvalidTimeError', 'NonExistentTimeError', 'UnknownTimeZoneError', 'all_timezones', 'all_timezones_set', 'common_timezones', 'common_timezones_set', ] import sys, datetime, os.path, gettext from UserDict import DictMixin try: from pkg_resources import resource_stream except ImportError: resource_stream = None from tzinfo import AmbiguousTimeError, InvalidTimeError, NonExistentTimeError from tzinfo import unpickler from tzfile import build_tzinfo # Use 2.3 sets module implementation if set builtin is not available try: set except NameError: from sets import Set as set def open_resource(name): """Open a resource from the zoneinfo subdir for reading. Uses the pkg_resources module if available. """ if resource_stream is not None: return resource_stream(__name__, 'zoneinfo/' + name) else: name_parts = name.lstrip('/').split('/') for part in name_parts: if part == os.path.pardir or os.path.sep in part: raise ValueError('Bad path segment: %r' % part) filename = os.path.join(os.path.dirname(__file__), 'zoneinfo', *name_parts) return open(filename, 'rb') def resource_exists(name): """Return true if the given resource exists""" try: open_resource(name) return True except IOError: return False # Enable this when we get some translations? # We want an i18n API that is useful to programs using Python's gettext # module, as well as the Zope3 i18n package. 
# Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
#         'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
#         fallback=True
#         )
# def _(timezone_name):
#     """Translate a timezone name using the current locale, returning Unicode"""
#     return t.ugettext(timezone_name)


class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass


# Cache of instantiated tzinfo objects, keyed by (unmunged) zone name.
_tzinfo_cache = {}

def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(u'US/Eastern') is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> timezone('Asia/Shangri-La')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: 'Asia/Shangri-La'

    >>> timezone(u'\N{TRADE MARK SIGN}')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: u'\u2122'
    '''
    if zone.upper() == 'UTC':
        return utc
    try:
        # All valid zone names are ASCII; normalize unicode input.
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # Parse the bundled tzfile on first request only.
            _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]


def _unmunge_zone(zone):
    """Undo the time zone name munging done by older versions of pytz."""
    return zone.replace('_plus_', '+').replace('_minus_', '-')


ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)


class UTC(datetime.tzinfo):
    """UTC

    Identical to the reference UTC implementation given in Python docs except
    that it unpickles using the single module global instance defined beneath
    this class declaration.

    Also contains extra attributes and methods to match other pytz tzinfo
    instances.
    """
    zone = "UTC"

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __reduce__(self):
        # Pickle to the module-level singleton factory so unpickling
        # always yields the shared instance.
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        return dt.replace(tzinfo=self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"


UTC = utc = UTC() # UTC is a singleton


def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p), len(naive_p), len(p) - len(naive_p)
    (60, 43, 17)
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc
_UTC.__safe_for_unpickling__ = True


def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)
_p.__safe_for_unpickling__ = True


class _LazyDict(DictMixin):
    """Dictionary populated on first use."""
    # data stays None until _fill() (provided by subclasses) runs.
    data = None

    def __getitem__(self, key):
        if self.data is None:
            self._fill()
        # Country codes are stored upper-cased; accept any case on lookup.
        return self.data[key.upper()]

    def keys(self):
        if self.data is None:
            self._fill()
        return self.data.keys()


class _CountryTimezoneDict(_LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.

    iso3166_code is the two letter code used to identify the country.

    >>> country_timezones['ch']
    ['Europe/Zurich']
    >>> country_timezones['CH']
    ['Europe/Zurich']
    >>> country_timezones[u'ch']
    ['Europe/Zurich']
    >>> country_timezones['XXX']
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'

    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::

    >>> country_timezones('nz')
    ['Pacific/Auckland', 'Pacific/Chatham']
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]

    def _fill(self):
        # Parse the bundled zone.tab: "code coordinates zone [comment]".
        data = {}
        zone_tab = open_resource('zone.tab')
        for line in zone_tab:
            if line.startswith('#'):
                continue
            code, coordinates, zone = line.split(None, 4)[:3]
            if zone not in all_timezones_set:
                continue
            try:
                data[code].append(zone)
            except KeyError:
                data[code] = [zone]
        self.data = data

country_timezones = _CountryTimezoneDict()


class _CountryNameDict(_LazyDict):
    '''Dictionary proving ISO3166 code -> English name.

    >>> country_names['au']
    'Australia'
    '''
    def _fill(self):
        # Parse the bundled iso3166.tab: "code<TAB>country name".
        data = {}
        zone_tab = open_resource('iso3166.tab')
        for line in zone_tab.readlines():
            if line.startswith('#'):
                continue
            code, name = line.split(None, 1)
            data[code] = name.strip()
        self.data = data

country_names = _CountryNameDict()


# Time-zone info based solely on fixed offsets

class _FixedOffset(datetime.tzinfo):

    zone = None # to match the standard pytz API

    def __init__(self, minutes):
        # Offsets of a whole day or more cannot be represented by tzinfo.
        if abs(minutes) >= 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def __reduce__(self):
        # Unpickle through the FixedOffset factory so instances stay
        # interned per offset.
        return FixedOffset, (self._minutes, )

    def dst(self, dt):
        return None

    def tzname(self, dt):
        return None

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        return dt.replace(tzinfo=self)


def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

    >>> FixedOffset(1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """
    if offset == 0:
        return UTC
    # _tzinfos is a deliberate mutable default: it interns one
    # _FixedOffset instance per offset across all calls.
    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. we need to save it.
        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return info
FixedOffset.__safe_for_unpickling__ = True


def _test():
    # Run the module's doctests with the package importable from cwd's parent.
    import doctest, os, sys
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)

if __name__ == '__main__':
    _test()

# Generated list of every zone shipped in this release; filtered against
# the actually-present resources further below. (The literal continues
# past this block.)
all_timezones = \
['Africa/Abidjan',
 'Africa/Accra',
 'Africa/Addis_Ababa',
 'Africa/Algiers',
 'Africa/Asmara',
 'Africa/Asmera',
 'Africa/Bamako',
 'Africa/Bangui',
 'Africa/Banjul',
 'Africa/Bissau',
 'Africa/Blantyre',
 'Africa/Brazzaville',
 'Africa/Bujumbura',
 'Africa/Cairo',
 'Africa/Casablanca',
 'Africa/Ceuta',
 'Africa/Conakry',
 'Africa/Dakar',
 'Africa/Dar_es_Salaam',
 'Africa/Djibouti',
 'Africa/Douala',
 'Africa/El_Aaiun',
 'Africa/Freetown',
 'Africa/Gaborone',
 'Africa/Harare',
 'Africa/Johannesburg',
 'Africa/Kampala',
 'Africa/Khartoum',
 'Africa/Kigali',
 'Africa/Kinshasa',
 'Africa/Lagos',
 'Africa/Libreville',
 'Africa/Lome',
 'Africa/Luanda',
 'Africa/Lubumbashi',
 'Africa/Lusaka',
 'Africa/Malabo',
 'Africa/Maputo',
 'Africa/Maseru',
 'Africa/Mbabane',
 'Africa/Mogadishu',
 'Africa/Monrovia',
 'Africa/Nairobi',
 'Africa/Ndjamena',
 'Africa/Niamey',
 'Africa/Nouakchott',
 'Africa/Ouagadougou',
 'Africa/Porto-Novo',
 'Africa/Sao_Tome',
 'Africa/Timbuktu',
 'Africa/Tripoli',
 'Africa/Tunis',
 'Africa/Windhoek',
 'America/Adak',
 'America/Anchorage',
 'America/Anguilla',
 'America/Antigua',
 'America/Araguaina',
 'America/Argentina/Buenos_Aires',
 'America/Argentina/Catamarca',
 'America/Argentina/ComodRivadavia',
 'America/Argentina/Cordoba',
 'America/Argentina/Jujuy',
 'America/Argentina/La_Rioja',
 'America/Argentina/Mendoza',
 'America/Argentina/Rio_Gallegos',
 'America/Argentina/Salta',
 'America/Argentina/San_Juan',
 'America/Argentina/San_Luis',
 'America/Argentina/Tucuman',
 'America/Argentina/Ushuaia',
 'America/Aruba',
 'America/Asuncion',
 'America/Atikokan',
 'America/Atka',
 'America/Bahia',
 'America/Barbados',
 'America/Belem',
 'America/Belize',
 'America/Blanc-Sablon',
'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Buenos_Aires', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Catamarca', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Coral_Harbour', 'America/Cordoba', 'America/Costa_Rica', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Ensenada', 'America/Fort_Wayne', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Indianapolis', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Jujuy', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Knox_IN', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Louisville', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Mazatlan', 'America/Mendoza', 'America/Menominee', 'America/Merida', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Acre', 
'America/Porto_Velho', 'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Rosario', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Shiprock', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Virgin', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/South_Pole', 'Antarctica/Syowa', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Ashkhabad', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Calcutta', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Chungking', 'Asia/Colombo', 'Asia/Dacca', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Istanbul', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Kathmandu', 'Asia/Katmandu', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macao', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 
'Asia/Saigon', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Tel_Aviv', 'Asia/Thimbu', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ujung_Pandang', 'Asia/Ulaanbaatar', 'Asia/Ulan_Bator', 'Asia/Urumqi', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faeroe', 'Atlantic/Faroe', 'Atlantic/Jan_Mayen', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/ACT', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Canberra', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/LHI', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/NSW', 'Australia/North', 'Australia/Perth', 'Australia/Queensland', 'Australia/South', 'Australia/Sydney', 'Australia/Tasmania', 'Australia/Victoria', 'Australia/West', 'Australia/Yancowinna', 'Brazil/Acre', 'Brazil/DeNoronha', 'Brazil/East', 'Brazil/West', 'CET', 'CST6CDT', 'Canada/Atlantic', 'Canada/Central', 'Canada/East-Saskatchewan', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Canada/Saskatchewan', 'Canada/Yukon', 'Chile/Continental', 'Chile/EasterIsland', 'Cuba', 'EET', 'EST', 'EST5EDT', 'Egypt', 'Eire', 'Etc/GMT', 'Etc/GMT+0', 'Etc/GMT+1', 'Etc/GMT+10', 'Etc/GMT+11', 'Etc/GMT+12', 'Etc/GMT+2', 'Etc/GMT+3', 'Etc/GMT+4', 'Etc/GMT+5', 'Etc/GMT+6', 'Etc/GMT+7', 'Etc/GMT+8', 'Etc/GMT+9', 'Etc/GMT-0', 'Etc/GMT-1', 'Etc/GMT-10', 'Etc/GMT-11', 'Etc/GMT-12', 'Etc/GMT-13', 'Etc/GMT-14', 'Etc/GMT-2', 'Etc/GMT-3', 'Etc/GMT-4', 'Etc/GMT-5', 'Etc/GMT-6', 'Etc/GMT-7', 'Etc/GMT-8', 'Etc/GMT-9', 'Etc/GMT0', 'Etc/Greenwich', 'Etc/UCT', 'Etc/UTC', 'Etc/Universal', 'Etc/Zulu', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 
'Europe/Belfast', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Nicosia', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Tiraspol', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GB', 'GB-Eire', 'GMT', 'GMT+0', 'GMT-0', 'GMT0', 'Greenwich', 'HST', 'Hongkong', 'Iceland', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Iran', 'Israel', 'Jamaica', 'Japan', 'Kwajalein', 'Libya', 'MET', 'MST', 'MST7MDT', 'Mexico/BajaNorte', 'Mexico/BajaSur', 'Mexico/General', 'NZ', 'NZ-CHAT', 'Navajo', 'PRC', 'PST8PDT', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Chatham', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 
'Pacific/Pitcairn', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Samoa', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'Pacific/Yap', 'Poland', 'Portugal', 'ROC', 'ROK', 'Singapore', 'Turkey', 'UCT', 'US/Alaska', 'US/Aleutian', 'US/Arizona', 'US/Central', 'US/East-Indiana', 'US/Eastern', 'US/Hawaii', 'US/Indiana-Starke', 'US/Michigan', 'US/Mountain', 'US/Pacific', 'US/Pacific-New', 'US/Samoa', 'UTC', 'Universal', 'W-SU', 'WET', 'Zulu'] all_timezones = [ tz for tz in all_timezones if resource_exists(tz)] all_timezones_set = set(all_timezones) common_timezones = \ ['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Salta', 'America/Argentina/San_Juan', 
'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Bahia', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Costa_Rica', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Martinique', 'America/Mazatlan', 'America/Menominee', 'America/Merida', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Velho', 
'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/Syowa', 'Antarctica/Vostok', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Colombo', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Kathmandu', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ulaanbaatar', 'Asia/Urumqi', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 
'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faroe', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/Perth', 'Australia/Sydney', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Helsinki', 'Europe/Istanbul', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Oslo', 'Europe/Paris', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/Simferopol', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zaporozhye', 'Europe/Zurich', 'GMT', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Chatham', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 
'Pacific/Pitcairn', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'US/Alaska', 'US/Arizona', 'US/Central', 'US/Eastern', 'US/Hawaii', 'US/Mountain', 'US/Pacific', 'UTC'] common_timezones = [ tz for tz in common_timezones if tz in all_timezones] common_timezones_set = set(common_timezones)
Python
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
An interactive, stateful AJAX shell that runs Python code on the server.

Part of http://code.google.com/p/google-app-engine-samples/.

May be run as a standalone app or in an existing app as an admin-only handler.
Can be used for system administration tasks, as an interactive way to try out
APIs, or as a debugging aid during development.

The logging, os, sys, db, and users modules are imported automatically.

Interpreter state is stored in the datastore so that variables, function
definitions, and other values in the global and local namespaces can be used
across commands.

To use the shell in your app, copy shell.py, static/*, and templates/* into
your app's source directory. Then, copy the URL handlers from app.yaml into
your app.yaml.

NOTE: this is Python 2 code (``new`` module, ``exec ... in ...`` statement,
``__builtin__``); it will not run under Python 3.

TODO: unit tests!
"""

import logging
import new
import os
import pickle
import sys
import traceback
import types
import wsgiref.handlers

from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template

# Set to True if stack traces should be shown in the browser, etc.
_DEBUG = True

# The entity kind for shell sessions. Feel free to rename to suit your app.
_SESSION_KIND = '_Shell_Session'

# Types that can't be pickled.
UNPICKLABLE_TYPES = (
  types.ModuleType,
  types.TypeType,
  types.ClassType,
  types.FunctionType,
  )

# Unpicklable statements to seed new sessions with.
INITIAL_UNPICKLABLES = [
  'import logging',
  'import os',
  'import sys',
  'from google.appengine.ext import db',
  'from google.appengine.api import users',
  ]


class Session(db.Model):
  """A shell session. Stores the session's globals.

  Each session globals is stored in one of two places:

  If the global is picklable, it's stored in the parallel globals and
  global_names list properties. (They're parallel lists to work around the
  unfortunate fact that the datastore can't store dictionaries natively.)

  If the global is not picklable (e.g. modules, classes, and functions), or if
  it was created by the same statement that created an unpicklable global,
  it's not stored directly. Instead, the statement is stored in the
  unpicklables list property. On each request, before executing the current
  statement, the unpicklable statements are evaluated to recreate the
  unpicklable globals.

  The unpicklable_names property stores all of the names of globals that were
  added by unpicklable statements. When we pickle and store the globals after
  executing a statement, we skip the ones in unpicklable_names.

  Using Text instead of string is an optimization. We don't query on any of
  these properties, so they don't need to be indexed.
  """
  # Parallel lists: global_names[i] names the pickled value in globals[i].
  global_names = db.ListProperty(db.Text)
  globals = db.ListProperty(db.Blob)
  # Names created by unpicklable statements; skipped when re-pickling.
  unpicklable_names = db.ListProperty(db.Text)
  # The source statements re-executed on each request to rebuild state.
  unpicklables = db.ListProperty(db.Text)

  def set_global(self, name, value):
    """Adds a global, or updates it if it already exists. Also removes the
    global from the list of unpicklable names.

    Args:
      name: the name of the global to remove
      value: any picklable value
    """
    blob = db.Blob(pickle.dumps(value))

    if name in self.global_names:
      # Replace in place so the parallel lists stay aligned.
      index = self.global_names.index(name)
      self.globals[index] = blob
    else:
      self.global_names.append(db.Text(name))
      self.globals.append(blob)

    self.remove_unpicklable_name(name)

  def remove_global(self, name):
    """Removes a global, if it exists.

    Args:
      name: string, the name of the global to remove
    """
    if name in self.global_names:
      index = self.global_names.index(name)
      del self.global_names[index]
      del self.globals[index]

  def globals_dict(self):
    """Returns a dictionary view of the globals.
    """
    return dict((name, pickle.loads(val))
                for name, val in zip(self.global_names, self.globals))

  def add_unpicklable(self, statement, names):
    """Adds a statement and list of names to the unpicklables.

    Also removes the names from the globals.

    Args:
      statement: string, the statement that created new unpicklable global(s).
      names: list of strings; the names of the globals created by the
        statement.
    """
    self.unpicklables.append(db.Text(statement))

    for name in names:
      self.remove_global(name)
      if name not in self.unpicklable_names:
        self.unpicklable_names.append(db.Text(name))

  def remove_unpicklable_name(self, name):
    """Removes a name from the list of unpicklable names, if it exists.

    Args:
      name: string, the name of the unpicklable global to remove
    """
    if name in self.unpicklable_names:
      self.unpicklable_names.remove(name)


class FrontPageHandler(webapp.RequestHandler):
  """Creates a new session and renders the shell.html template.
  """

  def get(self):
    # set up the session. TODO: garbage collect old shell sessions
    session_key = self.request.get('session')
    if session_key:
      session = Session.get(session_key)
    else:
      # create a new session seeded with the default imports
      session = Session()
      session.unpicklables = [db.Text(line) for line in INITIAL_UNPICKLABLES]
      session_key = session.put()

    template_file = os.path.join(os.path.dirname(__file__), '../templates',
                                 'shell.html')
    session_url = '/?session=%s' % session_key
    vars = { 'server_software': os.environ['SERVER_SOFTWARE'],
             'python_version': sys.version,
             'session': str(session_key),
             'user': users.get_current_user(),
             'login_url': users.create_login_url(session_url),
             'logout_url': users.create_logout_url(session_url),
             }
    rendered = webapp.template.render(template_file, vars, debug=_DEBUG)
    self.response.out.write(rendered)


class StatementHandler(webapp.RequestHandler):
  """Evaluates a python statement in a given session and returns the result.
  """

  def get(self):
    self.response.headers['Content-Type'] = 'text/plain'

    # extract the statement to be run
    statement = self.request.get('statement')
    if not statement:
      return

    # the python compiler doesn't like network line endings
    statement = statement.replace('\r\n', '\n')

    # add a couple newlines at the end of the statement. this makes
    # single-line expressions such as 'class Foo: pass' evaluate happily.
    statement += '\n\n'

    # log and compile the statement up front
    try:
      logging.info('Compiling and evaluating:\n%s' % statement)
      compiled = compile(statement, '<string>', 'single')
    except:
      self.response.out.write(traceback.format_exc())
      return

    # create a dedicated module to be used as this statement's __main__
    statement_module = new.module('__main__')

    # use this request's __builtin__, since it changes on each request.
    # this is needed for import statements, among other things.
    import __builtin__
    statement_module.__builtins__ = __builtin__

    # load the session from the datastore
    session = Session.get(self.request.get('session'))

    # swap in our custom module for __main__. then unpickle the session
    # globals, run the statement, and re-pickle the session globals, all
    # inside it.
    old_main = sys.modules.get('__main__')
    try:
      sys.modules['__main__'] = statement_module
      statement_module.__name__ = '__main__'

      # re-evaluate the unpicklables to recreate unpicklable globals
      for code in session.unpicklables:
        exec code in statement_module.__dict__

      # re-initialize the globals from their pickled values
      for name, val in session.globals_dict().items():
        try:
          statement_module.__dict__[name] = val
        except:
          msg = 'Dropping %s since it could not be unpickled.\n' % name
          self.response.out.write(msg)
          logging.warning(msg + traceback.format_exc())
          session.remove_global(name)

      # run! capture stdout/stderr into the HTTP response while executing
      old_globals = dict(statement_module.__dict__)
      try:
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        try:
          sys.stdout = self.response.out
          sys.stderr = self.response.out
          exec compiled in statement_module.__dict__
        finally:
          sys.stdout = old_stdout
          sys.stderr = old_stderr
      except:
        self.response.out.write(traceback.format_exc())
        return

      # extract the new globals that this statement added
      new_globals = {}
      for name, val in statement_module.__dict__.items():
        if name not in old_globals or val != old_globals[name]:
          new_globals[name] = val

      if True in [isinstance(val, UNPICKLABLE_TYPES)
                  for val in new_globals.values()]:
        # this statement added an unpicklable global. store the statement and
        # the names of all of the globals it added in the unpicklables.
        session.add_unpicklable(statement, new_globals.keys())
        logging.debug('Storing this statement as an unpicklable.')
      else:
        # this statement didn't add any unpicklables. pickle and store the
        # new globals back into the datastore.
        for name, val in new_globals.items():
          if not name.startswith('__'):
            session.set_global(name, val)

    finally:
      # always restore the real __main__ before persisting the session
      sys.modules['__main__'] = old_main

    session.put()


def main():
  # Make the local libs and the bundled django.zip importable before the
  # WSGI app starts handling requests.
  libs_path = os.path.abspath('./')
  django_path = os.path.abspath('django.zip')
  if django_path not in sys.path:
    sys.path.insert(0, django_path)
  if libs_path not in sys.path:
    sys.path.insert(0, libs_path)
  application = webapp.WSGIApplication(
    [('/shell/', FrontPageHandler),
     ('/shell/shell/', StatementHandler)], debug=_DEBUG)
  wsgiref.handlers.CGIHandler().run(application)


if __name__ == '__main__':
  main()
Python
#! /usr/bin/env python
# coding=utf-8
#############################################################################
#                                                                           #
#   File: fetch.py                                                          #
#                                                                           #
#   Copyright (C) 2008-2009 Du XiaoGang <dugang@188.com>                    #
#                                                                           #
#   Home: http://gappproxy.googlecode.com                                   #
#                                                                           #
#   This file is part of GAppProxy.                                         #
#                                                                           #
#   GAppProxy is free software: you can redistribute it and/or modify       #
#   it under the terms of the GNU General Public License as                 #
#   published by the Free Software Foundation, either version 3 of the      #
#   License, or (at your option) any later version.                         #
#                                                                           #
#   GAppProxy is distributed in the hope that it will be useful,            #
#   but WITHOUT ANY WARRANTY; without even the implied warranty of          #
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the           #
#   GNU General Public License for more details.                            #
#                                                                           #
#   You should have received a copy of the GNU General Public License       #
#   along with GAppProxy.  If not, see <http://www.gnu.org/licenses/>.      #
#                                                                           #
#############################################################################

# Server-side fetch handler for the GAppProxy HTTP proxy: receives a proxied
# request via POST, re-issues it with App Engine's urlfetch, and streams the
# raw HTTP response (optionally base64- or zlib-encoded) back to the local
# proxy. Python 2 / Google App Engine code.

import wsgiref.handlers, urlparse, StringIO, logging, base64, zlib
from google.appengine.ext import webapp
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
# from accesslog import logAccess


class MainHandler(webapp.RequestHandler):
    # Server identification string used in generated responses.
    Software = 'GAppProxy/1.0.0 beta'
    # hop to hop header should not be forwarded
    HtohHdrs = ['connection', 'keep-alive', 'proxy-authenticate',
                'proxy-authorization', 'te', 'trailers',
                'transfer-encoding', 'upgrade']

    def myError(self, status, description, encodeResponse):
        """Write a synthetic raw-HTTP error response into the response body.

        The whole HTTP message (status line + headers + HTML body) is written
        as payload because the local proxy parses it as the upstream reply.
        The body honours the same encodeResponse scheme as normal replies.
        """
        # header
        self.response.out.write('HTTP/1.1 %d %s\r\n' % (status, description))
        self.response.out.write('Server: %s\r\n' % self.Software)
        self.response.out.write('Content-Type: text/html\r\n')
        self.response.out.write('\r\n')
        # body
        content = '<h1>Fetch Server Error</h1><p>Error Code: %d<p>Message: %s' % (status, description)
        if encodeResponse == 'base64':
            self.response.out.write(base64.b64encode(content))
        elif encodeResponse == 'compress':
            self.response.out.write(zlib.compress(content))
        else:
            self.response.out.write(content)

    def post(self):
        """Proxy one request: validate form fields, fetch, forward raw reply."""
        try:
            # get post data
            origMethod = self.request.get('method')
            origPath = self.request.get('encoded_path')
            if origPath != '':
                # path may arrive base64-encoded to survive transport
                origPath = base64.b64decode(origPath)
            else:
                origPath = self.request.get('path')
            origHeaders = self.request.get('headers')
            encodeResponse = self.request.get('encodeResponse')
            origPostData = self.request.get('postdata')

            # check method
            if origMethod != 'GET' and origMethod != 'HEAD' \
                    and origMethod != 'POST':
                # forbid
                self.myError(590, 'Invalid local proxy, Method not allowed.',
                             encodeResponse)
                return
            if origMethod == 'GET':
                method = urlfetch.GET
            elif origMethod == 'HEAD':
                method = urlfetch.HEAD
            elif origMethod == 'POST':
                method = urlfetch.POST

            # check path: only absolute http/https URLs are proxied
            (scm, netloc, path, params, query, _) = urlparse.urlparse(origPath)
            if (scm.lower() != 'http' and scm.lower() != 'https') \
                    or not netloc:
                self.myError(590, 'Invalid local proxy, Unsupported Scheme.',
                             encodeResponse)
                return
            # create new path (drop any fragment)
            newPath = urlparse.urlunparse((scm, netloc, path, params, query,
                                           ''))

            # make new headers from the newline-separated header blob
            newHeaders = {}
            contentLength = 0
            si = StringIO.StringIO(origHeaders)
            while True:
                line = si.readline()
                line = line.strip()
                if line == '':
                    break
                # parse line
                (name, _, value) = line.partition(':')
                name = name.strip()
                value = value.strip()
                if name.lower() in self.HtohHdrs:
                    # don't forward
                    continue
                newHeaders[name] = value
                if name.lower() == 'content-length':
                    contentLength = int(value)
            # predined header
            newHeaders['Connection'] = 'close'

            # check post data length against the declared Content-Length
            if contentLength != 0:
                if contentLength != len(origPostData):
                    self.myError(590, 'Invalid local proxy, Wrong length of post data.', encodeResponse)
                    return
            else:
                origPostData = ''
            if origPostData != '' and origMethod != 'POST':
                self.myError(590, 'Invalid local proxy, Inconsistent method and data.', encodeResponse)
                return
        except Exception, e:
            self.myError(591, 'Fetch server error, %s.' % str(e),
                         encodeResponse)
            return

        # fetch, try 3 times
        for _ in range(3):
            try:
                resp = urlfetch.fetch(newPath, origPostData, method,
                                      newHeaders, False, False)
                break
            except urlfetch_errors.ResponseTooLargeError:
                # urlfetch caps response size; no point retrying
                self.myError(591, 'Fetch server error, Sorry, Google\'s limit, file size up to 1MB.', encodeResponse)
                return
            except Exception:
                continue
        else:
            # all three attempts failed
            self.myError(591, 'Fetch server error, The target server may be down or not exist. Another possibility: try to request the URL directly.', encodeResponse)
            return

        # forward the upstream reply as an opaque raw-HTTP payload
        self.response.headers['Content-Type'] = 'application/octet-stream'
        # status line
        self.response.out.write('HTTP/1.1 %d %s\r\n' % (resp.status_code, \
                self.response.http_status_message(resp.status_code)))
        # headers
        # default Content-Type is text
        textContent = True
        for header in resp.headers:
            if header.strip().lower() in self.HtohHdrs:
                # don't forward
                continue
            ## there may have some problems on multi-cookie process in urlfetch.
            #if header.lower() == 'set-cookie':
            #    logging.info('O %s: %s' % (header, resp.headers[header]))
            #    scs = resp.headers[header].split(',')
            #    for sc in scs:
            #        logging.info('N %s: %s' % (header, sc.strip()))
            #        self.response.out.write('%s: %s\r\n' % (header, sc.strip()))
            #    continue
            # other
            self.response.out.write('%s: %s\r\n' % (header,
                                                    resp.headers[header]))
            # check Content-Type
            if header.lower() == 'content-type':
                if resp.headers[header].lower().find('text') == -1:
                    # not text
                    textContent = False
        self.response.out.write('\r\n')
        # need encode?
        if encodeResponse == 'base64':
            self.response.out.write(base64.b64encode(resp.content))
        elif encodeResponse == 'compress':
            # only compress when Content-Type is text/xxx
            if textContent:
                self.response.out.write(zlib.compress(resp.content))
            else:
                self.response.out.write(resp.content)
        else:
            self.response.out.write(resp.content)
        # log
        #logAccess(netloc, self.request.remote_addr)

    def get(self):
        """Serve a human-readable status page (Chinese) confirming the proxy
        is deployed and working."""
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        self.response.out.write( \
'''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>GAppProxy已经在工作了</title>
</head>
<body>
<table width="800" border="0" align="center">
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
<b><h1>%s 已经在工作了</h1></b>
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
GAppProxy是一个开源的HTTP Proxy软件,使用Python编写,运行于Google App Engine平台上.
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
更多相关介绍,请参考<a href="http://gappproxy.googlecode.com/">GAppProxy项目主页</a>.
</td></tr>
<tr><td align="center"><hr></td></tr>
<tr><td align="center">
<img src="http://code.google.com/appengine/images/appengine-silver-120x30.gif" alt="Powered by Google App Engine" />
</td></tr>
<tr><td align="center"><hr></td></tr>
</table>
</body>
</html>
''' % self.Software)


def main():
    # Single route: the local proxy posts every request to /fetch/.
    application = webapp.WSGIApplication([('/fetch/', MainHandler)])
    wsgiref.handlers.CGIHandler().run(application)


if __name__ == '__main__':
    main()
Python
#-*- coding: utf-8 -*-
import math


class Pager(object):
    """Pagination helper: computes the window of page links to display and
    renders it as an HTML snippet (labels are in Chinese).

    Attributes set by __init__: total, pageSize, page, pageCount, from_page,
    to_page, page_list, show_first, show_end, show_pre, show_next.
    """

    def __init__(self, total, pageSize, page, page_list_num = 10):
        """Clamp the requested page and compute the visible page window.

        Args:
            total: total number of items.
            pageSize: items per page.
            page: requested (1-based) page number; clamped to valid range.
            page_list_num: maximum number of page links to show.
        """
        # NOTE(review): indentation was reconstructed from a whitespace-
        # mangled source; the window-clamping branch nesting below follows
        # the standard pager pattern — confirm against the original file.
        pageCount = int(math.ceil((total+0.0)/pageSize))
        if page < 1:
            page = 1
        if page > pageCount:
            page = pageCount
        # keep roughly `offset` pages visible before the current one
        offset = 2
        if page_list_num >= pageCount:
            # everything fits: show all pages
            from_page = 1
            to_page = pageCount
        else:
            from_page = page - offset
            to_page = from_page + page_list_num -1
            if from_page < 1:
                # window fell off the left edge: shift it right
                to_page = page + 1 - from_page
                from_page = 1
                if to_page - from_page < page_list_num:
                    to_page = page_list_num
            elif to_page > pageCount:
                # window fell off the right edge: shift it left
                from_page = pageCount - page_list_num + 1
                to_page = pageCount
        # "first page + ellipsis" shortcut
        if page - offset >1 and pageCount > page_list_num:
            show_first = True
        else:
            show_first = False
        if page > 1:
            show_pre = True
        else:
            show_pre = False
        if page < pageCount:
            show_next = True
        else:
            show_next = False
        if to_page < pageCount:
            show_end = True
        else:
            show_end = False
        self.total = total
        self.pageSize = pageSize
        self.page = page
        self.pageCount = pageCount
        self.from_page = from_page
        self.to_page = to_page
        self.page_list = range(from_page, to_page+1)
        self.show_first = show_first
        self.show_end = show_end
        self.show_pre = show_pre
        self.show_next = show_next

    def render(self, link=''):
        """Render the pager as an HTML string.

        Args:
            link: base URL for the page links; '?' or '&' is appended as
                needed before the page parameter.
        Returns:
            str: concatenated HTML anchors/spans for the pager controls.
        """
        html = []
        pager = self
        html.append('<span>共%s条</span>' % pager.total)
        if link == '':
            link = '?'
        else:
            if link.find('?') == -1:
                link += '?'
            else:
                link += '&'
        if pager.show_pre:
            html.append('<a href="%spage=%s">上一页</a>' % (link, pager.page-1))
#        else:
#            html.append('<span class="disabled">上一页</span>')
        if pager.show_first:
            html.append('<a href="%spage=1">1</a><span>...</span>' % link)
        for p in pager.page_list:
            if p == pager.page:
                # current page is plain text, not a link
                html.append('<span class="current">%s</span>' % pager.page)
            else:
                html.append('<a href="%spage=%s">%s</a>' % (link, p, p))
        if pager.show_end :
            html.append('<span>...</span><a href="%spage=%s">%s</a>' % (link, pager.pageCount, pager.pageCount))
        if pager.show_next:
            html.append('<a href="%spage=%s">下一页</a>' % (link, pager.page + 1))
#        else:
#            html.append('<span class="disabled">下一页</span>')
        return ''.join(html)

    def render_tpl(self):
        """Return a Django-template fragment equivalent of render(), for use
        with a template context containing this object as `pager`."""
        return """
    共{{ pager.total }}条
    {% if pager.show_pre %}
        <a href="?page={{ pager.page|add:"-1" }}">上一页</a>
    {% else %}
        <span class="disabled">上一页</span>
    {% endif %}
    {% if pager.show_first %}
        <a href="?page=1">1</a>...
    {% endif %}
    {% for p in pager.page_list %}
        {% ifequal p pager.page %}
            <span class="current">{{ pager.page }}</span>
        {% else %}
            <a href="?page={{ p }}">{{ p }}</a>
        {% endifequal %}
    {% endfor %}
    {% if pager.show_end %}
        ...<a href="?page={{ pager.pageCount }}">{{ pager.pageCount }}</a>
    {% endif %}
    {% if pager.show_next %}
        <a href="?page={{ pager.page|add:"1" }}">下一页</a>
    {% else %}
        <span class="disabled">下一页</span>
    {% endif %}
    """
Python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Sharded counter for Google App Engine: writes are spread across N shard
# entities to avoid datastore write contention on a single entity; reads sum
# the shards and are cached in memcache.

from google.appengine.api import memcache
from google.appengine.ext import db
import random


class GeneralCounterShardConfig(db.Model):
    """Tracks the number of shards for each named counter."""
    name = db.StringProperty(required=True)
    num_shards = db.IntegerProperty(required=True, default=20)


class GeneralCounterShard(db.Model):
    """Shards for each named counter"""
    name = db.StringProperty(required=True)
    count = db.IntegerProperty(required=True, default=0)


def get_count(name):
    """Retrieve the value for a given sharded counter.

    Sums all shards on a memcache miss and caches the result for 60 seconds.

    Parameters:
      name - The name of the counter
    """
    total = memcache.get(name)
    if total is None:
        total = 0
        for counter in GeneralCounterShard.all().filter('name = ', name):
            total += counter.count
        memcache.add(name, str(total), 60)
    else:
        total = int(total)
    return total


def increment(name):
    """Increment the value for a given sharded counter.

    Picks a random shard and bumps it inside a transaction, then nudges the
    memcache total (best-effort; a miss simply forces a re-sum on next read).

    Parameters:
      name - The name of the counter
    """
    config = GeneralCounterShardConfig.get_or_insert(name, name=name)
    def txn():
        index = random.randint(0, config.num_shards - 1)
        shard_name = name + str(index)
        counter = GeneralCounterShard.get_by_key_name(shard_name)
        if counter is None:
            counter = GeneralCounterShard(key_name=shard_name, name=name)
        counter.count += 1
        counter.put()
    db.run_in_transaction(txn)
    memcache.incr(name)


def decrement(name):
    """Decrement the value for a given sharded counter.

    Parameters:
      name - The name of the counter
    """
    config = GeneralCounterShardConfig.get_or_insert(name, name=name)
    def txn():
        index = random.randint(0, config.num_shards - 1)
        shard_name = name + str(index)
        counter = GeneralCounterShard.get_by_key_name(shard_name)
        if counter is None:
            counter = GeneralCounterShard(key_name=shard_name, name=name)
        counter.count -= 1
        counter.put()
    db.run_in_transaction(txn)
    memcache.decr(name)


def increase_shards(name, num):
    """Increase the number of shards for a given sharded counter.

    Will never decrease the number of shards.

    Parameters:
      name - The name of the counter
      num - How many shards to use
    """
    config = GeneralCounterShardConfig.get_or_insert(name, name=name)
    def txn():
        if config.num_shards < num:
            config.num_shards = num
            config.put()
    db.run_in_transaction(txn)
Python
#!/usr/bin/env python

# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Filters `adb` output, translating native stack-trace lines into
# function/file:line symbols using the local Android build's unstripped
# libraries with addr2line, objdump, and c++filt. Python 2 code
# (print statements, string module helpers).

import os
import re
import string
import sys

###############################################################################
# match "#00  pc 0003f52e  /system/lib/libdvm.so" for example
###############################################################################
trace_line = re.compile("(.*)(\#[0-9]+)  (..) ([0-9a-f]{8})  ([^\r\n \t]*)")

# returns a list containing the function name and the file/lineno
def CallAddr2Line(lib, addr):
    # Set up by SetupToolsPath()/FindSymbolsDir() before any call.
    global symbols_dir
    global addr2line_cmd
    global cppfilt_cmd

    if lib != "":
        cmd = addr2line_cmd + \
            " -f -e " + symbols_dir + lib + " 0x" + addr
        stream = os.popen(cmd)
        lines = stream.readlines()
        list = map(string.strip, lines)
    else:
        list = []

    if list != []:
        # addr2line -f prints the mangled name first; demangle it with c++filt.
        # Name like "move_forward_type<JavaVMOption>" causes troubles
        # on the shell, so escape the angle brackets first.
        mangled_name = re.sub('<', '\<', list[0]);
        mangled_name = re.sub('>', '\>', mangled_name);
        cmd = cppfilt_cmd + " " + mangled_name
        stream = os.popen(cmd)
        list[0] = stream.readline()
        stream.close()
        list = map(string.strip, list)
    else:
        list = [ "(unknown)", "(unknown)" ]
    return list

###############################################################################
# similar to CallAddr2Line, but using objdump to find out the name of the
# containing function of the specified address
###############################################################################
def CallObjdump(lib, addr):
    global objdump_cmd
    global symbols_dir
    unknown = "(unknown)"
    # NOTE(review): `uname` is computed here but never used in this function.
    uname = os.uname()[0]
    if uname == "Darwin":
        proc = os.uname()[-1]
        if proc == "i386":
            uname = "darwin-x86"
        else:
            uname = "darwin-ppc"
    elif uname == "Linux":
        uname = "linux-x86"
    if lib != "":
        # disassemble exactly one byte span so objdump prints the enclosing
        # function's symbolic label
        next_addr = string.atoi(addr, 16) + 1
        cmd = objdump_cmd \
            + " -C -d --start-address=0x" + addr + " --stop-address=" \
            + str(next_addr) \
            + " " + symbols_dir + lib
        stream = os.popen(cmd)
        lines = stream.readlines()
        # NOTE(review): map() here discards its result — this strip is a
        # no-op; the regex below tolerates the trailing whitespace anyway.
        map(string.strip, lines)
        stream.close()
    else:
        return unknown

    # output looks like
    #
    # file format elf32-littlearm
    #
    # Disassembly of section .text:
    #
    # 0000833c <func+0x4>:
    #    833c:       701a            strb    r2, [r3, #0]
    #
    # we want to extract the "func" part
    num_lines = len(lines)
    if num_lines < 2:
        return unknown
    func_name = lines[num_lines-2]
    func_regexp = re.compile("(^.*\<)(.*)(\+.*\>:$)")
    components = func_regexp.match(func_name)
    if components is None:
        return unknown
    return components.group(2)

###############################################################################
# determine the symbols directory in the local build
###############################################################################
def FindSymbolsDir():
    global symbols_dir
    try:
        path = os.environ['ANDROID_PRODUCT_OUT'] + "/symbols"
    except:
        # no lunch'ed environment: ask the build system for the path
        cmd = "CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core " \
            + "SRC_TARGET_DIR=build/target make -f build/core/config.mk " \
            + "dumpvar-abs-TARGET_OUT_UNSTRIPPED"
        stream = os.popen(cmd)
        str = stream.read()
        stream.close()
        path = str.strip()
    if (not os.path.exists(path)):
        print path + " not found!"
        sys.exit(1)
    symbols_dir = path

###############################################################################
# determine the path of binutils
###############################################################################
def SetupToolsPath():
    global addr2line_cmd
    global objdump_cmd
    global cppfilt_cmd
    global symbols_dir
    uname = os.uname()[0]
    if uname == "Darwin":
        uname = "darwin-x86"
    elif uname == "Linux":
        uname = "linux-x86"
    gcc_version = os.environ["TARGET_GCC_VERSION"]
    # try a tree-relative prebuilts path first, then ANDROID_BUILD_TOP
    prefix = "./prebuilts/gcc/" + uname + "/arm/arm-linux-androideabi-" + \
        gcc_version + "/bin/"
    addr2line_cmd = prefix + "arm-linux-androideabi-addr2line"
    if (not os.path.exists(addr2line_cmd)):
        try:
            prefix = os.environ['ANDROID_BUILD_TOP'] + "/prebuilts/gcc/" + \
                uname + "/arm/arm-linux-androideabi-" + gcc_version + "/bin/"
        except:
            prefix = "";
        addr2line_cmd = prefix + "arm-linux-androideabi-addr2line"
        if (not os.path.exists(addr2line_cmd)):
            print addr2line_cmd + " not found!"
            sys.exit(1)
    objdump_cmd = prefix + "arm-linux-androideabi-objdump"
    cppfilt_cmd = prefix + "arm-linux-androideabi-c++filt"

###############################################################################
# look up the function and file/line number for a raw stack trace line
# groups[0]: log tag
# groups[1]: stack level
# groups[2]: "pc"
# groups[3]: code address
# groups[4]: library name
###############################################################################
def SymbolTranslation(groups):
    lib_name = groups[4]
    code_addr = groups[3]
    caller = CallObjdump(lib_name, code_addr)
    func_line_pair = CallAddr2Line(lib_name, code_addr)
    # If a callee is inlined to the caller, objdump will see the caller's
    # address but addr2line will report the callee's address. So the printed
    # format is designed to be "caller<-callee file:line"
    if (func_line_pair[0] != caller):
        print groups[0] + groups[1] + " " + caller + "<-" + \
            ' '.join(func_line_pair[:]) + " "
    else:
        print groups[0] + groups[1] + " " + ' '.join(func_line_pair[:]) + " "

###############################################################################

if __name__ == '__main__':
    # pass the options to adb
    adb_cmd = "adb " + ' '.join(sys.argv[1:])

    # setup addr2line_cmd and objdump_cmd
    SetupToolsPath()

    # setup the symbols directory
    FindSymbolsDir()

    # invoke the adb command and filter its output line by line
    stream = os.popen(adb_cmd)
    while (True):
        line = stream.readline()
        # EOF reached
        if (line == ''):
            break
        # remove the trailing \n
        line = line.strip()
        # see if this is a stack trace line
        match = trace_line.match(line)
        if (match):
            groups = match.groups()
            # translate raw address into symbols
            SymbolTranslation(groups)
        else:
            # non-trace lines pass through unchanged
            print line
        sys.stdout.flush()
    # adb itself aborts
    stream.close()
Python
#!/usr/bin/env python

# Enforces layering between java packages: parses a dependency rules file,
# scans .java sources, and reports imports that violate the declared layers.
# Python 2 code (print statements, file(), dict.itervalues()). Errors are
# accumulated in the module-level flag `err`.

import os
import re
import sys


def fail_with_usage():
    # Print usage to stderr and exit non-zero.
    # NOTE(review): the "dash (+)" / "plus (-)" labels below look swapped
    # relative to the parser ('+' = low level, '-' = legacy); they are
    # runtime output and left untouched here — confirm and fix upstream.
    sys.stderr.write("usage: java-layers.py DEPENDENCY_FILE SOURCE_DIRECTORIES...\n")
    sys.stderr.write("\n")
    sys.stderr.write("Enforces layering between java packages.  Scans\n")
    sys.stderr.write("DIRECTORY and prints errors when the packages violate\n")
    sys.stderr.write("the rules defined in the DEPENDENCY_FILE.\n")
    sys.stderr.write("\n")
    sys.stderr.write("Prints a warning when an unknown package is encountered\n")
    sys.stderr.write("on the assumption that it should fit somewhere into the\n")
    sys.stderr.write("layering.\n")
    sys.stderr.write("\n")
    sys.stderr.write("DEPENDENCY_FILE format\n")
    sys.stderr.write("  - # starts comment\n")
    sys.stderr.write("  - Lines consisting of two java package names:  The\n")
    sys.stderr.write("    first package listed must not contain any references\n")
    sys.stderr.write("    to any classes present in the second package, or any\n")
    sys.stderr.write("    of its dependencies.\n")
    sys.stderr.write("  - Lines consisting of one java package name:  The\n")
    sys.stderr.write("    packge is assumed to be a high level package and\n")
    sys.stderr.write("    nothing may depend on it.\n")
    sys.stderr.write("  - Lines consisting of a dash (+) followed by one java\n")
    sys.stderr.write("    package name: The package is considered a low level\n")
    sys.stderr.write("    package and may not import any of the other packages\n")
    sys.stderr.write("    listed in the dependency file.\n")
    sys.stderr.write("  - Lines consisting of a plus (-) followed by one java\n")
    sys.stderr.write("    package name: The package is considered \'legacy\'\n")
    sys.stderr.write("    and excluded from errors.\n")
    sys.stderr.write("\n")
    sys.exit(1)


class Dependency:
    """One rule line: a package (`lower`) plus its flags and the packages it
    must not reference (`uppers`, expanded into `transitive`)."""

    def __init__(self, filename, lineno, lower, top, lowlevel, legacy):
        self.filename = filename
        self.lineno = lineno
        self.lower = lower
        self.top = top
        self.lowlevel = lowlevel
        self.legacy = legacy
        self.uppers = []
        self.transitive = set()

    def matches(self, imp):
        """Return True if import string `imp` falls under any forbidden
        package prefix of this rule."""
        for d in self.transitive:
            if imp.startswith(d):
                return True
        return False


class Dependencies:
    """The full rule set: computes the transitive closure of forbidden
    packages for every rule and answers package lookups."""

    def __init__(self, deps):
        # Walk uppers depth-first, accumulating them into obj.transitive;
        # reports (and flags) circular dependency chains.
        def recurse(obj, dep, visited):
            global err
            if dep in visited:
                sys.stderr.write("%s:%d: Circular dependency found:\n"
                    % (dep.filename, dep.lineno))
                for v in visited:
                    sys.stderr.write("%s:%d: Dependency: %s\n"
                        % (v.filename, v.lineno, v.lower))
                err = True
                return
            visited.append(dep)
            for upper in dep.uppers:
                obj.transitive.add(upper)
                if upper in deps:
                    recurse(obj, deps[upper], visited)
        self.deps = deps
        self.parts = [(dep.lower.split('.'),dep) for dep in deps.itervalues()]
        # transitive closure of dependencies
        for dep in deps.itervalues():
            recurse(dep, dep, [])
        # disallow everything from the low level components
        for dep in deps.itervalues():
            if dep.lowlevel:
                for d in deps.itervalues():
                    if dep != d and not d.legacy:
                        dep.transitive.add(d.lower)
        # disallow the 'top' components everywhere but in their own package
        for dep in deps.itervalues():
            if dep.top and not dep.legacy:
                for d in deps.itervalues():
                    if dep != d and not d.legacy:
                        d.transitive.add(dep.lower)
        # append '.' so prefix matching only matches whole package segments
        for dep in deps.itervalues():
            dep.transitive = set([x+"." for x in dep.transitive])
        if False:
            # debug dump of the computed closure
            for dep in deps.itervalues():
                print "-->", dep.lower, "-->", dep.transitive

    # Lookup the dep object for the given package.  If pkg is a subpackage
    # of one with a rule, that one will be returned.  If no matches are found,
    # None is returned.
    def lookup(self, pkg):
        # Returns the number of parts that match
        def compare_parts(parts, pkg):
            if len(parts) > len(pkg):
                return 0
            n = 0
            for i in range(0, len(parts)):
                if parts[i] != pkg[i]:
                    return 0
                n = n + 1
            return n
        pkg = pkg.split(".")
        matched = 0
        result = None
        # longest-prefix match wins
        for (parts,dep) in self.parts:
            x = compare_parts(parts, pkg)
            if x > matched:
                matched = x
                result = dep
        return result


def parse_dependency_file(filename):
    """Parse the rules file into a Dependencies object.

    Line forms: "pkg" (top-level), "+pkg" (low level), "-pkg" (legacy),
    "lower upper" (lower must not reference upper). '#' starts a comment.
    Sets the module-level `err` flag on malformed or duplicate lines.
    """
    global err
    f = file(filename)
    lines = f.readlines()
    f.close()
    # attach 1-based line numbers, strip comments/blanks, tokenize
    def lineno(s, i):
        i[0] = i[0] + 1
        return (i[0],s)
    n = [0]
    lines = [lineno(x,n) for x in lines]
    lines = [(n,s.split("#")[0].strip()) for (n,s) in lines]
    lines = [(n,s) for (n,s) in lines if len(s) > 0]
    lines = [(n,s.split()) for (n,s) in lines]
    deps = {}
    for n,words in lines:
        if len(words) == 1:
            lower = words[0]
            top = True
            legacy = False
            lowlevel = False
            if lower[0] == '+':
                lower = lower[1:]
                top = False
                lowlevel = True
            elif lower[0] == '-':
                lower = lower[1:]
                legacy = True
            if lower in deps:
                sys.stderr.write(("%s:%d: Package '%s' already defined on"
                    + " line %d.\n") % (filename, n, lower, deps[lower].lineno))
                err = True
            else:
                deps[lower] = Dependency(filename, n, lower,
                                         top, lowlevel, legacy)
        elif len(words) == 2:
            lower = words[0]
            upper = words[1]
            if lower in deps:
                dep = deps[lower]
                if dep.top:
                    sys.stderr.write(("%s:%d: Can't add dependency to top level package "
                        + "'%s'\n") % (filename, n, lower))
                    err = True
            else:
                dep = Dependency(filename, n, lower, False, False, False)
                deps[lower] = dep
            dep.uppers.append(upper)
        else:
            sys.stderr.write("%s:%d: Too many words on line starting at \'%s\'\n" % (
                filename, n, words[2]))
            err = True
    return Dependencies(deps)


def find_java_files(srcs):
    """Collect .java file paths: '@listfile' entries name a file containing
    paths (one per line); other entries are directories walked recursively."""
    result = []
    for d in srcs:
        if d[0] == '@':
            f = file(d[1:])
            result.extend([fn for fn in [s.strip() for s in f.readlines()]
                if len(fn) != 0])
            f.close()
        else:
            for root, dirs, files in os.walk(d):
                result.extend([os.sep.join((root,f)) for f in files
                    if f.lower().endswith(".java")])
    return result


COMMENTS = re.compile("//.*?\n|/\*.*?\*/", re.S)
PACKAGE = re.compile("package\s+(.*)")
IMPORT = re.compile("import\s+(.*)")


def examine_java_file(deps, filename):
    """Check one .java file's imports against the rules; report violations
    to stderr and set the module-level `err` flag."""
    global err
    # Yes, this is a crappy java parser.  Write a better one if you want to.
    f = file(filename)
    text = f.read()
    f.close()
    text = COMMENTS.sub("", text)
    # only the header before the class body is needed
    index = text.find("{")
    if index < 0:
        sys.stderr.write(("%s: Error: Unable to parse java. Can't find class "
            + "declaration.\n") % filename)
        err = True
        return
    text = text[0:index]
    statements = [s.strip() for s in text.split(";")]
    # First comes the package declaration.  Then iterate while we see import
    # statements.  Anything else is either bad syntax that we don't care about
    # because the compiler will fail, or the beginning of the class declaration.
    m = PACKAGE.match(statements[0])
    if not m:
        sys.stderr.write(("%s: Error: Unable to parse java. Missing package "
            + "statement.\n") % filename)
        err = True
        return
    pkg = m.group(1)
    imports = []
    for statement in statements[1:]:
        m = IMPORT.match(statement)
        if not m:
            break
        imports.append(m.group(1))
    # Do the checking
    if False:
        # debug dump
        print filename
        print "'%s' --> %s" % (pkg, imports)
    dep = deps.lookup(pkg)
    if not dep:
        sys.stderr.write(("%s: Error: Package does not appear in dependency file: "
            + "%s\n") % (filename, pkg))
        err = True
        return
    for imp in imports:
        if dep.matches(imp):
            sys.stderr.write("%s: Illegal import in package '%s' of '%s'\n"
                % (filename, pkg, imp))
            err = True


# Global error flag: set by the parse/check helpers, tested in main().
err = False


def main(argv):
    if len(argv) < 3:
        fail_with_usage()
    deps = parse_dependency_file(argv[1])

    if err:
        sys.exit(1)

    java = find_java_files(argv[2:])
    if err:
        sys.exit(1)

    for filename in java:
        examine_java_file(deps, filename)
    if err:
        sys.stderr.write("%s: Using this file as dependency file.\n" % argv[1])
        sys.exit(1)

    sys.exit(0)


if __name__ == "__main__":
    main(sys.argv)
Python
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Render a product makefile's variable dump (read from stdin) as HTML."""

import os
import re
import sys


def break_lines(key, val):
  """Split a variable's value into one item per line.

  Returns (key, val) unchanged for keys whose value must stay intact.
  """
  # these don't get split
  # BUGFIX: ("PRODUCT_MODEL") was a plain string, so `key in ...` was a
  # substring test (e.g. "MODEL" matched).  A 1-tuple gives membership.
  if key in ("PRODUCT_MODEL",):
    return (key, val)
  return (key, "\n".join(val.split()))


def split_line(line):
  """Split 'KEY=VALUE' into (KEY, VALUE); missing '=' yields ('', ...)."""
  words = line.split("=", 1)
  if len(words) == 1:
    return (words[0], "")
  else:
    return (words[0], words[1])


def sort_lines(text):
  """Return the whitespace-separated tokens of `text`, sorted, one per line."""
  lines = text.split()
  lines.sort()
  return "\n".join(lines)


def parse_variables(lines):
  """Parse non-blank 'KEY=VALUE' lines into (key, value) pairs."""
  return [split_line(line) for line in lines if line.strip()]


def render_variables(variables):
  """Render the variables (minus the internal FILE entry) as an HTML table."""
  variables = dict(variables)
  del variables["FILE"]
  variables = sorted(variables.items(), key=lambda kv: kv[0])
  return ("<table id='variables'>"
      + "\n".join(["<tr><th>%(key)s</th><td>%(val)s</td></tr>"
                   % {"key": key, "val": val}
                   for key, val in variables])
      + "</table>")


def linkify_inherit(variables, text, func_name):
  """Wrap each '$(call func_name,path)' occurrence in an <a> to path.html."""
  groups = re.split("(\\$\\(call " + func_name + ",.*\\))", text)
  result = ""
  # re.split with one capture group alternates [text, match, text, ...],
  # so there are len(groups)//2 matches.
  # BUGFIX: was len(groups)/2, which is a float (a TypeError for range)
  # under true division.
  for i in range(0, len(groups) // 2):
    i = i * 2
    result = result + groups[i]
    s = groups[i + 1]
    # The link target is the called path, minus the closing paren,
    # relativized against the location of the current file.
    href = s.split(",", 1)[1].strip()[:-1]
    href = href.replace("$(SRC_TARGET_DIR)", "build/target")
    href = ("../" * variables["FILE"].count("/")) + href + ".html"
    result = result + "<a href=\"%s\">%s</a>" % (href, s)
  result = result + groups[-1]
  return result


def render_original(variables, text):
  """Linkify both inherit-product flavors in the original makefile text."""
  text = linkify_inherit(variables, text, "inherit-product")
  text = linkify_inherit(variables, text, "inherit-product-if-exists")
  return text


def read_file(fn):
  """Return the full contents of file `fn`."""
  with open(fn) as f:
    return f.read()


def main(argv):
  # read the variables
  lines = sys.stdin.readlines()
  variables = parse_variables(lines)

  # format the variables
  variables = [break_lines(key, val) for key, val in variables]

  # now it's a dict
  variables = dict(variables)

  sorted_vars = (
      "PRODUCT_COPY_FILES",
      "PRODUCT_PACKAGES",
      "PRODUCT_LOCALES",
      "PRODUCT_FACTORY_RAMDISK_MODULES",
      "PRODUCT_PROPERTY_OVERRIDES",
    )

  for key in sorted_vars:
    variables[key] = sort_lines(variables[key])

  # the original file
  original = read_file(variables["FILE"])

  # formatting
  values = dict(variables)
  values.update({
    "variables": render_variables(variables),
    "original": render_original(variables, original),
  })

  print("""<html>


<head>
  <title>%(FILE)s</title>
  <style type="text/css">
    body {
      font-family: Helvetica, Arial, sans-serif;
      padding-bottom: 20px;
    }
    #variables {
      border-collapse: collapse;
    }
    #variables th, #variables td {
      vertical-align: top;
      text-align: left;
      border-top: 1px solid #c5cdde;
      border-bottom: 1px solid #c5cdde;
      padding: 2px 10px 2px 10px;
    }
    #variables th {
      font-size: 10pt;
      background-color: #e2ecff
    }
    #variables td {
      background-color: #ebf2ff;
      white-space: pre;
      font-size: 10pt;
    }
    #original {
      background-color: #ebf2ff;
      border-top: 1px solid #c5cdde;
      border-bottom: 1px solid #c5cdde;
      padding: 2px 10px 2px 10px;
      white-space: pre;
      font-size: 10pt;
    }
  </style>
</head>
<body>
<h1>%(FILE)s</h1>
<a href="#Original">Original</a>
<a href="#Variables">Variables</a>
<h2><a name="Original"></a>Original</h2>
<div id="original">%(original)s</div>
<h2><a name="Variables"></a>Variables</h2>
%(variables)s
</body>
</html>
""" % values)


if __name__ == "__main__":
  main(sys.argv)
Python
#!/usr/bin/env python # This file uses the following encoding: utf-8 import sys import re if len(sys.argv) == 1: print 'usage: ' + sys.argv[0] + ' <build.log>' sys.exit() # if you add another level, don't forget to give it a color below class severity: UNKNOWN=0 SKIP=100 FIXMENOW=1 HIGH=2 MEDIUM=3 LOW=4 HARMLESS=5 def colorforseverity(sev): if sev == severity.FIXMENOW: return 'fuchsia' if sev == severity.HIGH: return 'red' if sev == severity.MEDIUM: return 'orange' if sev == severity.LOW: return 'yellow' if sev == severity.HARMLESS: return 'limegreen' if sev == severity.UNKNOWN: return 'blue' return 'grey' warnpatterns = [ { 'category':'make', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'make: overriding commands/ignoring old commands', 'patterns':[r".*: warning: overriding commands for target .+", r".*: warning: ignoring old commands for target .+"] }, { 'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-Wimplicit-function-declaration', 'description':'Implicit function declaration', 'patterns':[r".*: warning: implicit declaration of function .+"] }, { 'category':'C/C++', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: conflicting types for '.+'"] }, { 'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-Wtype-limits', 'description':'Expression always evaluates to true or false', 'patterns':[r".*: warning: comparison is always false due to limited range of data type", r".*: warning: comparison of unsigned expression >= 0 is always true", r".*: warning: comparison of unsigned expression < 0 is always false"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Incompatible pointer types', 'patterns':[r".*: warning: assignment from incompatible pointer type", r".*: warning: return from incompatible pointer type", r".*: warning: passing argument [0-9]+ of '.*' from incompatible pointer type", r".*: warning: 
initialization from incompatible pointer type"] }, { 'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-fno-builtin', 'description':'Incompatible declaration of built in function', 'patterns':[r".*: warning: incompatible implicit declaration of built-in function .+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wunused-parameter', 'description':'Unused parameter', 'patterns':[r".*: warning: unused parameter '.*'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wunused', 'description':'Unused function, variable or label', 'patterns':[r".*: warning: '.+' defined but not used", r".*: warning: unused variable '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wunused-value', 'description':'Statement with no effect', 'patterns':[r".*: warning: statement with no effect"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wmissing-field-initializers', 'description':'Missing initializer', 'patterns':[r".*: warning: missing initializer"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: \(near initialization for '.+'\)"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wformat', 'description':'Format string does not match arguments', 'patterns':[r".*: warning: format '.+' expects type '.+', but argument [0-9]+ has type '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wformat-extra-args', 'description':'Too many arguments for format string', 'patterns':[r".*: warning: too many arguments for format"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wsign-compare', 'description':'Comparison between signed and unsigned', 'patterns':[r".*: warning: comparison between signed and unsigned", r".*: warning: comparison of promoted \~unsigned with unsigned", r".*: warning: 
signed and unsigned type in conditional expression"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Comparison between enum and non-enum', 'patterns':[r".*: warning: enumeral and non-enumeral type in conditional expression"] }, { 'category':'libpng', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'libpng: zero area', 'patterns':[r".*libpng warning: Ignoring attempt to set cHRM RGB triangle with zero area"] }, { 'category':'aapt', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'aapt: no comment for public symbol', 'patterns':[r".*: warning: No comment for public symbol .+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wmissing-braces', 'description':'Missing braces around initializer', 'patterns':[r".*: warning: missing braces around initializer.*"] }, { 'category':'C/C++', 'severity':severity.HARMLESS, 'members':[], 'option':'', 'description':'No newline at end of file', 'patterns':[r".*: warning: no newline at end of file"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wcast-qual', 'description':'Qualifier discarded', 'patterns':[r".*: warning: passing argument [0-9]+ of '.+' discards qualifiers from pointer target type", r".*: warning: assignment discards qualifiers from pointer target type", r".*: warning: return discards qualifiers from pointer target type"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wattributes', 'description':'Attribute ignored', 'patterns':[r".*: warning: '_*packed_*' attribute ignored"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wattributes', 'description':'Visibility mismatch', 'patterns':[r".*: warning: '.+' declared with greater visibility than the type of its field '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Shift count greater than width of type', 
'patterns':[r".*: warning: (left|right) shift count >= width of type"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'extern &lt;foo&gt; is initialized', 'patterns':[r".*: warning: '.+' initialized and declared 'extern'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wold-style-declaration', 'description':'Old style declaration', 'patterns':[r".*: warning: 'static' is not at beginning of declaration"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wuninitialized', 'description':'Variable may be used uninitialized', 'patterns':[r".*: warning: '.+' may be used uninitialized in this function"] }, { 'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-Wuninitialized', 'description':'Variable is used uninitialized', 'patterns':[r".*: warning: '.+' is used uninitialized in this function"] }, { 'category':'ld', 'severity':severity.MEDIUM, 'members':[], 'option':'-fshort-enums', 'description':'ld: possible enum size mismatch', 'patterns':[r".*: warning: .* uses variable-size enums yet the output is to use 32-bit enums; use of enum values across objects may fail"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wpointer-sign', 'description':'Pointer targets differ in signedness', 'patterns':[r".*: warning: pointer targets in initialization differ in signedness", r".*: warning: pointer targets in assignment differ in signedness", r".*: warning: pointer targets in return differ in signedness", r".*: warning: pointer targets in passing argument [0-9]+ of '.+' differ in signedness"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wstrict-overflow', 'description':'Assuming overflow does not occur', 'patterns':[r".*: warning: assuming signed overflow does not occur when assuming that .* is always (true|false)"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 
'option':'-Wempty-body', 'description':'Suggest adding braces around empty body', 'patterns':[r".*: warning: suggest braces around empty body in an 'if' statement", r".*: warning: empty body in an if-statement", r".*: warning: suggest braces around empty body in an 'else' statement", r".*: warning: empty body in an else-statement"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wparentheses', 'description':'Suggest adding parentheses', 'patterns':[r".*: warning: suggest explicit braces to avoid ambiguous 'else'", r".*: warning: suggest parentheses around arithmetic in operand of '.+'", r".*: warning: suggest parentheses around comparison in operand of '.+'", r".*: warning: suggest parentheses around '.+?' .+ '.+?'", r".*: warning: suggest parentheses around assignment used as truth value"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Static variable used in non-static inline function', 'patterns':[r".*: warning: '.+' is static but used in inline function '.+' which is not static"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wimplicit int', 'description':'No type or storage class (will default to int)', 'patterns':[r".*: warning: data definition has no type or storage class"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: type defaults to 'int' in declaration of '.+'"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: parameter names \(without types\) in function declaration"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wstrict-aliasing', 'description':'Dereferencing &lt;foo&gt; breaks strict aliasing rules', 'patterns':[r".*: warning: dereferencing .* break strict-aliasing rules"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 
'option':'-Wpointer-to-int-cast', 'description':'Cast from pointer to integer of different size', 'patterns':[r".*: warning: cast from pointer to integer of different size"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wint-to-pointer-cast', 'description':'Cast to pointer from integer of different size', 'patterns':[r".*: warning: cast to pointer from integer of different size"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Symbol redefined', 'patterns':[r".*: warning: "".+"" redefined"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: this is the location of the previous definition"] }, { 'category':'ld', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'ld: type and size of dynamic symbol are not defined', 'patterns':[r".*: warning: type and size of dynamic symbol `.+' are not defined"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Pointer from integer without cast', 'patterns':[r".*: warning: assignment makes pointer from integer without a cast"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Pointer from integer without cast', 'patterns':[r".*: warning: passing argument [0-9]+ of '.+' makes pointer from integer without a cast"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Integer from pointer without cast', 'patterns':[r".*: warning: assignment makes integer from pointer without a cast"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Integer from pointer without cast', 'patterns':[r".*: warning: passing argument [0-9]+ of '.+' makes integer from pointer without a cast"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Integer from pointer without 
cast', 'patterns':[r".*: warning: return makes integer from pointer without a cast"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wunknown-pragmas', 'description':'Ignoring pragma', 'patterns':[r".*: warning: ignoring #pragma .+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wclobbered', 'description':'Variable might be clobbered by longjmp or vfork', 'patterns':[r".*: warning: variable '.+' might be clobbered by 'longjmp' or 'vfork'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wclobbered', 'description':'Argument might be clobbered by longjmp or vfork', 'patterns':[r".*: warning: argument '.+' might be clobbered by 'longjmp' or 'vfork'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wredundant-decls', 'description':'Redundant declaration', 'patterns':[r".*: warning: redundant redeclaration of '.+'"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: previous declaration of '.+' was here"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wswitch-enum', 'description':'Enum value not handled in switch', 'patterns':[r".*: warning: enumeration value '.+' not handled in switch"] }, { 'category':'java', 'severity':severity.MEDIUM, 'members':[], 'option':'-encoding', 'description':'Java: Non-ascii characters used, but ascii encoding specified', 'patterns':[r".*: warning: unmappable character for encoding ascii"] }, { 'category':'java', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Java: Non-varargs call of varargs method with inexact argument type for last parameter', 'patterns':[r".*: warning: non-varargs call of varargs method with inexact argument type for last parameter"] }, { 'category':'aapt', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'aapt: No default translation', 'patterns':[r".*: 
warning: string '.+' has no default translation in .*"] }, { 'category':'aapt', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'aapt: Missing default or required localization', 'patterns':[r".*: warning: \*\*\*\* string '.+' has no default or required localization for '.+' in .+"] }, { 'category':'aapt', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'aapt: String marked untranslatable, but translation exists', 'patterns':[r".*: warning: string '.+' in .* marked untranslatable but exists in locale '??_??'"] }, { 'category':'aapt', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'aapt: empty span in string', 'patterns':[r".*: warning: empty '.+' span found in text '.+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Taking address of temporary', 'patterns':[r".*: warning: taking address of temporary"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Possible broken line continuation', 'patterns':[r".*: warning: backslash and newline separated by space"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Warray-bounds', 'description':'Array subscript out of bounds', 'patterns':[r".*: warning: array subscript is above array bounds", r".*: warning: array subscript is below array bounds"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Decimal constant is unsigned only in ISO C90', 'patterns':[r".*: warning: this decimal constant is unsigned only in ISO C90"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wmain', 'description':'main is usually a function', 'patterns':[r".*: warning: 'main' is usually a function"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Typedef ignored', 'patterns':[r".*: warning: 'typedef' was ignored in this declaration"] }, { 
'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-Waddress', 'description':'Address always evaluates to true', 'patterns':[r".*: warning: the address of '.+' will always evaluate as 'true'"] }, { 'category':'C/C++', 'severity':severity.FIXMENOW, 'members':[], 'option':'', 'description':'Freeing a non-heap object', 'patterns':[r".*: warning: attempt to free a non-heap object '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wchar-subscripts', 'description':'Array subscript has type char', 'patterns':[r".*: warning: array subscript has type 'char'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Constant too large for type', 'patterns':[r".*: warning: integer constant is too large for '.+' type"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Woverflow', 'description':'Constant too large for type, truncated', 'patterns':[r".*: warning: large integer implicitly truncated to unsigned type"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Woverflow', 'description':'Overflow in implicit constant conversion', 'patterns':[r".*: warning: overflow in implicit constant conversion"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Declaration does not declare anything', 'patterns':[r".*: warning: declaration 'class .+' does not declare anything"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wreorder', 'description':'Initialization order will be different', 'patterns':[r".*: warning: '.+' will be initialized after"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: '.+'"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: base '.+'"] }, { 'category':'cont.', 'severity':severity.SKIP, 
'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: when initialized here"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wmissing-parameter-type', 'description':'Parameter type not specified', 'patterns':[r".*: warning: type of '.+' defaults to 'int'"] }, { 'category':'gcc', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Invalid option for C file', 'patterns':[r".*: warning: command line option "".+"" is valid for C\+\+\/ObjC\+\+ but not for C"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'User warning', 'patterns':[r".*: warning: #warning "".+"""] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wextra', 'description':'Dereferencing void*', 'patterns':[r".*: warning: dereferencing 'void \*' pointer"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wextra', 'description':'Comparison of pointer to zero', 'patterns':[r".*: warning: ordered comparison of pointer with integer zero"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wwrite-strings', 'description':'Conversion of string constant to non-const char*', 'patterns':[r".*: warning: deprecated conversion from string constant to '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wstrict-prototypes', 'description':'Function declaration isn''t a prototype', 'patterns':[r".*: warning: function declaration isn't a prototype"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wignored-qualifiers', 'description':'Type qualifiers ignored on function return value', 'patterns':[r".*: warning: type qualifiers ignored on function return type"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'&lt;foo&gt; declared inside parameter list, scope limited to this definition', 'patterns':[r".*: warning: '.+' declared 
inside parameter list"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: its scope is only this definition or declaration, which is probably not what you want"] }, { 'category':'C/C++', 'severity':severity.LOW, 'members':[], 'option':'-Wcomment', 'description':'Line continuation inside comment', 'patterns':[r".*: warning: multi-line comment"] }, { 'category':'C/C++', 'severity':severity.LOW, 'members':[], 'option':'-Wcomment', 'description':'Comment inside comment', 'patterns':[r".*: warning: "".+"" within comment"] }, { 'category':'C/C++', 'severity':severity.HARMLESS, 'members':[], 'option':'', 'description':'Extra tokens after #endif', 'patterns':[r".*: warning: extra tokens at end of #endif directive"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wenum-compare', 'description':'Comparison between different enums', 'patterns':[r".*: warning: comparison between 'enum .+' and 'enum .+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wconversion', 'description':'Implicit conversion of negative number to unsigned type', 'patterns':[r".*: warning: converting negative value '.+' to '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Passing NULL as non-pointer argument', 'patterns':[r".*: warning: passing NULL to non-pointer argument [0-9]+ of '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wctor-dtor-privacy', 'description':'Class seems unusable because of private ctor/dtor' , 'patterns':[r".*: warning: all member functions in class '.+' are private"] }, # skip this next one, because it only points out some RefBase-based classes where having a private destructor is perfectly fine { 'category':'C/C++', 'severity':severity.SKIP, 'members':[], 'option':'-Wctor-dtor-privacy', 'description':'Class seems unusable because of private ctor/dtor' , 
'patterns':[r".*: warning: 'class .+' only defines a private destructor and has no friends"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wctor-dtor-privacy', 'description':'Class seems unusable because of private ctor/dtor' , 'patterns':[r".*: warning: 'class .+' only defines private constructors and has no friends"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wpointer-arith', 'description':'void* used in arithmetic' , 'patterns':[r".*: warning: pointer of type 'void \*' used in (arithmetic|subtraction)", r".*: warning: wrong type argument to increment"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'-Wsign-promo', 'description':'Overload resolution chose to promote from unsigned or enum to signed type' , 'patterns':[r".*: warning: passing '.+' chooses 'int' over '.* int'"] }, { 'category':'cont.', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: in call to '.+'"] }, { 'category':'C/C++', 'severity':severity.HIGH, 'members':[], 'option':'-Wextra', 'description':'Base should be explicitly initialized in copy constructor', 'patterns':[r".*: warning: base class '.+' should be explicitly initialized in the copy constructor"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Converting from <type> to <other type>', 'patterns':[r".*: warning: converting to '.+' from '.+'"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Return value from void function', 'patterns':[r".*: warning: 'return' with a value, in function returning void"] }, { 'category':'C/C++', 'severity':severity.LOW, 'members':[], 'option':'', 'description':'Useless specifier', 'patterns':[r".*: warning: useless storage class specifier in empty declaration"] }, { 'category':'logtags', 'severity':severity.LOW, 'members':[], 'option':'', 'description':'Duplicate logtag', 
'patterns':[r".*: warning: tag "".+"" \(None\) duplicated in .+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Operator new returns NULL', 'patterns':[r".*: warning: 'operator new' must not return NULL unless it is declared 'throw\(\)' .+"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'NULL used in arithmetic', 'patterns':[r".*: warning: NULL used in arithmetic"] }, { 'category':'C/C++', 'severity':severity.MEDIUM, 'members':[], 'option':'', 'description':'Use of deprecated method', 'patterns':[r".*: warning: '.+' is deprecated .+"] }, # these next ones are to deal with formatting problems resulting from the log being mixed up by 'make -j' { 'category':'C/C++', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: ,$"] }, { 'category':'C/C++', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: $"] }, { 'category':'C/C++', 'severity':severity.SKIP, 'members':[], 'option':'', 'description':'', 'patterns':[r".*: warning: In file included from .+,"] }, # catch-all for warnings this script doesn't know about yet { 'category':'C/C++', 'severity':severity.UNKNOWN, 'members':[], 'option':'', 'description':'Unclassified/unrecognized warnings', 'patterns':[r".*: warning: .+"] }, ] anchor = 0 cur_row_color = 0 row_colors = [ 'e0e0e0', 'd0d0d0' ] def output(text): print text, def htmlbig(param): return '<font size="+2">' + param + '</font>' def dumphtmlprologue(title): output('<html>\n<head>\n<title>' + title + '</title>\n<body>\n') output(htmlbig(title)) output('<p>\n') def tablerow(text): global cur_row_color output('<tr bgcolor="' + row_colors[cur_row_color] + '"><td colspan="2">',) cur_row_color = 1 - cur_row_color output(text,) output('</td></tr>') def begintable(text, backgroundcolor): global anchor output('<table border="1" rules="cols" frame="box" width="100%" bgcolor="black"><tr 
bgcolor="' + backgroundcolor + '"><a name="anchor' + str(anchor) + '"><td>') output(htmlbig(text[0]) + '<br>') for i in text[1:]: output(i + '<br>') output('</td>') output('<td width="100" bgcolor="grey"><a align="right" href="#anchor' + str(anchor-1) + '">previous</a><br><a align="right" href="#anchor' + str(anchor+1) + '">next</a>') output('</td></a></tr>') anchor += 1 def endtable(): output('</table><p>') # dump some stats about total number of warnings and such def dumpstats(): known = 0 unknown = 0 for i in warnpatterns: if i['severity'] == severity.UNKNOWN: unknown += len(i['members']) elif i['severity'] != severity.SKIP: known += len(i['members']) output('Number of classified warnings: <b>' + str(known) + '</b><br>' ) output('Number of unclassified warnings: <b>' + str(unknown) + '</b><br>') total = unknown + known output('Total number of warnings: <b>' + str(total) + '</b>') if total < 1000: output('(low count may indicate incremental build)') output('<p>') def allpatterns(cat): pats = '' for i in cat['patterns']: pats += i pats += ' / ' return pats def descriptionfor(cat): if cat['description'] != '': return cat['description'] return allpatterns(cat) # show which warnings no longer occur def dumpfixed(): tablestarted = False for i in warnpatterns: if len(i['members']) == 0 and i['severity'] != severity.SKIP: if tablestarted == False: tablestarted = True begintable(['Fixed warnings', 'No more occurences. 
Please consider turning these in to errors if possible, before they are reintroduced in to the build'], 'blue') tablerow(i['description'] + ' (' + allpatterns(i) + ') ' + i['option']) if tablestarted: endtable() # dump a category, provided it is not marked as 'SKIP' and has more than 0 occurrences def dumpcategory(cat): if cat['severity'] != severity.SKIP and len(cat['members']) != 0: header = [descriptionfor(cat),str(len(cat['members'])) + ' occurences:'] if cat['option'] != '': header[1:1] = [' (related option: ' + cat['option'] +')'] begintable(header, colorforseverity(cat['severity'])) for i in cat['members']: tablerow(i) endtable() # dump everything for a given severity def dumpseverity(sev): for i in warnpatterns: if i['severity'] == sev: dumpcategory(i) def classifywarning(line): for i in warnpatterns: for cpat in i['compiledpatterns']: if cpat.match(line): i['members'].append(line) return else: # If we end up here, there was a problem parsing the log # probably caused by 'make -j' mixing the output from # 2 or more concurrent compiles pass # precompiling every pattern speeds up parsing by about 30x def compilepatterns(): for i in warnpatterns: i['compiledpatterns'] = [] for pat in i['patterns']: i['compiledpatterns'].append(re.compile(pat)) infile = open(sys.argv[1], 'r') warnings = [] platformversion = 'unknown' targetproduct = 'unknown' targetvariant = 'unknown' linecounter = 0 warningpattern = re.compile('.* warning:.*') compilepatterns() # read the log file and classify all the warnings lastmatchedline = '' for line in infile: # replace fancy quotes with plain ol' quotes line = line.replace("‘", "'"); line = line.replace("’", "'"); if warningpattern.match(line): if line != lastmatchedline: classifywarning(line) lastmatchedline = line else: # save a little bit of time by only doing this for the first few lines if linecounter < 50: linecounter +=1 m = re.search('(?<=^PLATFORM_VERSION=).*', line) if m != None: platformversion = m.group(0) m = 
re.search('(?<=^TARGET_PRODUCT=).*', line) if m != None: targetproduct = m.group(0) m = re.search('(?<=^TARGET_BUILD_VARIANT=).*', line) if m != None: targetvariant = m.group(0) # dump the html output to stdout dumphtmlprologue('Warnings for ' + platformversion + ' - ' + targetproduct + ' - ' + targetvariant) dumpstats() dumpseverity(severity.FIXMENOW) dumpseverity(severity.HIGH) dumpseverity(severity.MEDIUM) dumpseverity(severity.LOW) dumpseverity(severity.HARMLESS) dumpseverity(severity.UNKNOWN) dumpfixed()
Python
#!/usr/bin/env python
# vim: ts=2 sw=2
#
# Interactive explorer for a Make dependency dump: reads "target: prereq"
# lines (and "#pos : target" location lines) from a file, then answers
# "dep TARGET" and "trace TARGET PREREQ" queries at a prompt.
#
# NOTE(review): Python 2 only -- uses dict.has_key/iteritems, raw_input and
# print statements.

import optparse
import re
import sys

class Dependency:
  # One node in the dependency graph.
  #   tgt     -- the target name (string)
  #   pos     -- source position ("file:line"), filled in by setPos
  #   prereqs -- set of Dependency nodes this target depends on
  #   visit   -- generation counter used by Dependencies.trace
  def __init__(self, tgt):
    self.tgt = tgt
    self.pos = ""
    self.prereqs = set()
    self.visit = 0

  def add(self, prereq):
    # 'prereq' is a Dependency node, not a string.
    self.prereqs.add(prereq)

class Dependencies:
  # The whole graph: maps target name -> Dependency node.
  def __init__(self):
    self.lines = {}
    self.__visit = 0   # bumped once per trace() call to invalidate old visits
    self.count = 0     # total number of edges added

  def add(self, tgt, prereq):
    # Add an edge tgt -> prereq, creating nodes on first sight.
    t = self.lines.get(tgt)
    if not t:
      t = Dependency(tgt)
      self.lines[tgt] = t
    p = self.lines.get(prereq)
    if not p:
      p = Dependency(prereq)
      self.lines[prereq] = p
    t.add(p)
    self.count = self.count + 1

  def setPos(self, tgt, pos):
    # Record where 'tgt' was defined; creates the node if needed.
    t = self.lines.get(tgt)
    if not t:
      t = Dependency(tgt)
      self.lines[tgt] = t
    t.pos = pos

  def get(self, tgt):
    # Return the node for 'tgt', or None if unknown.
    if self.lines.has_key(tgt):
      return self.lines[tgt]
    else:
      return None

  def __iter__(self):
    return self.lines.iteritems()

  def trace(self, tgt, prereq):
    """Return a list of paths (each a list of Dependency nodes) leading
    from 'tgt' down to 'prereq', or None if 'tgt' is unknown."""
    self.__visit = self.__visit + 1
    d = self.lines.get(tgt)
    if not d:
      return
    return self.__trace(d, prereq)

  def __trace(self, d, prereq):
    # Depth-first search, memoizing each node's result in d.trace for the
    # current visit generation.
    # NOTE(review): on a cyclic graph a node revisited while still on the
    # DFS stack would read d.trace before it is assigned -- presumably the
    # input is acyclic; verify against the caller.
    if d.visit == self.__visit:
      return d.trace
    if d.tgt == prereq:
      return [ [ d ], ]
    d.visit = self.__visit
    result = []
    for pre in d.prereqs:
      recursed = self.__trace(pre, prereq)
      for r in recursed:
        result.append([ d ] + r)
    d.trace = result
    return result

def help():
  # NOTE(review): shadows the builtin help(); only used interactively here.
  print "Commands:"
  print " dep TARGET Print the prerequisites for TARGET"
  print " trace TARGET PREREQ Print the paths from TARGET to PREREQ"

def main(argv):
  opts = optparse.OptionParser()
  opts.add_option("-i", "--interactive", action="store_true", dest="interactive",
                  help="Interactive mode")
  (options, args) = opts.parse_args()

  deps = Dependencies()

  filename = args[0]
  print "Reading %s" % filename

  if True:
    f = open(filename)
    for line in f:
      line = line.strip()
      if len(line) > 0:
        if line[0] == '#':
          # "#pos : target" -- a location annotation.
          pos,tgt = line.rsplit(":", 1)
          pos = pos[1:].strip()
          tgt = tgt.strip()
          deps.setPos(tgt, pos)
        else:
          # "target : prereq" -- a dependency edge.
          (tgt,prereq) = line.split(':', 1)
          tgt = tgt.strip()
          prereq = prereq.strip()
          deps.add(tgt, prereq)
    f.close()

  print "Read %d dependencies. %d targets." % (deps.count, len(deps.lines))

  # Simple command loop; EOF/Ctrl-C ends it (caught in the __main__ guard).
  while True:
    line = raw_input("target> ")
    if not line.strip():
      continue
    split = line.split()
    cmd = split[0]
    if len(split) == 2 and cmd == "dep":
      tgt = split[1]
      d = deps.get(tgt)
      if d:
        for prereq in d.prereqs:
          print prereq.tgt
    elif len(split) == 3 and cmd == "trace":
      tgt = split[1]
      prereq = split[2]
      if False:
        print "from %s to %s" % (tgt, prereq)
      trace = deps.trace(tgt, prereq)
      if trace:
        # Find the widest target name so positions line up in a column.
        width = 0
        for g in trace:
          for t in g:
            if len(t.tgt) > width:
              width = len(t.tgt)
        for g in trace:
          for t in g:
            if t.pos:
              print t.tgt, " " * (width-len(t.tgt)), " #", t.pos
            else:
              print t.tgt
          print
    else:
      help()

if __name__ == "__main__":
  try:
    main(sys.argv)
  except KeyboardInterrupt:
    print
  except EOFError:
    print
Python
#!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Reads several "size filename" listings (one per build flavor) and emits an
# HTML table to stdout comparing the file sizes side by side, highlighting
# files whose size differs between inputs.
#
# NOTE(review): Python 2 only -- file(), dict.has_key/iteritems, tuple
# parameters in lambda, and print statements.  cgi.escape was removed in
# Python 3.8.

import cgi, os, string, sys

def IsDifferent(row):
  """Return True if 'row' contains at least two distinct truthy values.

  Falsy cells (None / 0 -- files absent from a listing) are ignored, so a
  file present in only one input does not count as "different"."""
  val = None
  for v in row:
    if v:
      if not val:
        val = v
      else:
        if val != v:
          return True
  return False

def main(argv):
  # argv[1:] are the input listing files; each line is "<size> <filename>".
  inputs = argv[1:]
  data = {}
  index = 0
  for input in inputs:
    f = file(input, "r")
    lines = f.readlines()
    f.close()
    lines = map(string.split, lines)
    # Flip "(size, name)" to "(name, size_as_int)".
    # NOTE(review): a malformed line (not exactly two fields) raises here.
    lines = map(lambda (x,y): (y,int(x)), lines)
    for fn,sz in lines:
      if not data.has_key(fn):
        data[fn] = {}
      data[fn][index] = sz
    index = index + 1

  # Build one row per filename: [fn, size_in_input0, size_in_input1, ...],
  # with None for inputs that lack the file.
  rows = []
  for fn,sizes in data.iteritems():
    row = [fn]
    for i in range(0,index):
      if sizes.has_key(i):
        row.append(sizes[i])
      else:
        row.append(None)
    rows.append(row)
  rows = sorted(rows, key=lambda x: x[0])

  print """<html>
<head>
<style type="text/css">
.fn, .sz, .z, .d { padding-left: 10px; padding-right: 10px; }
.sz, .z, .d { text-align: right; }
.fn { background-color: #ffffdd; }
.sz { background-color: #ffffcc; }
.z { background-color: #ffcccc; }
.d { background-color: #99ccff; }
</style>
</head>
<body>
"""

  print "<table>"
  print "<tr>"
  for input in inputs:
    # Use the second path component as the column label
    # (assumes inputs look like "out/<flavor>/..." -- TODO confirm).
    combo = input.split(os.path.sep)[1]
    print "  <td class='fn'>%s</td>" % cgi.escape(combo)
  print "</tr>"

  for row in rows:
    print "<tr>"
    for sz in row[1:]:
      if not sz:
        print "  <td class='z'>&nbsp;</td>"
      elif IsDifferent(row[1:]):
        # NOTE(review): IsDifferent is recomputed for every cell of the row;
        # it could be hoisted out of this inner loop.
        print "  <td class='d'>%d</td>" % sz
      else:
        print "  <td class='sz'>%d</td>" % sz
    print "  <td class='fn'>%s</td>" % cgi.escape(row[0])
    print "</tr>"

  print "</table>"
  print "</body></html>"

if __name__ == '__main__':
  main(sys.argv)
Python
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Usage: generate-notice-files [plain text output file] [html output file] [file title] [directory of notices]

Generate the Android notice files, including both text and html files.

-h to display this usage message and exit.
"""
# NOTE(review): Python 2 only -- "except X, e" syntax and "print >>" chevron
# printing throughout.

from collections import defaultdict
import getopt
import hashlib
import itertools
import os
import os.path
import re
import sys

# Read notice files in 1 MiB chunks when hashing.
MD5_BLOCKSIZE = 1024 * 1024
HTML_ESCAPE_TABLE = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
    }

# Command-line parsing happens at import time; args is consumed by the
# __main__ guard at the bottom.
try:
  opts, args = getopt.getopt(sys.argv[1:], "h")
except getopt.GetoptError, err:
  print str(err)
  print __doc__
  sys.exit(2)
for o, a in opts:
  if o == "-h":
    print __doc__
    sys.exit(2)
  else:
    print >> sys.stderr, "unhandled option %s" % (o,)

if len(args) != 4:
  print """need exactly four arguments, the two output files, the file title and the directory containing notices, not %d""" % (len(args),)
  print __doc__
  sys.exit(1)

def hexify(s):
  """Return the bytes of string 's' as lowercase hex digits."""
  return ("%02x"*len(s)) % tuple(map(ord, s))

def md5sum(filename):
  """Calculate an MD5 of the file given by FILENAME,
  and return hex digest as a string.
  Output should be compatible with md5sum command"""
  f = open(filename, "rb")
  sum = hashlib.md5()
  while 1:
    block = f.read(MD5_BLOCKSIZE)
    if not block:
      break
    sum.update(block)
  f.close()
  return hexify(sum.digest())

def html_escape(text):
  """Produce entities within text."""
  return "".join(HTML_ESCAPE_TABLE.get(c,c) for c in text)

HTML_OUTPUT_CSS="""
<style type="text/css">
body { padding: 0; font-family: sans-serif; }
.same-license { background-color: #eeeeee; border-top: 20px solid white; padding: 10px; }
.label { font-weight: bold; }
.file-list { margin-left: 1em; color: blue; }
</style>
"""

def combine_notice_files_html(file_hash, input_dir, output_filename):
  """Combine notice files in FILE_HASH and output a HTML version to
  OUTPUT_FILENAME.

  FILE_HASH maps an MD5 digest to the list of notice files sharing that
  content; each group is emitted once, preceded by the list of files it
  covers."""

  SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")

  # Set up a filename to row id table (anchors inside tables don't work in
  # most browsers, but href's to table row ids do)
  id_table = {}
  id_count = 0
  for value in file_hash.values():
    for filename in value:
      id_table[filename] = id_count
    id_count += 1

  # Open the output file, and output the header pieces
  output_file = open(output_filename, "wb")

  print >> output_file, "<html><head>"
  print >> output_file, HTML_OUTPUT_CSS
  print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'

  # Output our table of contents
  print >> output_file, '<div class="toc">'
  print >> output_file, "<ul>"

  # Flatten the list of lists into a single list of filenames
  sorted_filenames = sorted(itertools.chain.from_iterable(file_hash.values()))

  # Print out a nice table of contents
  for filename in sorted_filenames:
    stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
    print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)

  print >> output_file, "</ul>"
  print >> output_file, "</div><!-- table of contents -->"
  # Output the individual notice file lists
  print >>output_file, '<table cellpadding="0" cellspacing="0" border="0">'
  for value in file_hash.values():
    print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
    print >> output_file, '<div class="label">Notices for file(s):</div>'
    print >> output_file, '<div class="file-list">'
    for filename in sorted(value):
      print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
    print >> output_file, "</div><!-- file-list -->"
    print >> output_file
    print >> output_file, '<pre class="license-text">'
    # All files in the group share identical content, so print the first.
    print >> output_file, html_escape(open(value[0]).read())
    print >> output_file, "</pre><!-- license-text -->"
    print >> output_file, "</td></tr><!-- same-license -->"
    print >> output_file
    print >> output_file
    print >> output_file

  # Finish off the file output
  print >> output_file, "</table>"
  print >> output_file, "</body></html>"
  output_file.close()

def combine_notice_files_text(file_hash, input_dir, output_filename, file_title):
  """Combine notice files in FILE_HASH and output a text version to
  OUTPUT_FILENAME."""

  SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
  output_file = open(output_filename, "wb")
  print >> output_file, file_title
  for value in file_hash.values():
    print >> output_file, "============================================================"
    print >> output_file, "Notices for file(s):"
    for filename in sorted(value):
      print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
    print >> output_file, "------------------------------------------------------------"
    print >> output_file, open(value[0]).read()
  output_file.close()

def main(args):
  txt_output_file = args[0]
  html_output_file = args[1]
  file_title = args[2]

  # Find all the notice files and md5 them
  input_dir = os.path.normpath(args[3])
  files_with_same_hash = defaultdict(list)
  # NOTE(review): loop variables 'dir' and 'file' shadow builtins.
  for root, dir, files in os.walk(input_dir):
    for file in files:
      if file.endswith(".txt"):
        filename = os.path.join(root, file)
        file_md5sum = md5sum(filename)
        files_with_same_hash[file_md5sum].append(filename)

  print "Combining NOTICE files into HTML"
  combine_notice_files_html(files_with_same_hash, input_dir, html_output_file)
  print "Combining NOTICE files into text"
  combine_notice_files_text(files_with_same_hash, input_dir, txt_output_file, file_title)

if __name__ == "__main__":
  # 'args' is the module-level remainder left by getopt above.
  main(args)
Python
#!/usr/bin/env python # vim: ts=2 sw=2 nocindent import re import sys def choose_regex(regs, line): for func,reg in regs: m = reg.match(line) if m: return (func,m) return (None,None) def gather(included, deps): result = set() for inc in included: result.add(inc) for d in deps: if inc == d[1]: result.add(d[0]) return result def main(): deps = [] infos = [] def dependency(m): deps.append((m.group(1), m.group(2))) def info(m): infos.append((m.group(1), m.group(2))) REGS = [ (dependency, re.compile(r'"(.*)"\s*->\s*"(.*)"')), (info, re.compile(r'"(.*)"(\s*\[.*\])')), ] lines = sys.stdin.readlines() lines = [line.strip() for line in lines] for line in lines: func,m = choose_regex(REGS, line) if func: func(m) # filter sys.stderr.write("argv: " + str(sys.argv) + "\n") if not (len(sys.argv) == 2 and sys.argv[1] == "--all"): targets = sys.argv[1:] included = set(targets) prevLen = -1 while prevLen != len(included): prevLen = len(included) included = gather(included, deps) deps = [dep for dep in deps if dep[1] in included] infos = [info for info in infos if info[0] in included] print "digraph {" print "graph [ ratio=.5 ];" for dep in deps: print '"%s" -> "%s"' % dep for info in infos: print '"%s"%s' % info print "}" if __name__ == "__main__": main()
Python
#!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

# Put the modifications that you need to make into the /system/build.prop into this
# function. The prop object has get(name) and put(name,value) methods.
def mangle_build_prop(prop):
  pass

# Put the modifications that you need to make into the /system/build.prop into this
# function. The prop object has get(name) and put(name,value) methods.
def mangle_default_prop(prop):
  # If ro.debuggable is 1, then enable adb on USB by default
  # (this is for userdebug builds)
  if prop.get("ro.debuggable") == "1":
    val = prop.get("persist.sys.usb.config")
    if val == "":
      val = "adb"
    else:
      val = val + ",adb"
    prop.put("persist.sys.usb.config", val)
  # UsbDeviceManager expects a value here.  If it doesn't get it, it will
  # default to "adb". That might not the right policy there, but it's better
  # to be explicit.
  if not prop.get("persist.sys.usb.config"):
    prop.put("persist.sys.usb.config", "none")

class PropFile:
  """In-memory "name=value"-per-line property file.

  Lines are stored without their trailing newline; comments and blank
  lines are preserved verbatim and round-tripped by write()."""

  def __init__(self, lines):
    # Strip only an actual trailing newline.  The previous unconditional
    # s[:-1] chopped the last character of a final line that had no
    # newline terminator.
    self.lines = [s[:-1] if s.endswith("\n") else s for s in lines]

  def get(self, name):
    """Return the value of property 'name', or "" if it is not set."""
    key = name + "="
    for line in self.lines:
      if line.startswith(key):
        return line[len(key):]
    return ""

  def put(self, name, value):
    """Set property 'name' to 'value', replacing an existing assignment
    in place, or appending a new line if the property is absent."""
    key = name + "="
    for i, line in enumerate(self.lines):
      if line.startswith(key):
        self.lines[i] = key + value
        return
    self.lines.append(key + value)

  def write(self, f):
    """Write all lines to the open file object 'f', newline-terminated."""
    f.write("\n".join(self.lines))
    f.write("\n")

def main(argv):
  """Read the prop file named by argv[1], apply the mangler chosen by the
  file's name, and rewrite it in place.  Exits 1 on an unrecognized name."""
  filename = argv[1]
  f = open(filename)
  lines = f.readlines()
  f.close()

  properties = PropFile(lines)

  if filename.endswith("/build.prop"):
    mangle_build_prop(properties)
  elif filename.endswith("/default.prop"):
    mangle_default_prop(properties)
  else:
    sys.stderr.write("bad command line: " + str(argv) + "\n")
    sys.exit(1)

  f = open(filename, 'w+')
  properties.write(f)
  f.close()

if __name__ == "__main__":
  main(sys.argv)
Python
#!/usr/bin/env python # # Copyright (C) 2012 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import os try: from hashlib import sha1 except ImportError: from sha import sha as sha1 if len(sys.argv) < 2: sys.exit(0) build_info = {} f = open(sys.argv[1]) for line in f: line = line.strip() if line.startswith("require"): key, value = line.split()[1].split("=", 1) build_info[key] = value f.close() bad = False for item in sys.argv[2:]: key, fn = item.split(":", 1) values = build_info.get(key, None) if not values: continue values = values.split("|") f = open(fn, "rb") digest = sha1(f.read()).hexdigest() f.close() versions = {} try: f = open(fn + ".sha1") except IOError: if not bad: print print "*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key) bad = True continue for line in f: line = line.strip() if not line or line.startswith("#"): continue h, v = line.split() versions[h] = v if digest not in versions: if not bad: print print "*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn) bad = True continue if versions[digest] not in values: if not bad: print print "*** \"%s\" is version %s; not any %s allowed by \"%s\"." % ( fn, versions[digest], key, sys.argv[1]) bad = True if bad: print sys.exit(1)
Python
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A module for reading and parsing event-log-tags files."""
# NOTE(review): Python 2 only -- "except X, e" syntax and "print >>".

import re
import sys

class Tag(object):
  """One parsed tag line: number (or None for '?'), name, optional
  description, plus the file/line it came from for error reporting."""
  __slots__ = ["tagnum", "tagname", "description", "filename", "linenum"]

  def __init__(self, tagnum, tagname, description, filename, linenum):
    self.tagnum = tagnum
    self.tagname = tagname
    self.description = description
    self.filename = filename
    self.linenum = linenum

class TagFile(object):
  """Read an input event-log-tags file."""
  def AddError(self, msg, linenum=None):
    # Record an error at 'linenum' (defaults to the current parse line).
    if linenum is None:
      linenum = self.linenum
    self.errors.append((self.filename, linenum, msg))

  def AddWarning(self, msg, linenum=None):
    # Record a warning at 'linenum' (defaults to the current parse line).
    if linenum is None:
      linenum = self.linenum
    self.warnings.append((self.filename, linenum, msg))

  def __init__(self, filename, file_object=None):
    """'filename' is the name of the file (included in any error
    messages).  If 'file_object' is None, 'filename' will be opened for
    reading.

    Parsing never raises: problems are accumulated in self.errors /
    self.warnings as (filename, linenum, message) tuples."""
    self.errors = []
    self.warnings = []
    self.tags = []
    self.options = {}

    self.filename = filename
    self.linenum = 0

    if file_object is None:
      try:
        file_object = open(filename, "rb")
      except (IOError, OSError), e:
        self.AddError(str(e))
        return

    try:
      # enumerate is 0-based; bump to make self.linenum 1-based.
      for self.linenum, line in enumerate(file_object):
        self.linenum += 1

        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Split into at most 3 fields: tag, name, description.
        parts = re.split(r"\s+", line, 2)

        if len(parts) < 2:
          self.AddError("failed to parse \"%s\"" % (line,))
          continue

        if parts[0] == "option":
          self.options[parts[1]] = parts[2:]
          continue

        # '?' means "assign a number later"; stored as tagnum None.
        if parts[0] == "?":
          tag = None
        else:
          try:
            tag = int(parts[0])
          except ValueError:
            self.AddError("\"%s\" isn't an integer tag or '?'" % (parts[0],))
            continue

        tagname = parts[1]
        if len(parts) == 3:
          description = parts[2]
        else:
          description = None

        if description:
          # EventLog.java checks that the description field is
          # surrounded by parens, so we should too (to avoid a runtime
          # crash from badly-formatted descriptions).
          if not re.match(r"\(.*\)\s*$", description):
            self.AddError("tag \"%s\" has unparseable description" % (tagname,))
            continue

        self.tags.append(Tag(tag, tagname, description,
                             self.filename, self.linenum))
    except (IOError, OSError), e:
      self.AddError(str(e))

def BooleanFromString(s):
  """Interpret 's' as a boolean and return its value.  Raise
  ValueError if it's not something we can interpret as true or
  false."""
  s = s.lower()
  if s in ("true", "t", "1", "on", "yes", "y"):
    return True
  if s in ("false", "f", "0", "off", "no", "n"):
    return False
  raise ValueError("'%s' not a valid boolean" % (s,))

def WriteOutput(output_file, data):
  """Write 'data' to the given output filename (which may be None to
  indicate stdout).  Emit an error message and die on any failure.
  'data' may be a string or a StringIO object."""
  if not isinstance(data, str):
    data = data.getvalue()
  try:
    if output_file is None:
      out = sys.stdout
      output_file = "<stdout>"
    else:
      out = open(output_file, "wb")
    out.write(data)
    out.close()
  except (IOError, OSError), e:
    print >> sys.stderr, "failed to write %s: %s" % (output_file, e)
    sys.exit(1)
Python
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): Python 2 only -- octal literal 0755 and dict.iteritems.

import os
import re

import common

class EdifyGenerator(object):
  """Class to generate scripts in the 'edify' recovery script language
  used from donut onwards.

  Statements accumulate in self.script (one string per entry) and are
  emitted by AddToZip.  self.info is the build-info dict; its "fstab"
  entry drives Mount/FormatPartition/WriteRawImage."""

  def __init__(self, version, info):
    self.script = []
    self.mounts = set()
    self.version = version
    self.info = info

  def MakeTemporary(self):
    """Make a temporary script object whose commands can latter be
    appended to the parent script with AppendScript().  Used when the
    caller wants to generate script commands out-of-order."""
    x = EdifyGenerator(self.version, self.info)
    # Shares (not copies) the mount set so UnmountAll on the parent
    # still sees mounts performed by the temporary script.
    x.mounts = self.mounts
    return x

  @staticmethod
  def _WordWrap(cmd, linelen=80):
    """'cmd' should be a function call with null characters after each
    parameter (eg, "somefun(foo,\0bar,\0baz)").  This function wraps cmd
    to a given line length, replacing nulls with spaces and/or newlines
    to format it nicely."""
    indent = cmd.index("(")+1
    out = []
    first = True
    # Greedily grab as many \0-separated pieces as fit on one line.
    x = re.compile("^(.{,%d})\0" % (linelen-indent,))
    while True:
      if not first:
        out.append(" " * indent)
      first = False
      m = x.search(cmd)
      if not m:
        # No break point fits; emit up to the next \0 (or the rest).
        parts = cmd.split("\0", 1)
        out.append(parts[0]+"\n")
        if len(parts) == 1:
          break
        else:
          cmd = parts[1]
          continue
      out.append(m.group(1)+"\n")
      cmd = cmd[m.end():]
    return "".join(out).replace("\0", " ").rstrip("\n")

  def AppendScript(self, other):
    """Append the contents of another script (which should be created
    with temporary=True) to this one."""
    self.script.extend(other.script)

  def AssertSomeFingerprint(self, *fp):
    """Assert that the current system build fingerprint is one of *fp."""
    if not fp:
      raise ValueError("must specify some fingerprints")
    cmd = ('assert(' +
           ' ||\0'.join([('file_getprop("/system/build.prop", '
                          '"ro.build.fingerprint") == "%s"')
                         % i for i in fp]) +
           ');')
    self.script.append(self._WordWrap(cmd))

  def AssertOlderBuild(self, timestamp):
    """Assert that the build on the device is older (or the same as)
    the given timestamp."""
    self.script.append(('assert(!less_than_int(%s, '
                        'getprop("ro.build.date.utc")));') % (timestamp,))

  def AssertDevice(self, device):
    """Assert that the device identifier is the given string."""
    cmd = ('assert(getprop("ro.product.device") == "%s" ||\0'
           'getprop("ro.build.product") == "%s");' % (device, device))
    self.script.append(self._WordWrap(cmd))

  def AssertSomeBootloader(self, *bootloaders):
    """Asert that the bootloader version is one of *bootloaders."""
    cmd = ("assert(" +
           " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
                         for b in bootloaders]) +
           ");")
    self.script.append(self._WordWrap(cmd))

  def ShowProgress(self, frac, dur):
    """Update the progress bar, advancing it over 'frac' over the next
    'dur' seconds.  'dur' may be zero to advance it via SetProgress
    commands instead of by time."""
    self.script.append("show_progress(%f, %d);" % (frac, int(dur)))

  def SetProgress(self, frac):
    """Set the position of the progress bar within the chunk defined
    by the most recent ShowProgress call.  'frac' should be in
    [0,1]."""
    self.script.append("set_progress(%f);" % (frac,))

  def PatchCheck(self, filename, *sha1):
    """Check that the given file (or MTD reference) has one of the
    given *sha1 hashes, checking the version saved in cache if the
    file does not match."""
    self.script.append('assert(apply_patch_check("%s"' % (filename,) +
                       "".join([', "%s"' % (i,) for i in sha1]) +
                       '));')

  def FileCheck(self, filename, *sha1):
    """Check that the given file (or MTD reference) has one of the
    given *sha1 hashes."""
    self.script.append('assert(sha1_check(read_file("%s")' % (filename,) +
                       "".join([', "%s"' % (i,) for i in sha1]) +
                       '));')

  def CacheFreeSpaceCheck(self, amount):
    """Check that there's at least 'amount' space that can be made
    available on /cache."""
    self.script.append("assert(apply_patch_space(%d));" % (amount,))

  def Mount(self, mount_point):
    """Mount the partition with the given mount_point."""
    fstab = self.info.get("fstab", None)
    if fstab:
      p = fstab[mount_point]
      self.script.append('mount("%s", "%s", "%s", "%s");' %
                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
                          p.device, p.mount_point))
      self.mounts.add(p.mount_point)

  def UnpackPackageDir(self, src, dst):
    """Unpack a given directory from the OTA package into the given
    destination directory."""
    self.script.append('package_extract_dir("%s", "%s");' % (src, dst))

  def Comment(self, comment):
    """Write a comment into the update script."""
    self.script.append("")
    for i in comment.split("\n"):
      self.script.append("# " + i)
    self.script.append("")

  def Print(self, message):
    """Log a message to the screen (if the logs are visible)."""
    self.script.append('ui_print("%s");' % (message,))

  def FormatPartition(self, partition):
    """Format the given partition, specified by its mount point (eg,
    "/system")."""
    # NOTE(review): reserve_size is assigned but never used.
    reserve_size = 0
    fstab = self.info.get("fstab", None)
    if fstab:
      p = fstab[partition]
      self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
                          p.device, p.length, p.mount_point))

  def DeleteFiles(self, file_list):
    """Delete all files in file_list."""
    if not file_list: return
    cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
    self.script.append(self._WordWrap(cmd))

  def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
    """Apply binary patches (in *patchpairs) to the given srcfile to
    produce tgtfile (which may be "-" to indicate overwriting the
    source file."""
    if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
      raise ValueError("bad patches given to ApplyPatch")
    cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
           % (srcfile, tgtfile, tgtsha1, tgtsize)]
    for i in range(0, len(patchpairs), 2):
      cmd.append(',\0%s, package_extract_file("%s")' % patchpairs[i:i+2])
    cmd.append(');')
    cmd = "".join(cmd)
    self.script.append(self._WordWrap(cmd))

  def WriteRawImage(self, mount_point, fn):
    """Write the given package file into the partition for the given
    mount point."""
    fstab = self.info["fstab"]
    if fstab:
      p = fstab[mount_point]
      partition_type = common.PARTITION_TYPES[p.fs_type]
      args = {'device': p.device, 'fn': fn}
      if partition_type == "MTD":
        self.script.append(
            'write_raw_image(package_extract_file("%(fn)s"), "%(device)s");'
            % args)
      elif partition_type == "EMMC":
        self.script.append(
            'package_extract_file("%(fn)s", "%(device)s");' % args)
      else:
        raise ValueError("don't know how to write \"%s\" partitions" %
                         (p.fs_type,))

  def SetPermissions(self, fn, uid, gid, mode):
    """Set file ownership and permissions."""
    self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))

  def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode):
    """Recursively set path ownership and permissions."""
    self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
                       % (uid, gid, dmode, fmode, fn))

  def MakeSymlinks(self, symlink_list):
    """Create symlinks, given a list of (dest, link) pairs."""
    # Group links by destination so each destination needs one command.
    by_dest = {}
    for d, l in symlink_list:
      by_dest.setdefault(d, []).append(l)
    for dest, links in sorted(by_dest.iteritems()):
      cmd = ('symlink("%s", ' % (dest,) +
             ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
      self.script.append(self._WordWrap(cmd))

  def AppendExtra(self, extra):
    """Append text verbatim to the output script."""
    self.script.append(extra)

  def UnmountAll(self):
    # Unmount everything Mount() mounted, then forget the set.
    for p in sorted(self.mounts):
      self.script.append('unmount("%s");' % (p,))
    self.mounts = set()

  def AddToZip(self, input_zip, output_zip, input_path=None):
    """Write the accumulated script to the output_zip file.  input_zip
    is used as the source for the 'updater' binary needed to run
    script.  If input_path is not None, it will be used as a local
    path for the binary instead of input_zip."""
    self.UnmountAll()

    common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
                       "\n".join(self.script) + "\n")

    if input_path is None:
      data = input_zip.read("OTA/bin/updater")
    else:
      data = open(os.path.join(input_path, "updater")).read()
    common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
                       data, perms=0755)
Python
#!/usr/bin/env python # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Given a target-files zipfile, produces an image zipfile suitable for use with 'fastboot update'. Usage: img_from_target_files [flags] input_target_files output_image_zip -b (--board_config) <file> Deprecated. -z (--bootable_zip) Include only the bootable images (eg 'boot' and 'recovery') in the output. """ import sys if sys.hexversion < 0x02040000: print >> sys.stderr, "Python 2.4 or newer is required." sys.exit(1) import errno import os import re import shutil import subprocess import tempfile import zipfile # missing in Python 2.4 and before if not hasattr(os, "SEEK_SET"): os.SEEK_SET = 0 import build_image import common OPTIONS = common.OPTIONS def AddUserdata(output_zip): """Create an empty userdata image and store it in output_zip.""" print "creating userdata.img..." # The name of the directory it is making an image out of matters to # mkyaffs2image. So we create a temp dir, and within it we create an # empty dir named "data", and build the image from that. 
temp_dir = tempfile.mkdtemp() user_dir = os.path.join(temp_dir, "data") os.mkdir(user_dir) img = tempfile.NamedTemporaryFile() image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "data") fstab = OPTIONS.info_dict["fstab"] if fstab: image_props["fs_type" ] = fstab["/data"].fs_type succ = build_image.BuildImage(user_dir, image_props, img.name) assert succ, "build userdata.img image failed" common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict) output_zip.write(img.name, "userdata.img") img.close() os.rmdir(user_dir) os.rmdir(temp_dir) def AddCache(output_zip): """Create an empty cache image and store it in output_zip.""" image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache") # The build system has to explicitly request for cache.img. if "fs_type" not in image_props: return print "creating cache.img..." # The name of the directory it is making an image out of matters to # mkyaffs2image. So we create a temp dir, and within it we create an # empty dir named "cache", and build the image from that. temp_dir = tempfile.mkdtemp() user_dir = os.path.join(temp_dir, "cache") os.mkdir(user_dir) img = tempfile.NamedTemporaryFile() fstab = OPTIONS.info_dict["fstab"] if fstab: image_props["fs_type" ] = fstab["/cache"].fs_type succ = build_image.BuildImage(user_dir, image_props, img.name) assert succ, "build cache.img image failed" common.CheckSize(img.name, "cache.img", OPTIONS.info_dict) output_zip.write(img.name, "cache.img") img.close() os.rmdir(user_dir) os.rmdir(temp_dir) def AddSystem(output_zip): """Turn the contents of SYSTEM into a system image and store it in output_zip.""" print "creating system.img..." img = tempfile.NamedTemporaryFile() # The name of the directory it is making an image out of matters to # mkyaffs2image. It wants "system" but we have a directory named # "SYSTEM", so create a symlink. 
try: os.symlink(os.path.join(OPTIONS.input_tmp, "SYSTEM"), os.path.join(OPTIONS.input_tmp, "system")) except OSError, e: # bogus error on my mac version? # File "./build/tools/releasetools/img_from_target_files", line 86, in AddSystem # os.path.join(OPTIONS.input_tmp, "system")) # OSError: [Errno 17] File exists if (e.errno == errno.EEXIST): pass image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "system") fstab = OPTIONS.info_dict["fstab"] if fstab: image_props["fs_type" ] = fstab["/system"].fs_type succ = build_image.BuildImage(os.path.join(OPTIONS.input_tmp, "system"), image_props, img.name) assert succ, "build system.img image failed" img.seek(os.SEEK_SET, 0) data = img.read() img.close() common.CheckSize(data, "system.img", OPTIONS.info_dict) common.ZipWriteStr(output_zip, "system.img", data) def CopyInfo(output_zip): """Copy the android-info.txt file from the input to the output.""" output_zip.write(os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"), "android-info.txt") def main(argv): bootable_only = [False] def option_handler(o, a): if o in ("-b", "--board_config"): pass # deprecated if o in ("-z", "--bootable_zip"): bootable_only[0] = True else: return False return True args = common.ParseOptions(argv, __doc__, extra_opts="b:z", extra_long_opts=["board_config=", "bootable_zip"], extra_option_handler=option_handler) bootable_only = bootable_only[0] if len(args) != 2: common.Usage(__doc__) sys.exit(1) OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) OPTIONS.info_dict = common.LoadInfoDict(input_zip) output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED) common.GetBootableImage( "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT").AddToZip(output_zip) common.GetBootableImage( "recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY").AddToZip(output_zip) if not bootable_only: AddSystem(output_zip) AddUserdata(output_zip) AddCache(output_zip) CopyInfo(output_zip) print "cleaning up..." 
output_zip.close() shutil.rmtree(OPTIONS.input_tmp) print "done." if __name__ == '__main__': try: common.CloseInheritedPipes() main(sys.argv[1:]) except common.ExternalError, e: print print " ERROR: %s" % (e,) print sys.exit(1)
Python
#!/usr/bin/env python # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Signs all the APK files in a target-files zipfile, producing a new target-files zip. Usage: sign_target_files_apks [flags] input_target_files output_target_files -e (--extra_apks) <name,name,...=key> Add extra APK name/key pairs as though they appeared in apkcerts.txt (so mappings specified by -k and -d are applied). Keys specified in -e override any value for that app contained in the apkcerts.txt file. Option may be repeated to give multiple extra packages. -k (--key_mapping) <src_key=dest_key> Add a mapping from the key name as specified in apkcerts.txt (the src_key) to the real key you wish to sign the package with (dest_key). Option may be repeated to give multiple key mappings. -d (--default_key_mappings) <dir> Set up the following key mappings: $devkey/devkey ==> $dir/releasekey $devkey/testkey ==> $dir/releasekey $devkey/media ==> $dir/media $devkey/shared ==> $dir/shared $devkey/platform ==> $dir/platform where $devkey is the directory part of the value of default_system_dev_certificate from the input target-files's META/misc_info.txt. (Defaulting to "build/target/product/security" if the value is not present in misc_info. -d and -k options are added to the set of mappings in the order in which they appear on the command line. 
-o (--replace_ota_keys) Replace the certificate (public key) used by OTA package verification with the one specified in the input target_files zip (in the META/otakeys.txt file). Key remapping (-k and -d) is performed on this key. -t (--tag_changes) <+tag>,<-tag>,... Comma-separated list of changes to make to the set of tags (in the last component of the build fingerprint). Prefix each with '+' or '-' to indicate whether that tag should be added or removed. Changes are processed in the order they appear. Default value is "-test-keys,-dev-keys,+release-keys". """ import sys if sys.hexversion < 0x02040000: print >> sys.stderr, "Python 2.4 or newer is required." sys.exit(1) import cStringIO import copy import os import re import subprocess import tempfile import zipfile import common OPTIONS = common.OPTIONS OPTIONS.extra_apks = {} OPTIONS.key_map = {} OPTIONS.replace_ota_keys = False OPTIONS.tag_changes = ("-test-keys", "-dev-keys", "+release-keys") def GetApkCerts(tf_zip): certmap = common.ReadApkCerts(tf_zip) # apply the key remapping to the contents of the file for apk, cert in certmap.iteritems(): certmap[apk] = OPTIONS.key_map.get(cert, cert) # apply all the -e options, overriding anything in the file for apk, cert in OPTIONS.extra_apks.iteritems(): if not cert: cert = "PRESIGNED" certmap[apk] = OPTIONS.key_map.get(cert, cert) return certmap def CheckAllApksSigned(input_tf_zip, apk_key_map): """Check that all the APKs we want to sign have keys specified, and error out if they don't.""" unknown_apks = [] for info in input_tf_zip.infolist(): if info.filename.endswith(".apk"): name = os.path.basename(info.filename) if name not in apk_key_map: unknown_apks.append(name) if unknown_apks: print "ERROR: no key specified for:\n\n ", print "\n ".join(unknown_apks) print "\nUse '-e <apkname>=' to specify a key (which may be an" print "empty string to not sign this apk)." 
sys.exit(1) def SignApk(data, keyname, pw): unsigned = tempfile.NamedTemporaryFile() unsigned.write(data) unsigned.flush() signed = tempfile.NamedTemporaryFile() common.SignFile(unsigned.name, signed.name, keyname, pw, align=4) data = signed.read() unsigned.close() signed.close() return data def SignApks(input_tf_zip, output_tf_zip, apk_key_map, key_passwords): maxsize = max([len(os.path.basename(i.filename)) for i in input_tf_zip.infolist() if i.filename.endswith('.apk')]) for info in input_tf_zip.infolist(): data = input_tf_zip.read(info.filename) out_info = copy.copy(info) if info.filename.endswith(".apk"): name = os.path.basename(info.filename) key = apk_key_map[name] if key not in common.SPECIAL_CERT_STRINGS: print " signing: %-*s (%s)" % (maxsize, name, key) signed_data = SignApk(data, key, key_passwords[key]) output_tf_zip.writestr(out_info, signed_data) else: # an APK we're not supposed to sign. print "NOT signing: %s" % (name,) output_tf_zip.writestr(out_info, data) elif info.filename in ("SYSTEM/build.prop", "RECOVERY/RAMDISK/default.prop"): print "rewriting %s:" % (info.filename,) new_data = RewriteProps(data) output_tf_zip.writestr(out_info, new_data) else: # a non-APK file; copy it verbatim output_tf_zip.writestr(out_info, data) def EditTags(tags): """Given a string containing comma-separated tags, apply the edits specified in OPTIONS.tag_changes and return the updated string.""" tags = set(tags.split(",")) for ch in OPTIONS.tag_changes: if ch[0] == "-": tags.discard(ch[1:]) elif ch[0] == "+": tags.add(ch[1:]) return ",".join(sorted(tags)) def RewriteProps(data): output = [] for line in data.split("\n"): line = line.strip() original_line = line if line and line[0] != '#': key, value = line.split("=", 1) if key == "ro.build.fingerprint": pieces = value.split("/") pieces[-1] = EditTags(pieces[-1]) value = "/".join(pieces) elif key == "ro.build.description": pieces = value.split(" ") assert len(pieces) == 5 pieces[-1] = EditTags(pieces[-1]) value = " 
".join(pieces) elif key == "ro.build.tags": value = EditTags(value) line = key + "=" + value if line != original_line: print " replace: ", original_line print " with: ", line output.append(line) return "\n".join(output) + "\n" def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info): try: keylist = input_tf_zip.read("META/otakeys.txt").split() except KeyError: raise ExternalError("can't read META/otakeys.txt from input") extra_recovery_keys = misc_info.get("extra_recovery_keys", None) if extra_recovery_keys: extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem" for k in extra_recovery_keys.split()] if extra_recovery_keys: print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys) else: extra_recovery_keys = [] mapped_keys = [] for k in keylist: m = re.match(r"^(.*)\.x509\.pem$", k) if not m: raise ExternalError("can't parse \"%s\" from META/otakeys.txt" % (k,)) k = m.group(1) mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem") if mapped_keys: print "using:\n ", "\n ".join(mapped_keys) print "for OTA package verification" else: devkey = misc_info.get("default_system_dev_certificate", "build/target/product/security/testkey") mapped_keys.append( OPTIONS.key_map.get(devkey, devkey) + ".x509.pem") print "META/otakeys.txt has no keys; using", mapped_keys[0] # recovery uses a version of the key that has been slightly # predigested (by DumpPublicKey.java) and put in res/keys. # extra_recovery_keys are used only in recovery. p = common.Run(["java", "-jar", os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] + mapped_keys + extra_recovery_keys, stdout=subprocess.PIPE) data, _ = p.communicate() if p.returncode != 0: raise ExternalError("failed to run dumpkeys") common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", data) # SystemUpdateActivity uses the x509.pem version of the keys, but # put into a zipfile system/etc/security/otacerts.zip. # We DO NOT include the extra_recovery_keys (if any) here. 
tempfile = cStringIO.StringIO() certs_zip = zipfile.ZipFile(tempfile, "w") for k in mapped_keys: certs_zip.write(k) certs_zip.close() common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", tempfile.getvalue()) def BuildKeyMap(misc_info, key_mapping_options): for s, d in key_mapping_options: if s is None: # -d option devkey = misc_info.get("default_system_dev_certificate", "build/target/product/security/testkey") devkeydir = os.path.dirname(devkey) OPTIONS.key_map.update({ devkeydir + "/testkey": d + "/releasekey", devkeydir + "/devkey": d + "/releasekey", devkeydir + "/media": d + "/media", devkeydir + "/shared": d + "/shared", devkeydir + "/platform": d + "/platform", }) else: OPTIONS.key_map[s] = d def main(argv): key_mapping_options = [] def option_handler(o, a): if o in ("-e", "--extra_apks"): names, key = a.split("=") names = names.split(",") for n in names: OPTIONS.extra_apks[n] = key elif o in ("-d", "--default_key_mappings"): key_mapping_options.append((None, a)) elif o in ("-k", "--key_mapping"): key_mapping_options.append(a.split("=", 1)) elif o in ("-o", "--replace_ota_keys"): OPTIONS.replace_ota_keys = True elif o in ("-t", "--tag_changes"): new = [] for i in a.split(","): i = i.strip() if not i or i[0] not in "-+": raise ValueError("Bad tag change '%s'" % (i,)) new.append(i[0] + i[1:].strip()) OPTIONS.tag_changes = tuple(new) else: return False return True args = common.ParseOptions(argv, __doc__, extra_opts="e:d:k:ot:", extra_long_opts=["extra_apks=", "default_key_mappings=", "key_mapping=", "replace_ota_keys", "tag_changes="], extra_option_handler=option_handler) if len(args) != 2: common.Usage(__doc__) sys.exit(1) input_zip = zipfile.ZipFile(args[0], "r") output_zip = zipfile.ZipFile(args[1], "w") misc_info = common.LoadInfoDict(input_zip) BuildKeyMap(misc_info, key_mapping_options) apk_key_map = GetApkCerts(input_zip) CheckAllApksSigned(input_zip, apk_key_map) key_passwords = common.GetKeyPasswords(set(apk_key_map.values())) 
SignApks(input_zip, output_zip, apk_key_map, key_passwords) if OPTIONS.replace_ota_keys: ReplaceOtaKeys(input_zip, output_zip, misc_info) input_zip.close() output_zip.close() print "done." if __name__ == '__main__': try: main(sys.argv[1:]) except common.ExternalError, e: print print " ERROR: %s" % (e,) print sys.exit(1)
Python
#!/usr/bin/env python # # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Build image output_image_file from input_directory and properties_file. Usage: build_image input_directory properties_file output_image_file """ import os import os.path import subprocess import sys def RunCommand(cmd): """ Echo and run the given command Args: cmd: the command represented as a list of strings. Returns: The exit code. """ print "Running: ", " ".join(cmd) p = subprocess.Popen(cmd) p.communicate() return p.returncode def BuildImage(in_dir, prop_dict, out_file): """Build an image to out_file from in_dir with property prop_dict. Args: in_dir: path of input directory. prop_dict: property dictionary. out_file: path of the output image file. Returns: True iff the image is built successfully. 
""" build_command = [] fs_type = prop_dict.get("fs_type", "") run_fsck = False if fs_type.startswith("ext"): build_command = ["mkuserimg.sh"] if "extfs_sparse_flag" in prop_dict: build_command.append(prop_dict["extfs_sparse_flag"]) run_fsck = True build_command.extend([in_dir, out_file, fs_type, prop_dict["mount_point"]]) if "partition_size" in prop_dict: build_command.append(prop_dict["partition_size"]) if "selinux_fc" in prop_dict: build_command.append(prop_dict["selinux_fc"]) else: build_command = ["mkyaffs2image", "-f"] if prop_dict.get("mkyaffs2_extra_flags", None): build_command.extend(prop_dict["mkyaffs2_extra_flags"].split()) build_command.append(in_dir) build_command.append(out_file) if "selinux_fc" in prop_dict: build_command.append(prop_dict["selinux_fc"]) build_command.append(prop_dict["mount_point"]) exit_code = RunCommand(build_command) if exit_code != 0: return False if run_fsck: # Inflate the sparse image unsparse_image = os.path.join( os.path.dirname(out_file), "unsparse_" + os.path.basename(out_file)) inflate_command = ["simg2img", out_file, unsparse_image] exit_code = RunCommand(inflate_command) if exit_code != 0: os.remove(unsparse_image) return False # Run e2fsck on the inflated image file e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image] exit_code = RunCommand(e2fsck_command) os.remove(unsparse_image) return exit_code == 0 def ImagePropFromGlobalDict(glob_dict, mount_point): """Build an image property dictionary from the global dictionary. Args: glob_dict: the global dictionary from the build system. mount_point: such as "system", "data" etc. 
""" d = {} def copy_prop(src_p, dest_p): if src_p in glob_dict: d[dest_p] = str(glob_dict[src_p]) common_props = ( "extfs_sparse_flag", "mkyaffs2_extra_flags", "selinux_fc", ) for p in common_props: copy_prop(p, p) d["mount_point"] = mount_point if mount_point == "system": copy_prop("fs_type", "fs_type") copy_prop("system_size", "partition_size") elif mount_point == "data": copy_prop("fs_type", "fs_type") copy_prop("userdata_size", "partition_size") elif mount_point == "cache": copy_prop("cache_fs_type", "fs_type") copy_prop("cache_size", "partition_size") return d def LoadGlobalDict(filename): """Load "name=value" pairs from filename""" d = {} f = open(filename) for line in f: line = line.strip() if not line or line.startswith("#"): continue k, v = line.split("=", 1) d[k] = v f.close() return d def main(argv): if len(argv) != 3: print __doc__ sys.exit(1) in_dir = argv[0] glob_dict_file = argv[1] out_file = argv[2] glob_dict = LoadGlobalDict(glob_dict_file) image_filename = os.path.basename(out_file) mount_point = "" if image_filename == "system.img": mount_point = "system" elif image_filename == "userdata.img": mount_point = "data" elif image_filename == "cache.img": mount_point = "cache" else: print >> sys.stderr, "error: unknown image file name ", image_filename exit(1) image_properties = ImagePropFromGlobalDict(glob_dict, mount_point) if not BuildImage(in_dir, image_properties, out_file): print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir) exit(1) if __name__ == '__main__': main(sys.argv[1:])
Python
# Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import errno import getopt import getpass import imp import os import platform import re import shutil import subprocess import sys import tempfile import threading import time import zipfile try: from hashlib import sha1 as sha1 except ImportError: from sha import sha as sha1 # missing in Python 2.4 and before if not hasattr(os, "SEEK_SET"): os.SEEK_SET = 0 class Options(object): pass OPTIONS = Options() OPTIONS.search_path = "out/host/linux-x86" OPTIONS.verbose = False OPTIONS.tempfiles = [] OPTIONS.device_specific = None OPTIONS.extras = {} OPTIONS.info_dict = None # Values for "certificate" in apkcerts that mean special things. SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") class ExternalError(RuntimeError): pass def Run(args, **kwargs): """Create and return a subprocess.Popen object, printing the command line on the terminal if -v was specified.""" if OPTIONS.verbose: print " running: ", " ".join(args) return subprocess.Popen(args, **kwargs) def CloseInheritedPipes(): """ Gmake in MAC OS has file descriptor (PIPE) leak. 
We close those fds before doing other work.""" if platform.system() != "Darwin": return for d in range(3, 1025): try: stat = os.fstat(d) if stat is not None: pipebit = stat[0] & 0x1000 if pipebit != 0: os.close(d) except OSError: pass def LoadInfoDict(zip): """Read and parse the META/misc_info.txt key/value pairs from the input target files and return a dict.""" d = {} try: for line in zip.read("META/misc_info.txt").split("\n"): line = line.strip() if not line or line.startswith("#"): continue k, v = line.split("=", 1) d[k] = v except KeyError: # ok if misc_info.txt doesn't exist pass # backwards compatibility: These values used to be in their own # files. Look for them, in case we're processing an old # target_files zip. if "mkyaffs2_extra_flags" not in d: try: d["mkyaffs2_extra_flags"] = zip.read("META/mkyaffs2-extra-flags.txt").strip() except KeyError: # ok if flags don't exist pass if "recovery_api_version" not in d: try: d["recovery_api_version"] = zip.read("META/recovery-api-version.txt").strip() except KeyError: raise ValueError("can't find recovery API version in input target-files") if "tool_extensions" not in d: try: d["tool_extensions"] = zip.read("META/tool-extensions.txt").strip() except KeyError: # ok if extensions don't exist pass try: data = zip.read("META/imagesizes.txt") for line in data.split("\n"): if not line: continue name, value = line.split(" ", 1) if not value: continue if name == "blocksize": d[name] = value else: d[name + "_size"] = value except KeyError: pass def makeint(key): if key in d: d[key] = int(d[key], 0) makeint("recovery_api_version") makeint("blocksize") makeint("system_size") makeint("userdata_size") makeint("cache_size") makeint("recovery_size") makeint("boot_size") d["fstab"] = LoadRecoveryFSTab(zip) d["build.prop"] = LoadBuildProp(zip) return d def LoadBuildProp(zip): try: data = zip.read("SYSTEM/build.prop") except KeyError: print "Warning: could not find SYSTEM/build.prop in %s" % zip data = "" d = {} for line in 
data.split("\n"): line = line.strip() if not line or line.startswith("#"): continue name, value = line.split("=", 1) d[name] = value return d def LoadRecoveryFSTab(zip): class Partition(object): pass try: data = zip.read("RECOVERY/RAMDISK/etc/recovery.fstab") except KeyError: print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab in %s." % zip data = "" d = {} for line in data.split("\n"): line = line.strip() if not line or line.startswith("#"): continue pieces = line.split() if not (3 <= len(pieces) <= 4): raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) p = Partition() p.mount_point = pieces[0] p.fs_type = pieces[1] p.device = pieces[2] p.length = 0 options = None if len(pieces) >= 4: if pieces[3].startswith("/"): p.device2 = pieces[3] if len(pieces) >= 5: options = pieces[4] else: p.device2 = None options = pieces[3] else: p.device2 = None if options: options = options.split(",") for i in options: if i.startswith("length="): p.length = int(i[7:]) else: print "%s: unknown option \"%s\"" % (p.mount_point, i) d[p.mount_point] = p return d def DumpInfoDict(d): for k, v in sorted(d.items()): print "%-25s = (%s) %s" % (k, type(v).__name__, v) def BuildBootableImage(sourcedir, fs_config_file, info_dict=None): """Take a kernel, cmdline, and ramdisk directory from the input (in 'sourcedir'), and turn them into a boot image. 
Return the image data, or None if sourcedir does not appear to contains files for building the requested image.""" if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)): return None if info_dict is None: info_dict = OPTIONS.info_dict ramdisk_img = tempfile.NamedTemporaryFile() img = tempfile.NamedTemporaryFile() if os.access(fs_config_file, os.F_OK): cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")] else: cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")] p1 = Run(cmd, stdout=subprocess.PIPE) p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) p2.wait() p1.wait() assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,) assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,) cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")] fn = os.path.join(sourcedir, "cmdline") if os.access(fn, os.F_OK): cmd.append("--cmdline") cmd.append(open(fn).read().rstrip("\n")) fn = os.path.join(sourcedir, "base") if os.access(fn, os.F_OK): cmd.append("--base") cmd.append(open(fn).read().rstrip("\n")) fn = os.path.join(sourcedir, "pagesize") if os.access(fn, os.F_OK): cmd.append("--pagesize") cmd.append(open(fn).read().rstrip("\n")) args = info_dict.get("mkbootimg_args", None) if args and args.strip(): cmd.extend(args.split()) cmd.extend(["--ramdisk", ramdisk_img.name, "--output", img.name]) p = Run(cmd, stdout=subprocess.PIPE) p.communicate() assert p.returncode == 0, "mkbootimg of %s image failed" % ( os.path.basename(sourcedir),) img.seek(os.SEEK_SET, 0) data = img.read() ramdisk_img.close() img.close() return data def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir, info_dict=None): """Return a File object (with name 'name') with the desired bootable image. 
Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name', otherwise construct it from the source files in 'unpack_dir'/'tree_subdir'.""" prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name) if os.path.exists(prebuilt_path): print "using prebuilt %s..." % (prebuilt_name,) return File.FromLocalFile(name, prebuilt_path) else: print "building image from target_files %s..." % (tree_subdir,) fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt" return File(name, BuildBootableImage(os.path.join(unpack_dir, tree_subdir), os.path.join(unpack_dir, fs_config), info_dict)) def UnzipTemp(filename, pattern=None): """Unzip the given archive into a temporary directory and return the name. If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES. Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the main file), open for reading. """ tmp = tempfile.mkdtemp(prefix="targetfiles-") OPTIONS.tempfiles.append(tmp) def unzip_to_dir(filename, dirname): cmd = ["unzip", "-o", "-q", filename, "-d", dirname] if pattern is not None: cmd.append(pattern) p = Run(cmd, stdout=subprocess.PIPE) p.communicate() if p.returncode != 0: raise ExternalError("failed to unzip input target-files \"%s\"" % (filename,)) m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE) if m: unzip_to_dir(m.group(1), tmp) unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES")) filename = m.group(1) else: unzip_to_dir(filename, tmp) return tmp, zipfile.ZipFile(filename, "r") def GetKeyPasswords(keylist): """Given a list of keys, prompt the user to enter passwords for those which require them. Return a {key: password} dict. password will be None if the key has no password.""" no_passwords = [] need_passwords = [] devnull = open("/dev/null", "w+b") for k in sorted(keylist): # We don't need a password for things that aren't really keys. 
if k in SPECIAL_CERT_STRINGS: no_passwords.append(k) continue p = Run(["openssl", "pkcs8", "-in", k+".pk8", "-inform", "DER", "-nocrypt"], stdin=devnull.fileno(), stdout=devnull.fileno(), stderr=subprocess.STDOUT) p.communicate() if p.returncode == 0: no_passwords.append(k) else: need_passwords.append(k) devnull.close() key_passwords = PasswordManager().GetPasswords(need_passwords) key_passwords.update(dict.fromkeys(no_passwords, None)) return key_passwords def SignFile(input_name, output_name, key, password, align=None, whole_file=False): """Sign the input_name zip/jar/apk, producing output_name. Use the given key and password (the latter may be None if the key does not have a password. If align is an integer > 1, zipalign is run to align stored files in the output zip on 'align'-byte boundaries. If whole_file is true, use the "-w" option to SignApk to embed a signature that covers the whole file in the archive comment of the zip file. """ if align == 0 or align == 1: align = None if align: temp = tempfile.NamedTemporaryFile() sign_name = temp.name else: sign_name = output_name cmd = ["java", "-Xmx2048m", "-jar", os.path.join(OPTIONS.search_path, "framework", "signapk.jar")] if whole_file: cmd.append("-w") cmd.extend([key + ".x509.pem", key + ".pk8", input_name, sign_name]) p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) if password is not None: password += "\n" p.communicate(password) if p.returncode != 0: raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,)) if align: p = Run(["zipalign", "-f", str(align), sign_name, output_name]) p.communicate() if p.returncode != 0: raise ExternalError("zipalign failed: return code %s" % (p.returncode,)) temp.close() def CheckSize(data, target, info_dict): """Check the data string passed against the max size limit, if any, for the given target. Raise exception if the data is too big. 
Print a warning if the data is nearing the maximum size.""" if target.endswith(".img"): target = target[:-4] mount_point = "/" + target if info_dict["fstab"]: if mount_point == "/userdata": mount_point = "/data" p = info_dict["fstab"][mount_point] fs_type = p.fs_type device = p.device if "/" in device: device = device[device.rfind("/")+1:] limit = info_dict.get(device + "_size", None) if not fs_type or not limit: return if fs_type == "yaffs2": # image size should be increased by 1/64th to account for the # spare area (64 bytes per 2k page) limit = limit / 2048 * (2048+64) size = len(data) pct = float(size) * 100.0 / limit msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit) if pct >= 99.0: raise ExternalError(msg) elif pct >= 95.0: print print " WARNING: ", msg print elif OPTIONS.verbose: print " ", msg def ReadApkCerts(tf_zip): """Given a target_files ZipFile, parse the META/apkcerts.txt file and return a {package: cert} dict.""" certmap = {} for line in tf_zip.read("META/apkcerts.txt").split("\n"): line = line.strip() if not line: continue m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+' r'private_key="(.*)"$', line) if m: name, cert, privkey = m.groups() if cert in SPECIAL_CERT_STRINGS and not privkey: certmap[name] = cert elif (cert.endswith(".x509.pem") and privkey.endswith(".pk8") and cert[:-9] == privkey[:-4]): certmap[name] = cert[:-9] else: raise ValueError("failed to parse line from apkcerts.txt:\n" + line) return certmap COMMON_DOCSTRING = """ -p (--path) <dir> Prepend <dir>/bin to the list of places to search for binaries run by this script, and expect to find jars in <dir>/framework. -s (--device_specific) <file> Path to the python module containing device-specific releasetools code. -x (--extra) <key=value> Add a key/value pair to the 'extras' dict, which device-specific extension code may look at. -v (--verbose) Show command lines being executed. -h (--help) Display this usage message and exit. 
""" def Usage(docstring): print docstring.rstrip("\n") print COMMON_DOCSTRING def ParseOptions(argv, docstring, extra_opts="", extra_long_opts=(), extra_option_handler=None): """Parse the options in argv and return any arguments that aren't flags. docstring is the calling module's docstring, to be displayed for errors and -h. extra_opts and extra_long_opts are for flags defined by the caller, which are processed by passing them to extra_option_handler.""" try: opts, args = getopt.getopt( argv, "hvp:s:x:" + extra_opts, ["help", "verbose", "path=", "device_specific=", "extra="] + list(extra_long_opts)) except getopt.GetoptError, err: Usage(docstring) print "**", str(err), "**" sys.exit(2) path_specified = False for o, a in opts: if o in ("-h", "--help"): Usage(docstring) sys.exit() elif o in ("-v", "--verbose"): OPTIONS.verbose = True elif o in ("-p", "--path"): OPTIONS.search_path = a elif o in ("-s", "--device_specific"): OPTIONS.device_specific = a elif o in ("-x", "--extra"): key, value = a.split("=", 1) OPTIONS.extras[key] = value else: if extra_option_handler is None or not extra_option_handler(o, a): assert False, "unknown option \"%s\"" % (o,) os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") + os.pathsep + os.environ["PATH"]) return args def Cleanup(): for i in OPTIONS.tempfiles: if os.path.isdir(i): shutil.rmtree(i) else: os.remove(i) class PasswordManager(object): def __init__(self): self.editor = os.getenv("EDITOR", None) self.pwfile = os.getenv("ANDROID_PW_FILE", None) def GetPasswords(self, items): """Get passwords corresponding to each string in 'items', returning a dict. (The dict may have keys in addition to the values in 'items'.) Uses the passwords in $ANDROID_PW_FILE if available, letting the user edit that file to add more needed passwords. If no editor is available, or $ANDROID_PW_FILE isn't define, prompts the user interactively in the ordinary way. 
""" current = self.ReadFile() first = True while True: missing = [] for i in items: if i not in current or not current[i]: missing.append(i) # Are all the passwords already in the file? if not missing: return current for i in missing: current[i] = "" if not first: print "key file %s still missing some passwords." % (self.pwfile,) answer = raw_input("try to edit again? [y]> ").strip() if answer and answer[0] not in 'yY': raise RuntimeError("key passwords unavailable") first = False current = self.UpdateAndReadFile(current) def PromptResult(self, current): """Prompt the user to enter a value (password) for each key in 'current' whose value is fales. Returns a new dict with all the values. """ result = {} for k, v in sorted(current.iteritems()): if v: result[k] = v else: while True: result[k] = getpass.getpass("Enter password for %s key> " % (k,)).strip() if result[k]: break return result def UpdateAndReadFile(self, current): if not self.editor or not self.pwfile: return self.PromptResult(current) f = open(self.pwfile, "w") os.chmod(self.pwfile, 0600) f.write("# Enter key passwords between the [[[ ]]] brackets.\n") f.write("# (Additional spaces are harmless.)\n\n") first_line = None sorted = [(not v, k, v) for (k, v) in current.iteritems()] sorted.sort() for i, (_, k, v) in enumerate(sorted): f.write("[[[ %s ]]] %s\n" % (v, k)) if not v and first_line is None: # position cursor on first line with no password. 
first_line = i + 4 f.close() p = Run([self.editor, "+%d" % (first_line,), self.pwfile]) _, _ = p.communicate() return self.ReadFile() def ReadFile(self): result = {} if self.pwfile is None: return result try: f = open(self.pwfile, "r") for line in f: line = line.strip() if not line or line[0] == '#': continue m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) if not m: print "failed to parse password file: ", line else: result[m.group(2)] = m.group(1) f.close() except IOError, e: if e.errno != errno.ENOENT: print "error reading password file: ", str(e) return result def ZipWriteStr(zip, filename, data, perms=0644): # use a fixed timestamp so the output is repeatable. zinfo = zipfile.ZipInfo(filename=filename, date_time=(2009, 1, 1, 0, 0, 0)) zinfo.compress_type = zip.compression zinfo.external_attr = perms << 16 zip.writestr(zinfo, data) class DeviceSpecificParams(object): module = None def __init__(self, **kwargs): """Keyword arguments to the constructor become attributes of this object, which is passed to all functions in the device-specific module.""" for k, v in kwargs.iteritems(): setattr(self, k, v) self.extras = OPTIONS.extras if self.module is None: path = OPTIONS.device_specific if not path: return try: if os.path.isdir(path): info = imp.find_module("releasetools", [path]) else: d, f = os.path.split(path) b, x = os.path.splitext(f) if x == ".py": f = b info = imp.find_module(f, [d]) self.module = imp.load_module("device_specific", *info) except ImportError: print "unable to load device-specific module; assuming none" def _DoCall(self, function_name, *args, **kwargs): """Call the named function in the device-specific module, passing the given args and kwargs. The first argument to the call will be the DeviceSpecific object itself. 
If there is no module, or the module does not define the function, return the value of the 'default' kwarg (which itself defaults to None).""" if self.module is None or not hasattr(self.module, function_name): return kwargs.get("default", None) return getattr(self.module, function_name)(*((self,) + args), **kwargs) def FullOTA_Assertions(self): """Called after emitting the block of assertions at the top of a full OTA package. Implementations can add whatever additional assertions they like.""" return self._DoCall("FullOTA_Assertions") def FullOTA_InstallBegin(self): """Called at the start of full OTA installation.""" return self._DoCall("FullOTA_InstallBegin") def FullOTA_InstallEnd(self): """Called at the end of full OTA installation; typically this is used to install the image for the device's baseband processor.""" return self._DoCall("FullOTA_InstallEnd") def IncrementalOTA_Assertions(self): """Called after emitting the block of assertions at the top of an incremental OTA package. Implementations can add whatever additional assertions they like.""" return self._DoCall("IncrementalOTA_Assertions") def IncrementalOTA_VerifyBegin(self): """Called at the start of the verification phase of incremental OTA installation; additional checks can be placed here to abort the script before any changes are made.""" return self._DoCall("IncrementalOTA_VerifyBegin") def IncrementalOTA_VerifyEnd(self): """Called at the end of the verification phase of incremental OTA installation; additional checks can be placed here to abort the script before any changes are made.""" return self._DoCall("IncrementalOTA_VerifyEnd") def IncrementalOTA_InstallBegin(self): """Called at the start of incremental OTA installation (after verification is complete).""" return self._DoCall("IncrementalOTA_InstallBegin") def IncrementalOTA_InstallEnd(self): """Called at the end of incremental OTA installation; typically this is used to install the image for the device's baseband processor.""" return 
self._DoCall("IncrementalOTA_InstallEnd") class File(object): def __init__(self, name, data): self.name = name self.data = data self.size = len(data) self.sha1 = sha1(data).hexdigest() @classmethod def FromLocalFile(cls, name, diskname): f = open(diskname, "rb") data = f.read() f.close() return File(name, data) def WriteToTemp(self): t = tempfile.NamedTemporaryFile() t.write(self.data) t.flush() return t def AddToZip(self, z): ZipWriteStr(z, self.name, self.data) DIFF_PROGRAM_BY_EXT = { ".gz" : "imgdiff", ".zip" : ["imgdiff", "-z"], ".jar" : ["imgdiff", "-z"], ".apk" : ["imgdiff", "-z"], ".img" : "imgdiff", } class Difference(object): def __init__(self, tf, sf, diff_program=None): self.tf = tf self.sf = sf self.patch = None self.diff_program = diff_program def ComputePatch(self): """Compute the patch (as a string of data) needed to turn sf into tf. Returns the same tuple as GetPatch().""" tf = self.tf sf = self.sf if self.diff_program: diff_program = self.diff_program else: ext = os.path.splitext(tf.name)[1] diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") ttemp = tf.WriteToTemp() stemp = sf.WriteToTemp() ext = os.path.splitext(tf.name)[1] try: ptemp = tempfile.NamedTemporaryFile() if isinstance(diff_program, list): cmd = copy.copy(diff_program) else: cmd = [diff_program] cmd.append(stemp.name) cmd.append(ttemp.name) cmd.append(ptemp.name) p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, err = p.communicate() if err or p.returncode != 0: print "WARNING: failure running %s:\n%s\n" % (diff_program, err) return None diff = ptemp.read() finally: ptemp.close() stemp.close() ttemp.close() self.patch = diff return self.tf, self.sf, self.patch def GetPatch(self): """Return a tuple (target_file, source_file, patch_data). 
patch_data may be None if ComputePatch hasn't been called, or if computing the patch failed.""" return self.tf, self.sf, self.patch def ComputeDifferences(diffs): """Call ComputePatch on all the Difference objects in 'diffs'.""" print len(diffs), "diffs to compute" # Do the largest files first, to try and reduce the long-pole effect. by_size = [(i.tf.size, i) for i in diffs] by_size.sort(reverse=True) by_size = [i[1] for i in by_size] lock = threading.Lock() diff_iter = iter(by_size) # accessed under lock def worker(): try: lock.acquire() for d in diff_iter: lock.release() start = time.time() d.ComputePatch() dur = time.time() - start lock.acquire() tf, sf, patch = d.GetPatch() if sf.name == tf.name: name = tf.name else: name = "%s (%s)" % (tf.name, sf.name) if patch is None: print "patching failed! %s" % (name,) else: print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name) lock.release() except Exception, e: print e raise # start worker threads; wait for them all to finish. threads = [threading.Thread(target=worker) for i in range(OPTIONS.worker_threads)] for th in threads: th.start() while threads: threads.pop().join() # map recovery.fstab's fs_types to mount/format "partition types" PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD", "ext4": "EMMC", "emmc": "EMMC" } def GetTypeAndDevice(mount_point, info): fstab = info["fstab"] if fstab: return PARTITION_TYPES[fstab[mount_point].fs_type], fstab[mount_point].device else: return None
# Python  (concatenation artifact: marks the start of the next file, ota_from_target_files)
#!/usr/bin/env python # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Given a target-files zipfile, produces an OTA package that installs that build. An incremental OTA is produced if -i is given, otherwise a full OTA is produced. Usage: ota_from_target_files [flags] input_target_files output_ota_package -b (--board_config) <file> Deprecated. -k (--package_key) <key> Key to use to sign the package (default is the value of default_system_dev_certificate from the input target-files's META/misc_info.txt, or "build/target/product/security/testkey" if that value is not specified). For incremental OTAs, the default value is based on the source target-file, not the target build. -i (--incremental_from) <file> Generate an incremental OTA using the given target-files zip as the starting build. -w (--wipe_user_data) Generate an OTA package that will wipe the user data partition when installed. -n (--no_prereq) Omit the timestamp prereq check normally included at the top of the build scripts (used for developer OTA packages which legitimately need to go back and forth). -e (--extra_script) <file> Insert the contents of file at the end of the update script. -a (--aslr_mode) <on|off> Specify whether to turn on ASLR for the package (on by default). """ import sys if sys.hexversion < 0x02040000: print >> sys.stderr, "Python 2.4 or newer is required." 
sys.exit(1) import copy import errno import os import re import subprocess import tempfile import time import zipfile try: from hashlib import sha1 as sha1 except ImportError: from sha import sha as sha1 import common import edify_generator OPTIONS = common.OPTIONS OPTIONS.package_key = None OPTIONS.incremental_source = None OPTIONS.require_verbatim = set() OPTIONS.prohibit_verbatim = set(("system/build.prop",)) OPTIONS.patch_threshold = 0.95 OPTIONS.wipe_user_data = False OPTIONS.omit_prereq = False OPTIONS.extra_script = None OPTIONS.aslr_mode = True OPTIONS.worker_threads = 3 def MostPopularKey(d, default): """Given a dict, return the key corresponding to the largest value. Returns 'default' if the dict is empty.""" x = [(v, k) for (k, v) in d.iteritems()] if not x: return default x.sort() return x[-1][1] def IsSymlink(info): """Return true if the zipfile.ZipInfo object passed in represents a symlink.""" return (info.external_attr >> 16) == 0120777 def IsRegular(info): """Return true if the zipfile.ZipInfo object passed in represents a symlink.""" return (info.external_attr >> 28) == 010 class Item: """Items represent the metadata (user, group, mode) of files and directories in the system image.""" ITEMS = {} def __init__(self, name, dir=False): self.name = name self.uid = None self.gid = None self.mode = None self.dir = dir if name: self.parent = Item.Get(os.path.dirname(name), dir=True) self.parent.children.append(self) else: self.parent = None if dir: self.children = [] def Dump(self, indent=0): if self.uid is not None: print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode) else: print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode) if self.dir: print "%s%s" % (" "*indent, self.descendants) print "%s%s" % (" "*indent, self.best_subtree) for i in self.children: i.Dump(indent=indent+1) @classmethod def Get(cls, name, dir=False): if name not in cls.ITEMS: cls.ITEMS[name] = Item(name, dir=dir) return 
cls.ITEMS[name] @classmethod def GetMetadata(cls, input_zip): try: # See if the target_files contains a record of what the uid, # gid, and mode is supposed to be. output = input_zip.read("META/filesystem_config.txt") except KeyError: # Run the external 'fs_config' program to determine the desired # uid, gid, and mode for every Item object. Note this uses the # one in the client now, which might not be the same as the one # used when this target_files was built. p = common.Run(["fs_config"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) suffix = { False: "", True: "/" } input = "".join(["%s%s\n" % (i.name, suffix[i.dir]) for i in cls.ITEMS.itervalues() if i.name]) output, error = p.communicate(input) assert not error for line in output.split("\n"): if not line: continue name, uid, gid, mode = line.split() i = cls.ITEMS.get(name, None) if i is not None: i.uid = int(uid) i.gid = int(gid) i.mode = int(mode, 8) if i.dir: i.children.sort(key=lambda i: i.name) # set metadata for the files generated by this script. i = cls.ITEMS.get("system/recovery-from-boot.p", None) if i: i.uid, i.gid, i.mode = 0, 0, 0644 i = cls.ITEMS.get("system/etc/install-recovery.sh", None) if i: i.uid, i.gid, i.mode = 0, 0, 0544 def CountChildMetadata(self): """Count up the (uid, gid, mode) tuples for all children and determine the best strategy for using set_perm_recursive and set_perm to correctly chown/chmod all the files to their desired values. Recursively calls itself for all descendants. Returns a dict of {(uid, gid, dmode, fmode): count} counting up all descendants of this node. (dmode or fmode may be None.) Also sets the best_subtree of each directory Item to the (uid, gid, dmode, fmode) tuple that will match the most descendants of that Item. 
""" assert self.dir d = self.descendants = {(self.uid, self.gid, self.mode, None): 1} for i in self.children: if i.dir: for k, v in i.CountChildMetadata().iteritems(): d[k] = d.get(k, 0) + v else: k = (i.uid, i.gid, None, i.mode) d[k] = d.get(k, 0) + 1 # Find the (uid, gid, dmode, fmode) tuple that matches the most # descendants. # First, find the (uid, gid) pair that matches the most # descendants. ug = {} for (uid, gid, _, _), count in d.iteritems(): ug[(uid, gid)] = ug.get((uid, gid), 0) + count ug = MostPopularKey(ug, (0, 0)) # Now find the dmode and fmode that match the most descendants # with that (uid, gid), and choose those. best_dmode = (0, 0755) best_fmode = (0, 0644) for k, count in d.iteritems(): if k[:2] != ug: continue if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2]) if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3]) self.best_subtree = ug + (best_dmode[1], best_fmode[1]) return d def SetPermissions(self, script): """Append set_perm/set_perm_recursive commands to 'script' to set all permissions, users, and groups for the tree of files rooted at 'self'.""" self.CountChildMetadata() def recurse(item, current): # current is the (uid, gid, dmode, fmode) tuple that the current # item (and all its children) have already been set to. We only # need to issue set_perm/set_perm_recursive commands if we're # supposed to be something different. 
if item.dir: if current != item.best_subtree: script.SetPermissionsRecursive("/"+item.name, *item.best_subtree) current = item.best_subtree if item.uid != current[0] or item.gid != current[1] or \ item.mode != current[2]: script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode) for i in item.children: recurse(i, current) else: if item.uid != current[0] or item.gid != current[1] or \ item.mode != current[3]: script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode) recurse(self, (-1, -1, -1, -1)) def CopySystemFiles(input_zip, output_zip=None, substitute=None): """Copies files underneath system/ in the input zip to the output zip. Populates the Item class with their metadata, and returns a list of symlinks. output_zip may be None, in which case the copy is skipped (but the other side effects still happen). substitute is an optional dict of {output filename: contents} to be output instead of certain input files. """ symlinks = [] for info in input_zip.infolist(): if info.filename.startswith("SYSTEM/"): basefilename = info.filename[7:] if IsSymlink(info): symlinks.append((input_zip.read(info.filename), "/system/" + basefilename)) else: info2 = copy.copy(info) fn = info2.filename = "system/" + basefilename if substitute and fn in substitute and substitute[fn] is None: continue if output_zip is not None: if substitute and fn in substitute: data = substitute[fn] else: data = input_zip.read(info.filename) output_zip.writestr(info2, data) if fn.endswith("/"): Item.Get(fn[:-1], dir=True) else: Item.Get(fn, dir=False) symlinks.sort() return symlinks def SignOutput(temp_zip_name, output_zip_name): key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) pw = key_passwords[OPTIONS.package_key] common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, whole_file=True) def AppendAssertions(script, info_dict): device = GetBuildProp("ro.product.device", info_dict) script.AssertDevice(device) def MakeRecoveryPatch(input_tmp, output_zip, 
recovery_img, boot_img): """Generate a binary patch that creates the recovery image starting with the boot image. (Most of the space in these images is just the kernel, which is identical for the two, so the resulting patch should be efficient.) Add it to the output zip, along with a shell script that is run from init.rc on first boot to actually do the patching and install the new recovery image. recovery_img and boot_img should be File objects for the corresponding images. info should be the dictionary returned by common.LoadInfoDict() on the input target_files. Returns an Item for the shell script, which must be made executable. """ diff_program = ["imgdiff"] path = os.path.join(input_tmp, "SYSTEM", "etc", "recovery-resource.dat") if os.path.exists(path): diff_program.append("-b") diff_program.append(path) bonus_args = "-b /system/etc/recovery-resource.dat" else: bonus_args = "" d = common.Difference(recovery_img, boot_img, diff_program=diff_program) _, _, patch = d.ComputePatch() common.ZipWriteStr(output_zip, "recovery/recovery-from-boot.p", patch) Item.Get("system/recovery-from-boot.p", dir=False) boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict) recovery_type, recovery_device = common.GetTypeAndDevice("/recovery", OPTIONS.info_dict) sh = """#!/system/bin/sh if ! 
applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then log -t recovery "Installing new recovery image" applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p else log -t recovery "Recovery image already installed" fi """ % { 'boot_size': boot_img.size, 'boot_sha1': boot_img.sha1, 'recovery_size': recovery_img.size, 'recovery_sha1': recovery_img.sha1, 'boot_type': boot_type, 'boot_device': boot_device, 'recovery_type': recovery_type, 'recovery_device': recovery_device, 'bonus_args': bonus_args, } common.ZipWriteStr(output_zip, "recovery/etc/install-recovery.sh", sh) return Item.Get("system/etc/install-recovery.sh", dir=False) def WriteFullOTAPackage(input_zip, output_zip): # TODO: how to determine this? We don't know what version it will # be installed on top of. For now, we expect the API just won't # change very often. 
script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) metadata = {"post-build": GetBuildProp("ro.build.fingerprint", OPTIONS.info_dict), "pre-device": GetBuildProp("ro.product.device", OPTIONS.info_dict), "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict), } device_specific = common.DeviceSpecificParams( input_zip=input_zip, input_version=OPTIONS.info_dict["recovery_api_version"], output_zip=output_zip, script=script, input_tmp=OPTIONS.input_tmp, metadata=metadata, info_dict=OPTIONS.info_dict) if not OPTIONS.omit_prereq: ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict) script.AssertOlderBuild(ts) AppendAssertions(script, OPTIONS.info_dict) device_specific.FullOTA_Assertions() device_specific.FullOTA_InstallBegin() script.ShowProgress(0.5, 0) if OPTIONS.wipe_user_data: script.FormatPartition("/data") if "selinux_fc" in OPTIONS.info_dict: WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) script.FormatPartition("/system") script.Mount("/system") script.UnpackPackageDir("recovery", "/system") script.UnpackPackageDir("system", "/system") symlinks = CopySystemFiles(input_zip, output_zip) script.MakeSymlinks(symlinks) boot_img = common.GetBootableImage("boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") recovery_img = common.GetBootableImage("recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY") MakeRecoveryPatch(OPTIONS.input_tmp, output_zip, recovery_img, boot_img) Item.GetMetadata(input_zip) Item.Get("system").SetPermissions(script) common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) common.ZipWriteStr(output_zip, "boot.img", boot_img.data) script.ShowProgress(0.2, 0) script.ShowProgress(0.2, 10) script.WriteRawImage("/boot", "boot.img") script.ShowProgress(0.1, 0) device_specific.FullOTA_InstallEnd() if OPTIONS.extra_script is not None: script.AppendExtra(OPTIONS.extra_script) script.UnmountAll() script.AddToZip(input_zip, output_zip) WriteMetadata(metadata, output_zip) def 
WritePolicyConfig(file_context, output_zip): f = open(file_context, 'r'); basename = os.path.basename(file_context) common.ZipWriteStr(output_zip, basename, f.read()) def WriteMetadata(metadata, output_zip): common.ZipWriteStr(output_zip, "META-INF/com/android/metadata", "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])) def LoadSystemFiles(z): """Load all the files from SYSTEM/... in a given target-files ZipFile, and return a dict of {filename: File object}.""" out = {} for info in z.infolist(): if info.filename.startswith("SYSTEM/") and not IsSymlink(info): basefilename = info.filename[7:] fn = "system/" + basefilename data = z.read(info.filename) out[fn] = common.File(fn, data) return out def GetBuildProp(prop, info_dict): """Return the fingerprint of the build of a given target-files info_dict.""" try: return info_dict.get("build.prop", {})[prop] except KeyError: raise common.ExternalError("couldn't find %s in build.prop" % (property,)) def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): source_version = OPTIONS.source_info_dict["recovery_api_version"] target_version = OPTIONS.target_info_dict["recovery_api_version"] if source_version == 0: print ("WARNING: generating edify script for a source that " "can't install it.") script = edify_generator.EdifyGenerator(source_version, OPTIONS.target_info_dict) metadata = {"pre-device": GetBuildProp("ro.product.device", OPTIONS.source_info_dict), "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict), } device_specific = common.DeviceSpecificParams( source_zip=source_zip, source_version=source_version, target_zip=target_zip, target_version=target_version, output_zip=output_zip, script=script, metadata=metadata, info_dict=OPTIONS.info_dict) print "Loading target..." target_data = LoadSystemFiles(target_zip) print "Loading source..." 
source_data = LoadSystemFiles(source_zip) verbatim_targets = [] patch_list = [] diffs = [] largest_source_size = 0 for fn in sorted(target_data.keys()): tf = target_data[fn] assert fn == tf.name sf = source_data.get(fn, None) if sf is None or fn in OPTIONS.require_verbatim: # This file should be included verbatim if fn in OPTIONS.prohibit_verbatim: raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,)) print "send", fn, "verbatim" tf.AddToZip(output_zip) verbatim_targets.append((fn, tf.size)) elif tf.sha1 != sf.sha1: # File is different; consider sending as a patch diffs.append(common.Difference(tf, sf)) else: # Target file identical to source. pass common.ComputeDifferences(diffs) for diff in diffs: tf, sf, d = diff.GetPatch() if d is None or len(d) > tf.size * OPTIONS.patch_threshold: # patch is almost as big as the file; don't bother patching tf.AddToZip(output_zip) verbatim_targets.append((tf.name, tf.size)) else: common.ZipWriteStr(output_zip, "patch/" + tf.name + ".p", d) patch_list.append((tf.name, tf, sf, tf.size, common.sha1(d).hexdigest())) largest_source_size = max(largest_source_size, sf.size) source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict) target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict) metadata["pre-build"] = source_fp metadata["post-build"] = target_fp script.Mount("/system") script.AssertSomeFingerprint(source_fp, target_fp) source_boot = common.GetBootableImage( "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", OPTIONS.source_info_dict) target_boot = common.GetBootableImage( "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") updating_boot = (source_boot.data != target_boot.data) source_recovery = common.GetBootableImage( "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", OPTIONS.source_info_dict) target_recovery = common.GetBootableImage( "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") updating_recovery = (source_recovery.data != 
target_recovery.data) # Here's how we divide up the progress bar: # 0.1 for verifying the start state (PatchCheck calls) # 0.8 for applying patches (ApplyPatch calls) # 0.1 for unpacking verbatim files, symlinking, and doing the # device-specific commands. AppendAssertions(script, OPTIONS.target_info_dict) device_specific.IncrementalOTA_Assertions() script.Print("Verifying current system...") device_specific.IncrementalOTA_VerifyBegin() script.ShowProgress(0.1, 0) total_verify_size = float(sum([i[2].size for i in patch_list]) + 1) if updating_boot: total_verify_size += source_boot.size so_far = 0 for fn, tf, sf, size, patch_sha in patch_list: script.PatchCheck("/"+fn, tf.sha1, sf.sha1) so_far += sf.size script.SetProgress(so_far / total_verify_size) if updating_boot: d = common.Difference(target_boot, source_boot) _, _, d = d.ComputePatch() print "boot target: %d source: %d diff: %d" % ( target_boot.size, source_boot.size, len(d)) common.ZipWriteStr(output_zip, "patch/boot.img.p", d) boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict) script.PatchCheck("%s:%s:%d:%s:%d:%s" % (boot_type, boot_device, source_boot.size, source_boot.sha1, target_boot.size, target_boot.sha1)) so_far += source_boot.size script.SetProgress(so_far / total_verify_size) if patch_list or updating_recovery or updating_boot: script.CacheFreeSpaceCheck(largest_source_size) device_specific.IncrementalOTA_VerifyEnd() script.Comment("---- start making changes here ----") device_specific.IncrementalOTA_InstallBegin() if OPTIONS.wipe_user_data: script.Print("Erasing user data...") script.FormatPartition("/data") script.Print("Removing unneeded files...") script.DeleteFiles(["/"+i[0] for i in verbatim_targets] + ["/"+i for i in sorted(source_data) if i not in target_data] + ["/system/recovery.img"]) script.ShowProgress(0.8, 0) total_patch_size = float(sum([i[1].size for i in patch_list]) + 1) if updating_boot: total_patch_size += target_boot.size so_far = 0 
script.Print("Patching system files...") deferred_patch_list = [] for item in patch_list: fn, tf, sf, size, _ = item if tf.name == "system/build.prop": deferred_patch_list.append(item) continue script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1, sf.sha1, "patch/"+fn+".p") so_far += tf.size script.SetProgress(so_far / total_patch_size) if updating_boot: # Produce the boot image by applying a patch to the current # contents of the boot partition, and write it back to the # partition. script.Print("Patching boot image...") script.ApplyPatch("%s:%s:%d:%s:%d:%s" % (boot_type, boot_device, source_boot.size, source_boot.sha1, target_boot.size, target_boot.sha1), "-", target_boot.size, target_boot.sha1, source_boot.sha1, "patch/boot.img.p") so_far += target_boot.size script.SetProgress(so_far / total_patch_size) print "boot image changed; including." else: print "boot image unchanged; skipping." if updating_recovery: # Recovery is generated as a patch using both the boot image # (which contains the same linux kernel as recovery) and the file # /system/etc/recovery-resource.dat (which contains all the images # used in the recovery UI) as sources. This lets us minimize the # size of the patch, which must be included in every OTA package. # # For older builds where recovery-resource.dat is not present, we # use only the boot image as the source. MakeRecoveryPatch(OPTIONS.target_tmp, output_zip, target_recovery, target_boot) script.DeleteFiles(["/system/recovery-from-boot.p", "/system/etc/install-recovery.sh"]) print "recovery image changed; including as patch from boot." else: print "recovery image unchanged; skipping." script.ShowProgress(0.1, 10) target_symlinks = CopySystemFiles(target_zip, None) target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) temp_script = script.MakeTemporary() Item.GetMetadata(target_zip) Item.Get("system").SetPermissions(temp_script) # Note that this call will mess up the tree of Items, so make sure # we're done with it. 
source_symlinks = CopySystemFiles(source_zip, None) source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) # Delete all the symlinks in source that aren't in target. This # needs to happen before verbatim files are unpacked, in case a # symlink in the source is replaced by a real file in the target. to_delete = [] for dest, link in source_symlinks: if link not in target_symlinks_d: to_delete.append(link) script.DeleteFiles(to_delete) if verbatim_targets: script.Print("Unpacking new files...") script.UnpackPackageDir("system", "/system") if updating_recovery: script.Print("Unpacking new recovery...") script.UnpackPackageDir("recovery", "/system") script.Print("Symlinks and permissions...") # Create all the symlinks that don't already exist, or point to # somewhere different than what we want. Delete each symlink before # creating it, since the 'symlink' command won't overwrite. to_create = [] for dest, link in target_symlinks: if link in source_symlinks_d: if dest != source_symlinks_d[link]: to_create.append((dest, link)) else: to_create.append((dest, link)) script.DeleteFiles([i[1] for i in to_create]) script.MakeSymlinks(to_create) # Now that the symlinks are created, we can set all the # permissions. script.AppendScript(temp_script) # Do device-specific installation (eg, write radio image). device_specific.IncrementalOTA_InstallEnd() if OPTIONS.extra_script is not None: script.AppendExtra(OPTIONS.extra_script) # Patch the build.prop file last, so if something fails but the # device can still come up, it appears to be the old build and will # get set the OTA package again to retry. 
script.Print("Patching remaining system files...") for item in deferred_patch_list: fn, tf, sf, size, _ = item script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1, sf.sha1, "patch/"+fn+".p") script.SetPermissions("/system/build.prop", 0, 0, 0644) script.AddToZip(target_zip, output_zip) WriteMetadata(metadata, output_zip) def main(argv): def option_handler(o, a): if o in ("-b", "--board_config"): pass # deprecated elif o in ("-k", "--package_key"): OPTIONS.package_key = a elif o in ("-i", "--incremental_from"): OPTIONS.incremental_source = a elif o in ("-w", "--wipe_user_data"): OPTIONS.wipe_user_data = True elif o in ("-n", "--no_prereq"): OPTIONS.omit_prereq = True elif o in ("-e", "--extra_script"): OPTIONS.extra_script = a elif o in ("-a", "--aslr_mode"): if a in ("on", "On", "true", "True", "yes", "Yes"): OPTIONS.aslr_mode = True else: OPTIONS.aslr_mode = False elif o in ("--worker_threads"): OPTIONS.worker_threads = int(a) else: return False return True args = common.ParseOptions(argv, __doc__, extra_opts="b:k:i:d:wne:a:", extra_long_opts=["board_config=", "package_key=", "incremental_from=", "wipe_user_data", "no_prereq", "extra_script=", "worker_threads=", "aslr_mode=", ], extra_option_handler=option_handler) if len(args) != 2: common.Usage(__doc__) sys.exit(1) if OPTIONS.extra_script is not None: OPTIONS.extra_script = open(OPTIONS.extra_script).read() print "unzipping target target-files..." 
OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) OPTIONS.target_tmp = OPTIONS.input_tmp OPTIONS.info_dict = common.LoadInfoDict(input_zip) if OPTIONS.verbose: print "--- target info ---" common.DumpInfoDict(OPTIONS.info_dict) if OPTIONS.device_specific is None: OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) if OPTIONS.device_specific is not None: OPTIONS.device_specific = os.path.normpath(OPTIONS.device_specific) print "using device-specific extensions in", OPTIONS.device_specific temp_zip_file = tempfile.NamedTemporaryFile() output_zip = zipfile.ZipFile(temp_zip_file, "w", compression=zipfile.ZIP_DEFLATED) if OPTIONS.incremental_source is None: WriteFullOTAPackage(input_zip, output_zip) if OPTIONS.package_key is None: OPTIONS.package_key = OPTIONS.info_dict.get( "default_system_dev_certificate", "build/target/product/security/testkey") else: print "unzipping source target-files..." OPTIONS.source_tmp, source_zip = common.UnzipTemp(OPTIONS.incremental_source) OPTIONS.target_info_dict = OPTIONS.info_dict OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) if OPTIONS.package_key is None: OPTIONS.package_key = OPTIONS.source_info_dict.get( "default_system_dev_certificate", "build/target/product/security/testkey") if OPTIONS.verbose: print "--- source info ---" common.DumpInfoDict(OPTIONS.source_info_dict) WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) output_zip.close() SignOutput(temp_zip_file.name, args[1]) temp_zip_file.close() common.Cleanup() print "done." if __name__ == '__main__': try: common.CloseInheritedPipes() main(sys.argv[1:]) except common.ExternalError, e: print print " ERROR: %s" % (e,) print sys.exit(1)
Python
#!/usr/bin/env python # # Copyright (C) 2009 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Check the signatures of all APKs in a target_files .zip file. With -c, compare the signatures of each package to the ones in a separate target_files (usually a previously distributed build for the same device) and flag any changes. Usage: check_target_file_signatures [flags] target_files -c (--compare_with) <other_target_files> Look for compatibility problems between the two sets of target files (eg., packages whose keys have changed). -l (--local_cert_dirs) <dir,dir,...> Comma-separated list of top-level directories to scan for .x509.pem files. Defaults to "vendor,build". Where cert files can be found that match APK signatures, the filename will be printed as the cert name, otherwise a hash of the cert plus its subject string will be printed instead. -t (--text) Dump the certificate information for both packages in comparison mode (this output is normally suppressed). """ import sys if sys.hexversion < 0x02040000: print >> sys.stderr, "Python 2.4 or newer is required." sys.exit(1) import os import re import shutil import subprocess import tempfile import zipfile try: from hashlib import sha1 as sha1 except ImportError: from sha import sha as sha1 import common # Work around a bug in python's zipfile module that prevents opening # of zipfiles if any entry has an extra field of between 1 and 3 bytes # (which is common with zipaligned APKs). 
This overrides the # ZipInfo._decodeExtra() method (which contains the bug) with an empty # version (since we don't need to decode the extra field anyway). class MyZipInfo(zipfile.ZipInfo): def _decodeExtra(self): pass zipfile.ZipInfo = MyZipInfo OPTIONS = common.OPTIONS OPTIONS.text = False OPTIONS.compare_with = None OPTIONS.local_cert_dirs = ("vendor", "build") PROBLEMS = [] PROBLEM_PREFIX = [] def AddProblem(msg): PROBLEMS.append(" ".join(PROBLEM_PREFIX) + " " + msg) def Push(msg): PROBLEM_PREFIX.append(msg) def Pop(): PROBLEM_PREFIX.pop() def Banner(msg): print "-" * 70 print " ", msg print "-" * 70 def GetCertSubject(cert): p = common.Run(["openssl", "x509", "-inform", "DER", "-text"], stdin=subprocess.PIPE, stdout=subprocess.PIPE) out, err = p.communicate(cert) if err and not err.strip(): return "(error reading cert subject)" for line in out.split("\n"): line = line.strip() if line.startswith("Subject:"): return line[8:].strip() return "(unknown cert subject)" class CertDB(object): def __init__(self): self.certs = {} def Add(self, cert, name=None): if cert in self.certs: if name: self.certs[cert] = self.certs[cert] + "," + name else: if name is None: name = "unknown cert %s (%s)" % (common.sha1(cert).hexdigest()[:12], GetCertSubject(cert)) self.certs[cert] = name def Get(self, cert): """Return the name for a given cert.""" return self.certs.get(cert, None) def FindLocalCerts(self): to_load = [] for top in OPTIONS.local_cert_dirs: for dirpath, dirnames, filenames in os.walk(top): certs = [os.path.join(dirpath, i) for i in filenames if i.endswith(".x509.pem")] if certs: to_load.extend(certs) for i in to_load: f = open(i) cert = ParseCertificate(f.read()) f.close() name, _ = os.path.splitext(i) name, _ = os.path.splitext(name) self.Add(cert, name) ALL_CERTS = CertDB() def ParseCertificate(data): """Parse a PEM-format certificate.""" cert = [] save = False for line in data.split("\n"): if "--END CERTIFICATE--" in line: break if save: cert.append(line) if 
"--BEGIN CERTIFICATE--" in line: save = True cert = "".join(cert).decode('base64') return cert def CertFromPKCS7(data, filename): """Read the cert out of a PKCS#7-format file (which is what is stored in a signed .apk).""" Push(filename + ":") try: p = common.Run(["openssl", "pkcs7", "-inform", "DER", "-outform", "PEM", "-print_certs"], stdin=subprocess.PIPE, stdout=subprocess.PIPE) out, err = p.communicate(data) if err and not err.strip(): AddProblem("error reading cert:\n" + err) return None cert = ParseCertificate(out) if not cert: AddProblem("error parsing cert output") return None return cert finally: Pop() class APK(object): def __init__(self, full_filename, filename): self.filename = filename Push(filename+":") try: self.RecordCerts(full_filename) self.ReadManifest(full_filename) finally: Pop() def RecordCerts(self, full_filename): out = set() try: f = open(full_filename) apk = zipfile.ZipFile(f, "r") pkcs7 = None for info in apk.infolist(): if info.filename.startswith("META-INF/") and \ (info.filename.endswith(".DSA") or info.filename.endswith(".RSA")): pkcs7 = apk.read(info.filename) cert = CertFromPKCS7(pkcs7, info.filename) out.add(cert) ALL_CERTS.Add(cert) if not pkcs7: AddProblem("no signature") finally: f.close() self.certs = frozenset(out) def ReadManifest(self, full_filename): p = common.Run(["aapt", "dump", "xmltree", full_filename, "AndroidManifest.xml"], stdout=subprocess.PIPE) manifest, err = p.communicate() if err: AddProblem("failed to read manifest") return self.shared_uid = None self.package = None for line in manifest.split("\n"): line = line.strip() m = re.search('A: (\S*?)(?:\(0x[0-9a-f]+\))?="(.*?)" \(Raw', line) if m: name = m.group(1) if name == "android:sharedUserId": if self.shared_uid is not None: AddProblem("multiple sharedUserId declarations") self.shared_uid = m.group(2) elif name == "package": if self.package is not None: AddProblem("multiple package declarations") self.package = m.group(2) if self.package is None: AddProblem("no 
package declaration") class TargetFiles(object): def __init__(self): self.max_pkg_len = 30 self.max_fn_len = 20 def LoadZipFile(self, filename): d, z = common.UnzipTemp(filename, '*.apk') try: self.apks = {} self.apks_by_basename = {} for dirpath, dirnames, filenames in os.walk(d): for fn in filenames: if fn.endswith(".apk"): fullname = os.path.join(dirpath, fn) displayname = fullname[len(d)+1:] apk = APK(fullname, displayname) self.apks[apk.package] = apk self.apks_by_basename[os.path.basename(apk.filename)] = apk self.max_pkg_len = max(self.max_pkg_len, len(apk.package)) self.max_fn_len = max(self.max_fn_len, len(apk.filename)) finally: shutil.rmtree(d) self.certmap = common.ReadApkCerts(z) z.close() def CheckSharedUids(self): """Look for any instances where packages signed with different certs request the same sharedUserId.""" apks_by_uid = {} for apk in self.apks.itervalues(): if apk.shared_uid: apks_by_uid.setdefault(apk.shared_uid, []).append(apk) for uid in sorted(apks_by_uid.keys()): apks = apks_by_uid[uid] for apk in apks[1:]: if apk.certs != apks[0].certs: break else: # all packages have the same set of certs; this uid is fine. continue AddProblem("different cert sets for packages with uid %s" % (uid,)) print "uid %s is shared by packages with different cert sets:" % (uid,) for apk in apks: print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename) for cert in apk.certs: print " ", ALL_CERTS.Get(cert) print def CheckExternalSignatures(self): for apk_filename, certname in self.certmap.iteritems(): if certname == "EXTERNAL": # Apps marked EXTERNAL should be signed with the test key # during development, then manually re-signed after # predexopting. Consider it an error if this app is now # signed with any key that is present in our tree. 
apk = self.apks_by_basename[apk_filename] name = ALL_CERTS.Get(apk.cert) if not name.startswith("unknown "): Push(apk.filename) AddProblem("hasn't been signed with EXTERNAL cert") Pop() def PrintCerts(self): """Display a table of packages grouped by cert.""" by_cert = {} for apk in self.apks.itervalues(): for cert in apk.certs: by_cert.setdefault(cert, []).append((apk.package, apk)) order = [(-len(v), k) for (k, v) in by_cert.iteritems()] order.sort() for _, cert in order: print "%s:" % (ALL_CERTS.Get(cert),) apks = by_cert[cert] apks.sort() for _, apk in apks: if apk.shared_uid: print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename, self.max_pkg_len, apk.package, apk.shared_uid) else: print " %-*s %-*s" % (self.max_fn_len, apk.filename, self.max_pkg_len, apk.package) print def CompareWith(self, other): """Look for instances where a given package that exists in both self and other have different certs.""" all = set(self.apks.keys()) all.update(other.apks.keys()) max_pkg_len = max(self.max_pkg_len, other.max_pkg_len) by_certpair = {} for i in all: if i in self.apks: if i in other.apks: # in both; should have same set of certs if self.apks[i].certs != other.apks[i].certs: by_certpair.setdefault((other.apks[i].certs, self.apks[i].certs), []).append(i) else: print "%s [%s]: new APK (not in comparison target_files)" % ( i, self.apks[i].filename) else: if i in other.apks: print "%s [%s]: removed APK (only in comparison target_files)" % ( i, other.apks[i].filename) if by_certpair: AddProblem("some APKs changed certs") Banner("APK signing differences") for (old, new), packages in sorted(by_certpair.items()): for i, o in enumerate(old): if i == 0: print "was", ALL_CERTS.Get(o) else: print " ", ALL_CERTS.Get(o) for i, n in enumerate(new): if i == 0: print "now", ALL_CERTS.Get(n) else: print " ", ALL_CERTS.Get(n) for i in sorted(packages): old_fn = other.apks[i].filename new_fn = self.apks[i].filename if old_fn == new_fn: print " %-*s [%s]" % (max_pkg_len, i, old_fn) else: 
print " %-*s [was: %s; now: %s]" % (max_pkg_len, i, old_fn, new_fn) print def main(argv): def option_handler(o, a): if o in ("-c", "--compare_with"): OPTIONS.compare_with = a elif o in ("-l", "--local_cert_dirs"): OPTIONS.local_cert_dirs = [i.strip() for i in a.split(",")] elif o in ("-t", "--text"): OPTIONS.text = True else: return False return True args = common.ParseOptions(argv, __doc__, extra_opts="c:l:t", extra_long_opts=["compare_with=", "local_cert_dirs="], extra_option_handler=option_handler) if len(args) != 1: common.Usage(__doc__) sys.exit(1) ALL_CERTS.FindLocalCerts() Push("input target_files:") try: target_files = TargetFiles() target_files.LoadZipFile(args[0]) finally: Pop() compare_files = None if OPTIONS.compare_with: Push("comparison target_files:") try: compare_files = TargetFiles() compare_files.LoadZipFile(OPTIONS.compare_with) finally: Pop() if OPTIONS.text or not compare_files: Banner("target files") target_files.PrintCerts() target_files.CheckSharedUids() target_files.CheckExternalSignatures() if compare_files: if OPTIONS.text: Banner("comparison files") compare_files.PrintCerts() target_files.CompareWith(compare_files) if PROBLEMS: print "%d problem(s) found:\n" % (len(PROBLEMS),) for p in PROBLEMS: print p return 1 return 0 if __name__ == '__main__': try: r = main(sys.argv[1:]) sys.exit(r) except common.ExternalError, e: print print " ERROR: %s" % (e,) print sys.exit(1)
Python
#!/usr/bin/env python # # Copyright (C) 2009 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Usage: merge-event-log-tags.py [-o output_file] [input_files...] Merge together zero or more event-logs-tags files to produce a single output file, stripped of comments. Checks that no tag numbers conflict and fails if they do. -h to display this usage message and exit. """ import cStringIO import getopt try: import hashlib except ImportError: import md5 as hashlib import struct import sys import event_log_tags errors = [] warnings = [] output_file = None pre_merged_file = None # Tags with a tag number of ? are assigned a tag in the range # [ASSIGN_START, ASSIGN_LIMIT). ASSIGN_START = 900000 ASSIGN_LIMIT = 1000000 try: opts, args = getopt.getopt(sys.argv[1:], "ho:m:") except getopt.GetoptError, err: print str(err) print __doc__ sys.exit(2) for o, a in opts: if o == "-h": print __doc__ sys.exit(2) elif o == "-o": output_file = a elif o == "-m": pre_merged_file = a else: print >> sys.stderr, "unhandled option %s" % (o,) sys.exit(1) # Restrictions on tags: # # Tag names must be unique. (If the tag number and description are # also the same, a warning is issued instead of an error.) # # Explicit tag numbers must be unique. (If the tag name is also the # same, no error is issued because the above rule will issue a # warning or error.) 
by_tagname = {} by_tagnum = {} pre_merged_tags = {} if pre_merged_file: for t in event_log_tags.TagFile(pre_merged_file).tags: pre_merged_tags[t.tagname] = t for fn in args: tagfile = event_log_tags.TagFile(fn) for t in tagfile.tags: tagnum = t.tagnum tagname = t.tagname description = t.description if t.tagname in by_tagname: orig = by_tagname[t.tagname] # Allow an explicit tag number to define an implicit tag number if orig.tagnum is None: orig.tagnum = t.tagnum elif t.tagnum is None: t.tagnum = orig.tagnum if (t.tagnum == orig.tagnum and t.description == orig.description): # if the name and description are identical, issue a warning # instead of failing (to make it easier to move tags between # projects without breaking the build). tagfile.AddWarning("tag \"%s\" (%s) duplicated in %s:%d" % (t.tagname, t.tagnum, orig.filename, orig.linenum), linenum=t.linenum) else: tagfile.AddError( "tag name \"%s\" used by conflicting tag %s from %s:%d" % (t.tagname, orig.tagnum, orig.filename, orig.linenum), linenum=t.linenum) continue if t.tagnum is not None and t.tagnum in by_tagnum: orig = by_tagnum[t.tagnum] if t.tagname != orig.tagname: tagfile.AddError( "tag number %d used by conflicting tag \"%s\" from %s:%d" % (t.tagnum, orig.tagname, orig.filename, orig.linenum), linenum=t.linenum) continue by_tagname[t.tagname] = t if t.tagnum is not None: by_tagnum[t.tagnum] = t errors.extend(tagfile.errors) warnings.extend(tagfile.warnings) if errors: for fn, ln, msg in errors: print >> sys.stderr, "%s:%d: error: %s" % (fn, ln, msg) sys.exit(1) if warnings: for fn, ln, msg in warnings: print >> sys.stderr, "%s:%d: warning: %s" % (fn, ln, msg) # Python's hash function (a) isn't great and (b) varies between # versions of python. Using md5 is overkill here but is the same from # platform to platform and speed shouldn't matter in practice. 
def hashname(str): d = hashlib.md5(str).digest()[:4] return struct.unpack("!I", d)[0] # Assign a tag number to all the entries that say they want one # assigned. We do this based on a hash of the tag name so that the # numbers should stay relatively stable as tags are added. # If we were provided pre-merged tags (w/ the -m option), then don't # ever try to allocate one, just fail if we don't have a number for name, t in sorted(by_tagname.iteritems()): if t.tagnum is None: if pre_merged_tags: try: t.tagnum = pre_merged_tags[t.tagname] except KeyError: print >> sys.stderr, ("Error: Tag number not defined for tag `%s'." +" Have you done a full build?") % t.tagname sys.exit(1) else: while True: x = (hashname(name) % (ASSIGN_LIMIT - ASSIGN_START - 1)) + ASSIGN_START if x not in by_tagnum: t.tagnum = x by_tagnum[x] = t break name = "_" + name # by_tagnum should be complete now; we've assigned numbers to all tags. buffer = cStringIO.StringIO() for n, t in sorted(by_tagnum.iteritems()): if t.description: buffer.write("%d %s %s\n" % (t.tagnum, t.tagname, t.description)) else: buffer.write("%d %s\n" % (t.tagnum, t.tagname)) event_log_tags.WriteOutput(output_file, buffer)
Python
#!/usr/bin/env python # # Copyright (C) 2009 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import operator, os, sys def get_file_size(path): st = os.lstat(path) return st.st_size; def main(argv): output = [] roots = argv[1:] for root in roots: base = len(root[:root.rfind(os.path.sep)]) for dir, dirs, files in os.walk(root): relative = dir[base:] for f in files: try: row = ( get_file_size(os.path.sep.join((dir, f))), os.path.sep.join((relative, f)), ) output.append(row) except os.error: pass output.sort(key=operator.itemgetter(0), reverse=True) for row in output: print "%12d %s" % row if __name__ == '__main__': main(sys.argv)
Python
#!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Finds files with the specified name under a particular directory, stopping
# the search in a given subdirectory when the file is found.
#

import os
import sys


def perform_find(mindepth, prune, dirlist, filename):
  """Walk each root in *dirlist* looking for *filename*.

  mindepth: like find(1) -mindepth; matches shallower than this are skipped
            (pass <= 0 to disable).
  prune:    directory paths whose subtrees are never entered.
  Once *filename* is found in a directory, its subdirectories are not
  descended into (the search "stops" there).  Returns the list of matches.
  """
  result = []
  pruneleaves = set(map(lambda x: os.path.split(x)[1], prune))
  for rootdir in dirlist:
    rootdepth = rootdir.count("/")
    for root, dirs, files in os.walk(rootdir, followlinks=True):
      # prune: only scan the dirs list in detail when a leaf name from
      # the prune list is present (cheap pre-check).
      check_prune = False
      for d in dirs:
        if d in pruneleaves:
          check_prune = True
          break
      if check_prune:
        i = 0
        while i < len(dirs):
          if dirs[i] in prune:
            del dirs[i]
          else:
            i += 1
      # mindepth: skip matches that are too shallow, but keep walking.
      if mindepth > 0:
        depth = 1 + root.count("/") - rootdepth
        if depth < mindepth:
          continue
      # match: record the hit and stop descending below this directory.
      if filename in files:
        result.append(os.path.join(root, filename))
        del dirs[:]
  return result


def usage():
  sys.stderr.write("""Usage: %(progName)s [<options>] <dirlist> <filename>
Options:
   --mindepth=<mindepth>
       Both behave in the same way as their find(1) equivalents.
   --prune=<dirname>
       Avoids returning results from inside any directory called <dirname>
       (e.g., "*/out/*").  May be used multiple times.
""" % {
      "progName": os.path.split(sys.argv[0])[1],
    })
  sys.exit(1)


def main(argv):
  mindepth = -1
  prune = []
  i = 1
  while i < len(argv) and len(argv[i]) > 2 and argv[i][0:2] == "--":
    arg = argv[i]
    if arg.startswith("--mindepth="):
      try:
        mindepth = int(arg[len("--mindepth="):])
      except ValueError:
        usage()
    elif arg.startswith("--prune="):
      p = arg[len("--prune="):]
      if len(p) == 0:
        usage()
      prune.append(p)
    else:
      usage()
    i += 1

  if len(argv) - i < 2:  # need both <dirlist> and <filename>
    usage()

  dirlist = argv[i:-1]
  filename = argv[-1]

  results = list(set(perform_find(mindepth, prune, dirlist, filename)))
  results.sort()
  for r in results:
    # print(r) is valid under both Python 2 and 3; "print r" was 2-only.
    print(r)


if __name__ == "__main__":
  main(sys.argv)
Python
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Prints to stdout the package names that have overlay changes between
current_overlays.txt and previous_overlays.txt.

Usage: diff_package_overlays.py <current_packages.txt> <current_overlays.txt> <previous_overlays.txt>

current_packages.txt contains all package names separated by space in the
current build.
This script modfies current_packages.txt if necessary: if there is a package in
previous_overlays.txt but absent from current_packages.txt, we copy that line
from previous_overlays.txt over to current_packages.txt.  Usually that means we
just don't care that package in the current build (for example we are switching
from a full build to a partial build with mm/mmm), and we should carry on the
previous overlay config so current_overlays.txt always reflects the current
status of the entire tree.

Format of current_overlays.txt and previous_overlays.txt:
  <package_name> <resource_overlay> [resource_overlay ...]
  <package_name> <resource_overlay> [resource_overlay ...]
  ...
"""

import sys


def main(argv):
  if len(argv) != 4:
    # sys.stderr.write is portable across Python 2 and 3, unlike the
    # original "print >> sys.stderr" statement.
    sys.stderr.write("%s\n" % (__doc__,))
    sys.exit(1)

  f = open(argv[1])
  all_packages = set(f.read().split())
  f.close()

  def load_overlay_config(filename):
    """Parse '<package> <overlay> ...' lines into {package: 'overlay ...'}."""
    f = open(filename)
    result = {}
    for line in f:
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      words = line.split()
      result[words[0]] = " ".join(words[1:])
    f.close()
    return result

  current_overlays = load_overlay_config(argv[2])
  previous_overlays = load_overlay_config(argv[3])

  result = []
  carryon = []
  for p in current_overlays:
    if p not in previous_overlays:
      result.append(p)
    elif current_overlays[p] != previous_overlays[p]:
      result.append(p)
  for p in previous_overlays:
    if p not in current_overlays:
      if p in all_packages:
        # overlay changed
        result.append(p)
      else:
        # we don't build p in the current build.
        carryon.append(p)

  # Add carryon to the current overlay config file.
  if carryon:
    f = open(argv[2], "a")
    for p in carryon:
      f.write(p + " " + previous_overlays[p] + "\n")
    f.close()

  # Print out the package names that have overlay change.
  for r in result:
    print(r)


if __name__ == "__main__":
  main(sys.argv)
Python
#!/usr/bin/env python # # Copyright (C) 2009 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Usage: java-event-log-tags.py [-o output_file] <input_file> <merged_tags_file> Generate a java class containing constants for each of the event log tags in the given input file. -h to display this usage message and exit. """ import cStringIO import getopt import os import os.path import re import sys import event_log_tags output_file = None try: opts, args = getopt.getopt(sys.argv[1:], "ho:") except getopt.GetoptError, err: print str(err) print __doc__ sys.exit(2) for o, a in opts: if o == "-h": print __doc__ sys.exit(2) elif o == "-o": output_file = a else: print >> sys.stderr, "unhandled option %s" % (o,) sys.exit(1) if len(args) != 2: print "need exactly two input files, not %d" % (len(args),) print __doc__ sys.exit(1) fn = args[0] tagfile = event_log_tags.TagFile(fn) # Load the merged tag file (which should have numbers assigned for all # tags. Use the numbers from the merged file to fill in any missing # numbers from the input file. merged_fn = args[1] merged_tagfile = event_log_tags.TagFile(merged_fn) merged_by_name = dict([(t.tagname, t) for t in merged_tagfile.tags]) for t in tagfile.tags: if t.tagnum is None: if t.tagname in merged_by_name: t.tagnum = merged_by_name[t.tagname].tagnum else: # We're building something that's not being included in the # product, so its tags don't appear in the merged file. 
Assign # them all an arbitrary number so we can emit the java and # compile the (unused) package. t.tagnum = 999999 if "java_package" not in tagfile.options: tagfile.AddError("java_package option not specified", linenum=0) hide = True if "javadoc_hide" in tagfile.options: hide = event_log_tags.BooleanFromString(tagfile.options["javadoc_hide"][0]) if tagfile.errors: for fn, ln, msg in tagfile.errors: print >> sys.stderr, "%s:%d: error: %s" % (fn, ln, msg) sys.exit(1) buffer = cStringIO.StringIO() buffer.write("/* This file is auto-generated. DO NOT MODIFY.\n" " * Source file: %s\n" " */\n\n" % (fn,)) buffer.write("package %s;\n\n" % (tagfile.options["java_package"][0],)) basename, _ = os.path.splitext(os.path.basename(fn)) if hide: buffer.write("/**\n" " * @hide\n" " */\n") buffer.write("public class %s {\n" % (basename,)) buffer.write(" private %s() { } // don't instantiate\n" % (basename,)) for t in tagfile.tags: if t.description: buffer.write("\n /** %d %s %s */\n" % (t.tagnum, t.tagname, t.description)) else: buffer.write("\n /** %d %s */\n" % (t.tagnum, t.tagname)) buffer.write(" public static final int %s = %d;\n" % (t.tagname.upper(), t.tagnum)) keywords = frozenset(["abstract", "continue", "for", "new", "switch", "assert", "default", "goto", "package", "synchronized", "boolean", "do", "if", "private", "this", "break", "double", "implements", "protected", "throw", "byte", "else", "import", "public", "throws", "case", "enum", "instanceof", "return", "transient", "catch", "extends", "int", "short", "try", "char", "final", "interface", "static", "void", "class", "finally", "long", "strictfp", "volatile", "const", "float", "native", "super", "while"]) def javaName(name): out = name[0].lower() + re.sub(r"[^A-Za-z0-9]", "", name.title())[1:] if out in keywords: out += "_" return out javaTypes = ["ERROR", "int", "long", "String", "Object[]"] for t in tagfile.tags: methodName = javaName("write_" + t.tagname) if t.description: args = [arg.strip("() ").split("|") for 
arg in t.description.split(",")] else: args = [] argTypesNames = ", ".join([javaTypes[int(arg[1])] + " " + javaName(arg[0]) for arg in args]) argNames = "".join([", " + javaName(arg[0]) for arg in args]) buffer.write("\n public static void %s(%s) {" % (methodName, argTypesNames)) buffer.write("\n android.util.EventLog.writeEvent(%s%s);" % (t.tagname.upper(), argNames)) buffer.write("\n }\n") buffer.write("}\n"); output_dir = os.path.dirname(output_file) if not os.path.exists(output_dir): os.makedirs(output_dir) event_log_tags.WriteOutput(output_file, buffer)
Python
#!/usr/bin/python -E

"""Report files present in the working tree but unknown to Perforce."""

import sys, os, re

# Paths matching any of these patterns are build crud and are filtered
# from the report unless -a is given.
excludes = [r'.*?/\.obj.*?',
            r'.*?~',
            r'.*?\/.DS_Store',
            r'.*?\/.gdb_history',
            r'.*?\/buildspec.mk',
            r'.*?/\..*?\.swp',
            r'.*?/out/.*?',
            r'.*?/install/.*?']
# list() so the compiled patterns survive repeated iteration; a bare
# Python 3 map object would be exhausted after the first file checked.
excludes_compiled = list(map(re.compile, excludes))


def filter_excludes(str):
  """Return False when *str* matches any exclude pattern, else True."""
  for e in excludes_compiled:
    if e.match(str):
      return False
  return True


def split_perforce_parts(s):
  """Strip the leading depot/client columns from a 'p4 where' output line,
  returning only the local filesystem path (the final third of the fields)."""
  # // keeps this integer arithmetic under Python 3, matching Python 2's /.
  spaces = ((s.count(" ") + 1) // 3) * 2
  pos = 0
  while spaces > 0:
    pos = s.find(" ", pos) + 1
    spaces = spaces - 1
  return s[pos:]


def quotate(s):
  """Wrap *s* in double quotes for use on a shell command line."""
  return '"' + s + '"'


class PerforceError(Exception):
  def __init__(self, value):
    self.value = value

  def __str__(self):
    return repr(self.value)


def run(command, regex, filt):
  """Run *command* in a shell, keep output lines matching *filt*, and
  return group 1 of *regex* for each kept line (or None if none kept).
  Raises PerforceError when the command exits non-zero."""
  def matchit(s):
    m = regex_compiled.match(s)
    if m:
      return m.group(1)
    else:
      return ""

  def filterit(s):
    if filt_compiled.match(s):
      return True
    else:
      return False

  fd = os.popen(command)
  lines = fd.readlines()
  status = fd.close()
  if status:
    raise PerforceError("error calling " + command)

  regex_compiled = re.compile(regex)
  filt_compiled = re.compile(filt)

  # list() around filter/map keeps len() and reuse working on Python 3.
  if len(lines) >= 1:
    lines = list(filter(filterit, lines))
  if len(lines) >= 1:
    return list(map(matchit, lines))
  return None


def main():
  try:
    if len(sys.argv) == 1:
      do_exclude = True
    elif len(sys.argv) == 2 and sys.argv[1] == "-a":
      do_exclude = False
    else:
      print("usage: checktree [-a]")
      print("  -a  don't filter common crud in the tree")
      sys.exit(1)

    have = run("p4 have ...", r'[^#]+#[0-9]+ - (.*)', r'.*')

    cwd = os.getcwd()
    files = run("find . -not -type d", r'.(.*)', r'.*')
    files = [cwd + s for s in files]

    added_depot_path = run("p4 opened ...", r'([^#]+)#.*',
                           r'.*?#[0-9]+ - add .*')
    added = []
    if added_depot_path:
      added_depot_path = list(map(quotate, added_depot_path))
      where = "p4 where " + " ".join(added_depot_path)
      added = run(where, r'(.*)', r'.*')
      added = list(map(split_perforce_parts, added))

    extras = []

    # Python 2.3 -- still default on Mac OS X -- does not have set()
    # Make dict's here to support the "in" operations below
    have = dict().fromkeys(have, 1)
    added = dict().fromkeys(added, 1)

    for file in files:
      if not file in have:
        if not file in added:
          extras.append(file)

    if do_exclude:
      extras = list(filter(filter_excludes, extras))

    for s in extras:
      print(s.replace(" ", "\\ "))
  except PerforceError:
    sys.exit(2)


# Guarding execution makes the module importable (e.g. for testing)
# without invoking p4; running it as a script behaves as before.
if __name__ == "__main__":
  main()
Python
#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) if __name__ == "__main__": execute_manager(settings)
Python
#!/usr/bin/env python from django.core.management import execute_manager try: import settings # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) sys.exit(1) if __name__ == "__main__": execute_manager(settings)
Python
from django.conf.urls.defaults import *

# The admin site is currently disabled.  To enable it: import
# django.contrib.admin, call admin.autodiscover(), and add
# (r'^admin/(.*)', admin.site.root) plus the admindocs include below.

# Route /accounts/ requests to the PyAMF remoting gateway.
urlpatterns = patterns(
    '',
    (r'^accounts/', 'amfgateway.gateway'),
)
Python
# Django settings for 3rdear project.
# NOTE(review): ROOT_URLCONF below says '3ear.urls' while this header
# says "3rdear" -- kept as-is; confirm against the actual package name.
import os

# Absolute path of the directory holding this settings file; used to
# build filesystem paths that work regardless of the working directory.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('zengsun', 'zengsun.gm@gmail.com'),
)
MANAGERS = ADMINS

# SQLite database stored in a local file; the USER/PASSWORD/HOST/PORT
# settings are ignored by the sqlite3 backend.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'data.db'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''

# Localization: Chinese site running on Shanghai time.
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
USE_I18N = True

# Uploaded media live one level above the project package and are
# served under /site_media/; admin assets under /media/.
MEDIA_ROOT = os.path.join(PROJECT_PATH, '../media')
MEDIA_URL = '/site_media/'
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '6zgw=67#s7+21fr-idxscvqwk&p3zikv*d16beb68*09!(6qje'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
)

ROOT_URLCONF = '3ear.urls'

TEMPLATE_DIRS = (
    # Absolute path, forward slashes even on Windows.
    os.path.join(PROJECT_PATH, '../templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    # user applications
    'userprofile',
    'music',
)

SESSION_EXPIRE_AT_BROWSER_CLOSE = True
AUTH_PROFILE_MODULE = 'userprofile.Profile'

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
)
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django import newforms as forms
# fix: User was referenced in the validators below but never imported
# (NameError on any POST); import it here.
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _

from models import Profile

# Characters allowed in a login name.
ALLOWED_USERNAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_."


class ProfileForm(forms.ModelForm):
    """ModelForm over Profile; user and report_to are set by the view."""

    class Meta:
        model = Profile
        exclude = ('user', 'report_to',)


class UserForm(forms.Form):
    """Registration form collecting credentials for a new account."""

    login_name = forms.CharField(max_length=30, min_length=4)
    email = forms.EmailField()
    password = forms.CharField(max_length=12, min_length=6,
                               widget=forms.PasswordInput)
    repassword = forms.CharField(max_length=12, min_length=6,
                                 widget=forms.PasswordInput)

    def clean_login_name(self):
        """Verify that the username isn't already registered."""
        username = self.cleaned_data.get("login_name")
        if not set(username).issubset(ALLOWED_USERNAME_CHARS):
            raise forms.ValidationError(_("That login name has invalid characters. The valid values are letters, numbers and underscore."))
        # Case-insensitive uniqueness check against existing accounts.
        if User.objects.filter(username__iexact=username).count() == 0:
            return username
        raise forms.ValidationError(_("The login name is already registered."))

    def clean(self):
        """Verify that the 2 passwords fields are equal."""
        if self.cleaned_data.get("password") == self.cleaned_data.get("repassword"):
            return self.cleaned_data
        raise forms.ValidationError(_("The passwords inserted are different."))

    def clean_email(self):
        """Verify that the email is not already used by an account."""
        email = self.cleaned_data.get("email")
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            # fix: was a bare "except:", which swallowed every error
            # (including MultipleObjectsReturned and real failures);
            # only a missing user means the address is free.
            return email
        raise forms.ValidationError(_("That e-mail is already used."))
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _

# Choices for Profile.gender.
GENDER_CHOICES = (
    ('M', _('Male')),
    ('F', _('Female')),
)


class Profile(models.Model):
    """Employee profile attached one-to-one to a Django auth User.

    User already carries username, first_name, last_name, email,
    password and the is_*/date bookkeeping fields; this model adds the
    HR-specific details.
    """

    user = models.OneToOneField(User, primary_key=True)
    # Line manager; optional so the top of the hierarchy stays empty.
    report_to = models.ForeignKey('self', null=True, blank=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES,
                              radio_admin=True)
    identification = models.CharField(max_length=18, unique=True)
    birthday = models.DateField()
    tel = models.CharField(max_length=50)
    portrait = models.ImageField(upload_to='photo', blank=True, null=True)

    class Meta:
        pass

    class Admin:
        list_display = ('user', 'gender', 'identification', 'birthday',
                        'tel', 'portrait')

    def __unicode__(self):
        return self.user.username

    def get_portrait(self):
        """Return the portrait URL, or a bundled default image path."""
        if not self.portrait:
            return 'images/default.png'
        return self.get_portrait_url()
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
#from django.contrib import auth
from django.contrib.auth.models import User

from models import Profile
from forms import ProfileForm, UserForm


def register(request):
    """Handle new-user registration.

    GET: render empty user and profile forms.
    POST: validate both forms; on success create an inactive User,
    attach the submitted Profile to it, and redirect to the site root.
    Invalid POSTs re-render the page with the bound (error-carrying)
    forms.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        profile_form = ProfileForm(request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            username = user_form.cleaned_data.get('login_name')
            password = user_form.cleaned_data['password']
            email = user_form.cleaned_data['email']
            new_user = User.objects.create_user(username=username,
                                                email=email,
                                                password=password)
            # Account stays inactive until explicitly activated.
            new_user.is_active = False
            # fix: Model.save() returns None, so the original
            # "if new_user.save():" branch never ran and the profile
            # was silently dropped.  Save unconditionally instead.
            new_user.save()
            new_profile = profile_form.save(commit=False)
            new_profile.user = new_user
            new_profile.save()
            return HttpResponseRedirect('/')
    else:
        user_form = UserForm()
        profile_form = ProfileForm()
    return render_to_response('registration/register.html',
                              {'user_form': user_form,
                               'profile_form': profile_form},
                              context_instance=RequestContext(request))
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django import newforms as forms
# fix: User was referenced in the validators below but never imported
# (NameError on any POST); import it here.
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _

from models import Profile

# Characters allowed in a login name.
ALLOWED_USERNAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_."


class ProfileForm(forms.ModelForm):
    """ModelForm over Profile; user and report_to are set by the view."""

    class Meta:
        model = Profile
        exclude = ('user', 'report_to',)


class UserForm(forms.Form):
    """Registration form collecting credentials for a new account."""

    login_name = forms.CharField(max_length=30, min_length=4)
    email = forms.EmailField()
    password = forms.CharField(max_length=12, min_length=6,
                               widget=forms.PasswordInput)
    repassword = forms.CharField(max_length=12, min_length=6,
                                 widget=forms.PasswordInput)

    def clean_login_name(self):
        """Verify that the username isn't already registered."""
        username = self.cleaned_data.get("login_name")
        if not set(username).issubset(ALLOWED_USERNAME_CHARS):
            raise forms.ValidationError(_("That login name has invalid characters. The valid values are letters, numbers and underscore."))
        # Case-insensitive uniqueness check against existing accounts.
        if User.objects.filter(username__iexact=username).count() == 0:
            return username
        raise forms.ValidationError(_("The login name is already registered."))

    def clean(self):
        """Verify that the 2 passwords fields are equal."""
        if self.cleaned_data.get("password") == self.cleaned_data.get("repassword"):
            return self.cleaned_data
        raise forms.ValidationError(_("The passwords inserted are different."))

    def clean_email(self):
        """Verify that the email is not already used by an account."""
        email = self.cleaned_data.get("email")
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            # fix: was a bare "except:", which swallowed every error
            # (including MultipleObjectsReturned and real failures);
            # only a missing user means the address is free.
            return email
        raise forms.ValidationError(_("That e-mail is already used."))
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _

# Choices for Profile.gender.
GENDER_CHOICES = (
    ('M', _('Male')),
    ('F', _('Female')),
)


class Profile(models.Model):
    """Employee profile attached one-to-one to a Django auth User.

    User already carries username, first_name, last_name, email,
    password and the is_*/date bookkeeping fields; this model adds the
    HR-specific details.
    """

    user = models.OneToOneField(User, primary_key=True)
    # Line manager; optional so the top of the hierarchy stays empty.
    report_to = models.ForeignKey('self', null=True, blank=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES,
                              radio_admin=True)
    identification = models.CharField(max_length=18, unique=True)
    birthday = models.DateField()
    tel = models.CharField(max_length=50)
    portrait = models.ImageField(upload_to='photo', blank=True, null=True)

    class Meta:
        pass

    class Admin:
        list_display = ('user', 'gender', 'identification', 'birthday',
                        'tel', 'portrait')

    def __unicode__(self):
        return self.user.username

    def get_portrait(self):
        """Return the portrait URL, or a bundled default image path."""
        if not self.portrait:
            return 'images/default.png'
        return self.get_portrait_url()
Python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
#from django.contrib import auth
from django.contrib.auth.models import User

from models import Profile
from forms import ProfileForm, UserForm


def register(request):
    """Handle new-user registration.

    GET: render empty user and profile forms.
    POST: validate both forms; on success create an inactive User,
    attach the submitted Profile to it, and redirect to the site root.
    Invalid POSTs re-render the page with the bound (error-carrying)
    forms.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST)
        profile_form = ProfileForm(request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            username = user_form.cleaned_data.get('login_name')
            password = user_form.cleaned_data['password']
            email = user_form.cleaned_data['email']
            new_user = User.objects.create_user(username=username,
                                                email=email,
                                                password=password)
            # Account stays inactive until explicitly activated.
            new_user.is_active = False
            # fix: Model.save() returns None, so the original
            # "if new_user.save():" branch never ran and the profile
            # was silently dropped.  Save unconditionally instead.
            new_user.save()
            new_profile = profile_form.save(commit=False)
            new_profile.user = new_user
            new_profile.save()
            return HttpResponseRedirect('/')
    else:
        user_form = UserForm()
        profile_form = ProfileForm()
    return render_to_response('registration/register.html',
                              {'user_form': user_form,
                               'profile_form': profile_form},
                              context_instance=RequestContext(request))
Python
from django.contrib.auth.models import User
from pyamf.remoting.gateway.django import DjangoGateway


def user_list(request):
    """AMF service method: return every auth User in the database."""
    return User.objects.all()


# Expose user_list to AMF clients under the service name "userlist".
gateway = DjangoGateway({'userlist': user_list})
Python