repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
silvermagic/jhbuild
jhbuild/buildbot/scheduler.py
8
6235
# jhbuild - a tool to ease building collections of source packages # Copyright (C) 2008 Igalia S.L., John Carr, Frederic Peters # # scheduler.py: jhbuild jobs scheduler # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import time from twisted.application import service, internet from twisted.python import log from twisted.internet import reactor from buildbot.scheduler import Periodic, BaseUpstreamScheduler, Scheduler, Nightly from buildbot.sourcestamp import SourceStamp from buildbot import buildset, util def SerialScheduler(name, project, builderNames, periodicBuildTimer=60*60*12, upstream=None, branch=None): if not upstream: return StartSerial(name, project, builderNames, periodicBuildTimer, branch) return Serial(name, project, upstream, builderNames, branch) def NightlySerialScheduler(name, project, builderNames, minute=0, hour='*', dayOfMonth='*', month='*', dayOfWeek='*', upstream=None, branch=None): if not upstream: return NightlyStartSerial(name, project, builderNames, minute, hour, dayOfMonth, month, dayOfWeek, branch) return Serial(name, project, upstream, builderNames, branch) class OnCommitScheduler(Scheduler): ''' Scheduler that will build a module when a change notification (on svn-commits-list) is received. 
''' def __init__(self, name, project, builderNames, properties={}): Scheduler.__init__(self, name, branch=None, treeStableTimer=180, builderNames=builderNames, properties=properties) self.project = project self.importantChanges = [] self.unimportantChanges = [] self.nextBuildTime = None self.timer = None def changeIsImportant(self, change): if not change.files: # strange, better be on the safe side return True non_po_files = [x for x in change.files if not '/po/' in x] if non_po_files: return True # changes are limited to translations, it is unlikely it would break # the build, mark them as unimportant. return False def addChange(self, change): if change.project != self.project: return if change.branch != self.branch: return log.msg('adding a change') if self.changeIsImportant(change): self.addImportantChange(change) else: self.addUnimportantChange(change) class StartSerial(Periodic): def __init__(self, name, project, builderNames, periodicBuildTimer, branch=None): Periodic.__init__(self,name,builderNames,periodicBuildTimer,branch) self.project = project self.finishedWatchers = [] def subscribeToFinishedBuilds(self, watcher): self.finishedWatchers.append(watcher) def unsubscribeToFinishedBuilds(self, watcher): self.finishedWatchers.remove(watcher) def buildSetFinished(self, bss): if not self.running: return ss = bss.getSourceStamp() for w in self.finishedWatchers: w(ss) Periodic.buildSetFinished(self,bss) class Serial(BaseUpstreamScheduler): """This scheduler runs some set of builds that should be run after the 'upstream' scheduler has completed (successfully or not).""" compare_attrs = ('name', 'upstream', 'builders', 'branch') def __init__(self, name, project, upstream, builderNames, branch): BaseUpstreamScheduler.__init__(self, name) self.project = project self.upstream = upstream self.branch = branch self.builderNames = builderNames self.finishedWatchers = [] def subscribeToFinishedBuilds(self, watcher): self.finishedWatchers.append(watcher) def 
unsubscribeToFinishedBuilds(self, watcher): self.finishedWatchers.remove(watcher) def buildSetFinished(self, bss): if not self.running: return ss = bss.getSourceStamp() for w in self.finishedWatchers: w(ss) BaseUpstreamScheduler.buildSetFinished(self,bss) def listBuilderNames(self): return self.builderNames def getPendingBuildTimes(self): # report the upstream's value return self.upstream.getPendingBuildTimes() def startService(self): service.MultiService.startService(self) self.upstream.subscribeToFinishedBuilds(self.upstreamBuilt) def stopService(self): d = service.MultiService.stopService(self) self.upstream.unsubscribeToFinishedBuilds(self.upstreamBuilt) return d def upstreamBuilt(self, ss): bs = buildset.BuildSet(self.builderNames, SourceStamp(branch=self.branch)) self.submitBuildSet(bs) class NightlyStartSerial(Nightly): def __init__(self, name, project, builderNames, minute=0, hour='*', dayOfMonth='*', month='*', dayOfWeek='*', branch=None): Nightly.__init__(self, name, builderNames, minute, hour, dayOfMonth, month, dayOfWeek, branch) self.project = project self.finishedWatchers = [] def subscribeToFinishedBuilds(self, watcher): self.finishedWatchers.append(watcher) def unsubscribeToFinishedBuilds(self, watcher): self.finishedWatchers.remove(watcher) def buildSetFinished(self, bss): if not self.running: return ss = bss.getSourceStamp() for w in self.finishedWatchers: w(ss) Nightly.buildSetFinished(self,bss)
gpl-2.0
yasoob/PythonRSSReader
venv/lib/python2.7/site.py
784
27543
"""Append module search paths for third-party packages to sys.path. **************************************************************** * This module is automatically imported during initialization. * **************************************************************** In earlier versions of Python (up to 1.5a3), scripts or modules that needed to use site-specific modules would place ``import site'' somewhere near the top of their code. Because of the automatic import, this is no longer necessary (but code that does it still works). This will append site-specific paths to the module search path. On Unix, it starts with sys.prefix and sys.exec_prefix (if different) and appends lib/python<version>/site-packages as well as lib/site-python. It also supports the Debian convention of lib/python<version>/dist-packages. On other platforms (mainly Mac and Windows), it uses just sys.prefix (and sys.exec_prefix, if different, but this is unlikely). The resulting directories, if they exist, are appended to sys.path, and also inspected for path configuration files. FOR DEBIAN, this sys.path is augmented with directories in /usr/local. Local addons go into /usr/local/lib/python<version>/site-packages (resp. /usr/local/lib/site-python), Debian addons install into /usr/{lib,share}/python<version>/dist-packages. A path configuration file is a file whose name has the form <package>.pth; its contents are additional directories (one per line) to be added to sys.path. Non-existing directories (or non-directories) are never added to sys.path; no directory is added to sys.path more than once. Blank lines and lines beginning with '#' are skipped. Lines starting with 'import' are executed. For example, suppose sys.prefix and sys.exec_prefix are set to /usr/local and there is a directory /usr/local/lib/python2.X/site-packages with three subdirectories, foo, bar and spam, and two path configuration files, foo.pth and bar.pth. 
Assume foo.pth contains the following: # foo package configuration foo bar bletch and bar.pth contains: # bar package configuration bar Then the following directories are added to sys.path, in this order: /usr/local/lib/python2.X/site-packages/bar /usr/local/lib/python2.X/site-packages/foo Note that bletch is omitted because it doesn't exist; bar precedes foo because bar.pth comes alphabetically before foo.pth; and spam is omitted because it is not mentioned in either path configuration file. After these path manipulations, an attempt is made to import a module named sitecustomize, which can perform arbitrary additional site-specific customizations. If this import fails with an ImportError exception, it is silently ignored. """ import sys import os try: import __builtin__ as builtins except ImportError: import builtins try: set except NameError: from sets import Set as set # Prefixes for site-packages; add additional prefixes like /usr/local here PREFIXES = [sys.prefix, sys.exec_prefix] # Enable per user site-packages directory # set it to False to disable the feature or True to force the feature ENABLE_USER_SITE = None # for distutils.commands.install USER_SITE = None USER_BASE = None _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32 _is_pypy = hasattr(sys, 'pypy_version_info') _is_jython = sys.platform[:4] == 'java' if _is_jython: ModuleType = type(os) def makepath(*paths): dir = os.path.join(*paths) if _is_jython and (dir == '__classpath__' or dir.startswith('__pyclasspath__')): return dir, dir dir = os.path.abspath(dir) return dir, os.path.normcase(dir) def abs__file__(): """Set all module' __file__ attribute to an absolute path""" for m in sys.modules.values(): if ((_is_jython and not isinstance(m, ModuleType)) or hasattr(m, '__loader__')): # only modules need the abspath in Jython. 
and don't mess # with a PEP 302-supplied __file__ continue f = getattr(m, '__file__', None) if f is None: continue m.__file__ = os.path.abspath(f) def removeduppaths(): """ Remove duplicate entries from sys.path along with making them absolute""" # This ensures that the initial path provided by the interpreter contains # only absolute pathnames, even if we're running from the build directory. L = [] known_paths = set() for dir in sys.path: # Filter out duplicate paths (on case-insensitive file systems also # if they only differ in case); turn relative paths into absolute # paths. dir, dircase = makepath(dir) if not dircase in known_paths: L.append(dir) known_paths.add(dircase) sys.path[:] = L return known_paths # XXX This should not be part of site.py, since it is needed even when # using the -S option for Python. See http://www.python.org/sf/586680 def addbuilddir(): """Append ./build/lib.<platform> in case we're running in the build dir (especially for Guido :-)""" from distutils.util import get_platform s = "build/lib.%s-%.3s" % (get_platform(), sys.version) if hasattr(sys, 'gettotalrefcount'): s += '-pydebug' s = os.path.join(os.path.dirname(sys.path[-1]), s) sys.path.append(s) def _init_pathinfo(): """Return a set containing all existing directory entries from sys.path""" d = set() for dir in sys.path: try: if os.path.isdir(dir): dir, dircase = makepath(dir) d.add(dircase) except TypeError: continue return d def addpackage(sitedir, name, known_paths): """Add a new path to known_paths by combining sitedir and 'name' or execute sitedir if it starts with 'import'""" if known_paths is None: _init_pathinfo() reset = 1 else: reset = 0 fullname = os.path.join(sitedir, name) try: f = open(fullname, "rU") except IOError: return try: for line in f: if line.startswith("#"): continue if line.startswith("import"): exec(line) continue line = line.rstrip() dir, dircase = makepath(sitedir, line) if not dircase in known_paths and os.path.exists(dir): sys.path.append(dir) 
known_paths.add(dircase) finally: f.close() if reset: known_paths = None return known_paths def addsitedir(sitedir, known_paths=None): """Add 'sitedir' argument to sys.path if missing and handle .pth files in 'sitedir'""" if known_paths is None: known_paths = _init_pathinfo() reset = 1 else: reset = 0 sitedir, sitedircase = makepath(sitedir) if not sitedircase in known_paths: sys.path.append(sitedir) # Add path component try: names = os.listdir(sitedir) except os.error: return names.sort() for name in names: if name.endswith(os.extsep + "pth"): addpackage(sitedir, name, known_paths) if reset: known_paths = None return known_paths def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix): """Add site-packages (and possibly site-python) to sys.path""" prefixes = [os.path.join(sys_prefix, "local"), sys_prefix] if exec_prefix != sys_prefix: prefixes.append(os.path.join(exec_prefix, "local")) for prefix in prefixes: if prefix: if sys.platform in ('os2emx', 'riscos') or _is_jython: sitedirs = [os.path.join(prefix, "Lib", "site-packages")] elif _is_pypy: sitedirs = [os.path.join(prefix, 'site-packages')] elif sys.platform == 'darwin' and prefix == sys_prefix: if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"), os.path.join(prefix, "Extras", "lib", "python")] else: # any other Python distros on OSX work this way sitedirs = [os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages")] elif os.sep == '/': sitedirs = [os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages"), os.path.join(prefix, "lib", "site-python"), os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")] lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages") if (os.path.exists(lib64_dir) and os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]): if _is_64bit: sitedirs.insert(0, lib64_dir) else: 
sitedirs.append(lib64_dir) try: # sys.getobjects only available in --with-pydebug build sys.getobjects sitedirs.insert(0, os.path.join(sitedirs[0], 'debug')) except AttributeError: pass # Debian-specific dist-packages directories: sitedirs.append(os.path.join(prefix, "local/lib", "python" + sys.version[:3], "dist-packages")) if sys.version[0] == '2': sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[:3], "dist-packages")) else: sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[0], "dist-packages")) sitedirs.append(os.path.join(prefix, "lib", "dist-python")) else: sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")] if sys.platform == 'darwin': # for framework builds *only* we add the standard Apple # locations. Currently only per-user, but /Library and # /Network/Library could be added too if 'Python.framework' in prefix: home = os.environ.get('HOME') if home: sitedirs.append( os.path.join(home, 'Library', 'Python', sys.version[:3], 'site-packages')) for sitedir in sitedirs: if os.path.isdir(sitedir): addsitedir(sitedir, known_paths) return None def check_enableusersite(): """Check if user site directory is safe for inclusion The function tests for the command line flag (including environment var), process uid/gid equal to effective uid/gid. None: Disabled for security reasons False: Disabled by user (command line option) True: Safe and enabled """ if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False): return False if hasattr(os, "getuid") and hasattr(os, "geteuid"): # check process uid == effective uid if os.geteuid() != os.getuid(): return None if hasattr(os, "getgid") and hasattr(os, "getegid"): # check process gid == effective gid if os.getegid() != os.getgid(): return None return True def addusersitepackages(known_paths): """Add a per user site-package to sys.path Each user has its own python directory with site-packages in the home directory. 
USER_BASE is the root directory for all Python versions USER_SITE is the user specific site-packages directory USER_SITE/.. can be used for data. """ global USER_BASE, USER_SITE, ENABLE_USER_SITE env_base = os.environ.get("PYTHONUSERBASE", None) def joinuser(*args): return os.path.expanduser(os.path.join(*args)) #if sys.platform in ('os2emx', 'riscos'): # # Don't know what to put here # USER_BASE = '' # USER_SITE = '' if os.name == "nt": base = os.environ.get("APPDATA") or "~" if env_base: USER_BASE = env_base else: USER_BASE = joinuser(base, "Python") USER_SITE = os.path.join(USER_BASE, "Python" + sys.version[0] + sys.version[2], "site-packages") else: if env_base: USER_BASE = env_base else: USER_BASE = joinuser("~", ".local") USER_SITE = os.path.join(USER_BASE, "lib", "python" + sys.version[:3], "site-packages") if ENABLE_USER_SITE and os.path.isdir(USER_SITE): addsitedir(USER_SITE, known_paths) if ENABLE_USER_SITE: for dist_libdir in ("lib", "local/lib"): user_site = os.path.join(USER_BASE, dist_libdir, "python" + sys.version[:3], "dist-packages") if os.path.isdir(user_site): addsitedir(user_site, known_paths) return known_paths def setBEGINLIBPATH(): """The OS/2 EMX port has optional extension modules that do double duty as DLLs (and must use the .DLL file extension) for other extensions. The library search path needs to be amended so these will be found during module import. Use BEGINLIBPATH so that these are at the start of the library search path. """ dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload") libpath = os.environ['BEGINLIBPATH'].split(';') if libpath[-1]: libpath.append(dllpath) else: libpath[-1] = dllpath os.environ['BEGINLIBPATH'] = ';'.join(libpath) def setquit(): """Define new built-ins 'quit' and 'exit'. These are simply strings that display a hint on how to exit. """ if os.sep == ':': eof = 'Cmd-Q' elif os.sep == '\\': eof = 'Ctrl-Z plus Return' else: eof = 'Ctrl-D (i.e. 
EOF)' class Quitter(object): def __init__(self, name): self.name = name def __repr__(self): return 'Use %s() or %s to exit' % (self.name, eof) def __call__(self, code=None): # Shells like IDLE catch the SystemExit, but listen when their # stdin wrapper is closed. try: sys.stdin.close() except: pass raise SystemExit(code) builtins.quit = Quitter('quit') builtins.exit = Quitter('exit') class _Printer(object): """interactive prompt objects for printing the license text, a list of contributors and the copyright notice.""" MAXLINES = 23 def __init__(self, name, data, files=(), dirs=()): self.__name = name self.__data = data self.__files = files self.__dirs = dirs self.__lines = None def __setup(self): if self.__lines: return data = None for dir in self.__dirs: for filename in self.__files: filename = os.path.join(dir, filename) try: fp = open(filename, "rU") data = fp.read() fp.close() break except IOError: pass if data: break if not data: data = self.__data self.__lines = data.split('\n') self.__linecnt = len(self.__lines) def __repr__(self): self.__setup() if len(self.__lines) <= self.MAXLINES: return "\n".join(self.__lines) else: return "Type %s() to see the full %s text" % ((self.__name,)*2) def __call__(self): self.__setup() prompt = 'Hit Return for more, or q (and Return) to quit: ' lineno = 0 while 1: try: for i in range(lineno, lineno + self.MAXLINES): print(self.__lines[i]) except IndexError: break else: lineno += self.MAXLINES key = None while key is None: try: key = raw_input(prompt) except NameError: key = input(prompt) if key not in ('', 'q'): key = None if key == 'q': break def setcopyright(): """Set 'copyright' and 'credits' in __builtin__""" builtins.copyright = _Printer("copyright", sys.copyright) if _is_jython: builtins.credits = _Printer( "credits", "Jython is maintained by the Jython developers (www.jython.org).") elif _is_pypy: builtins.credits = _Printer( "credits", "PyPy is maintained by the PyPy developers: http://pypy.org/") else: 
builtins.credits = _Printer("credits", """\ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands for supporting Python development. See www.python.org for more information.""") here = os.path.dirname(os.__file__) builtins.license = _Printer( "license", "See http://www.python.org/%.3s/license.html" % sys.version, ["LICENSE.txt", "LICENSE"], [os.path.join(here, os.pardir), here, os.curdir]) class _Helper(object): """Define the built-in 'help'. This is a wrapper around pydoc.help (with a twist). """ def __repr__(self): return "Type help() for interactive help, " \ "or help(object) for help about object." def __call__(self, *args, **kwds): import pydoc return pydoc.help(*args, **kwds) def sethelper(): builtins.help = _Helper() def aliasmbcs(): """On Windows, some default encodings are not provided by Python, while they are always available as "mbcs" in each locale. Make them usable by aliasing to "mbcs" in such a case.""" if sys.platform == 'win32': import locale, codecs enc = locale.getdefaultlocale()[1] if enc.startswith('cp'): # "cp***" ? try: codecs.lookup(enc) except LookupError: import encodings encodings._cache[enc] = encodings._unknown encodings.aliases.aliases[enc] = 'mbcs' def setencoding(): """Set the string encoding used by the Unicode implementation. The default is 'ascii', but if you're willing to experiment, you can change this.""" encoding = "ascii" # Default value set by _PyUnicode_Init() if 0: # Enable to support locale aware default string encodings. import locale loc = locale.getdefaultlocale() if loc[1]: encoding = loc[1] if 0: # Enable to switch off string to Unicode coercion and implicit # Unicode to string conversion. encoding = "undefined" if encoding != "ascii": # On Non-Unicode builds this will raise an AttributeError... sys.setdefaultencoding(encoding) # Needs Python Unicode build ! 
def execsitecustomize(): """Run custom site specific code, if available.""" try: import sitecustomize except ImportError: pass def virtual_install_main_packages(): f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt')) sys.real_prefix = f.read().strip() f.close() pos = 2 hardcoded_relative_dirs = [] if sys.path[0] == '': pos += 1 if _is_jython: paths = [os.path.join(sys.real_prefix, 'Lib')] elif _is_pypy: if sys.version_info > (3, 2): cpyver = '%d' % sys.version_info[0] elif sys.pypy_version_info >= (1, 5): cpyver = '%d.%d' % sys.version_info[:2] else: cpyver = '%d.%d.%d' % sys.version_info[:3] paths = [os.path.join(sys.real_prefix, 'lib_pypy'), os.path.join(sys.real_prefix, 'lib-python', cpyver)] if sys.pypy_version_info < (1, 9): paths.insert(1, os.path.join(sys.real_prefix, 'lib-python', 'modified-%s' % cpyver)) hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below # # This is hardcoded in the Python executable, but relative to sys.prefix: for path in paths[:]: plat_path = os.path.join(path, 'plat-%s' % sys.platform) if os.path.exists(plat_path): paths.append(plat_path) elif sys.platform == 'win32': paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')] else: paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])] hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3]) if os.path.exists(lib64_path): if _is_64bit: paths.insert(0, lib64_path) else: paths.append(lib64_path) # This is hardcoded in the Python executable, but relative to # sys.prefix. Debian change: we need to add the multiarch triplet # here, which is where the real stuff lives. As per PEP 421, in # Python 3.3+, this lives in sys.implementation, while in Python 2.7 # it lives in sys. try: arch = getattr(sys, 'implementation', sys)._multiarch except AttributeError: # This is a non-multiarch aware Python. Fallback to the old way. 
arch = sys.platform plat_path = os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3], 'plat-%s' % arch) if os.path.exists(plat_path): paths.append(plat_path) # This is hardcoded in the Python executable, but # relative to sys.prefix, so we have to fix up: for path in list(paths): tk_dir = os.path.join(path, 'lib-tk') if os.path.exists(tk_dir): paths.append(tk_dir) # These are hardcoded in the Apple's Python executable, # but relative to sys.prefix, so we have to fix them up: if sys.platform == 'darwin': hardcoded_paths = [os.path.join(relative_dir, module) for relative_dir in hardcoded_relative_dirs for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')] for path in hardcoded_paths: if os.path.exists(path): paths.append(path) sys.path.extend(paths) def force_global_eggs_after_local_site_packages(): """ Force easy_installed eggs in the global environment to get placed in sys.path after all packages inside the virtualenv. This maintains the "least surprise" result that packages in the virtualenv always mask global packages, never the other way around. """ egginsert = getattr(sys, '__egginsert', 0) for i, path in enumerate(sys.path): if i > egginsert and path.startswith(sys.prefix): egginsert = i sys.__egginsert = egginsert + 1 def virtual_addsitepackages(known_paths): force_global_eggs_after_local_site_packages() return addsitepackages(known_paths, sys_prefix=sys.real_prefix) def fixclasspath(): """Adjust the special classpath sys.path entries for Jython. These entries should follow the base virtualenv lib directories. 
""" paths = [] classpaths = [] for path in sys.path: if path == '__classpath__' or path.startswith('__pyclasspath__'): classpaths.append(path) else: paths.append(path) sys.path = paths sys.path.extend(classpaths) def execusercustomize(): """Run custom user specific code, if available.""" try: import usercustomize except ImportError: pass def main(): global ENABLE_USER_SITE virtual_install_main_packages() abs__file__() paths_in_sys = removeduppaths() if (os.name == "posix" and sys.path and os.path.basename(sys.path[-1]) == "Modules"): addbuilddir() if _is_jython: fixclasspath() GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt')) if not GLOBAL_SITE_PACKAGES: ENABLE_USER_SITE = False if ENABLE_USER_SITE is None: ENABLE_USER_SITE = check_enableusersite() paths_in_sys = addsitepackages(paths_in_sys) paths_in_sys = addusersitepackages(paths_in_sys) if GLOBAL_SITE_PACKAGES: paths_in_sys = virtual_addsitepackages(paths_in_sys) if sys.platform == 'os2emx': setBEGINLIBPATH() setquit() setcopyright() sethelper() aliasmbcs() setencoding() execsitecustomize() if ENABLE_USER_SITE: execusercustomize() # Remove sys.setdefaultencoding() so that users cannot change the # encoding after initialization. The test for presence is needed when # this module is run as a script, because this code is executed twice. if hasattr(sys, "setdefaultencoding"): del sys.setdefaultencoding main() def _script(): help = """\ %s [--user-base] [--user-site] Without arguments print some useful information With arguments print the value of USER_BASE and/or USER_SITE separated by '%s'. 
Exit codes with --user-base or --user-site: 0 - user site directory is enabled 1 - user site directory is disabled by user 2 - uses site directory is disabled by super user or for security reasons >2 - unknown error """ args = sys.argv[1:] if not args: print("sys.path = [") for dir in sys.path: print(" %r," % (dir,)) print("]") def exists(path): if os.path.isdir(path): return "exists" else: return "doesn't exist" print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE))) print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_BASE))) print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE) sys.exit(0) buffer = [] if '--user-base' in args: buffer.append(USER_BASE) if '--user-site' in args: buffer.append(USER_SITE) if buffer: print(os.pathsep.join(buffer)) if ENABLE_USER_SITE: sys.exit(0) elif ENABLE_USER_SITE is False: sys.exit(1) elif ENABLE_USER_SITE is None: sys.exit(2) else: sys.exit(3) else: import textwrap print(textwrap.dedent(help % (sys.argv[0], os.pathsep))) sys.exit(10) if __name__ == '__main__': _script()
mit
dandv/selenium
py/selenium/webdriver/firefox/extension_connection.py
66
2846
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import time from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.common import utils from selenium.webdriver.remote.command import Command from selenium.webdriver.remote.remote_connection import RemoteConnection from selenium.webdriver.firefox.firefox_binary import FirefoxBinary LOGGER = logging.getLogger(__name__) PORT = 0 # HOST = None _URL = "" class ExtensionConnection(RemoteConnection): def __init__(self, host, firefox_profile, firefox_binary=None, timeout=30): self.profile = firefox_profile self.binary = firefox_binary HOST = host if self.binary is None: self.binary = FirefoxBinary() if HOST is None: HOST = "127.0.0.1" PORT = utils.free_port() self.profile.port = PORT self.profile.update_preferences() self.profile.add_extension() self.binary.launch_browser(self.profile) _URL = "http://%s:%d/hub" % (HOST, PORT) RemoteConnection.__init__( self, _URL, keep_alive=True) def quit(self, sessionId=None): self.execute(Command.QUIT, {'sessionId':sessionId}) while self.is_connectable(): LOGGER.info("waiting to quit") time.sleep(1) def connect(self): """Connects to the extension and retrieves the session id.""" return 
self.execute(Command.NEW_SESSION, {'desiredCapabilities': DesiredCapabilities.FIREFOX}) @classmethod def connect_and_quit(self): """Connects to an running browser and quit immediately.""" self._request('%s/extensions/firefox/quit' % _URL) @classmethod def is_connectable(self): """Trys to connect to the extension but do not retrieve context.""" utils.is_connectable(self.profile.port) class ExtensionConnectionError(Exception): """An internal error occurred int the extension. Might be caused by bad input or bugs in webdriver """ pass
apache-2.0
jgraham/treeherder
tests/model/derived/sample_data_generator.py
5
3570
""" Functions for flexible generation of sample input job JSON. """ import json import os import time from datetime import timedelta def ref_data_json(): """Return reference data json structure""" filename = os.path.join( os.path.abspath(os.path.dirname(__file__)), "ref_data.json", ) with open(filename) as f: return f.read() def job_json(**kwargs): """ return a tuple containing a job guid and its json data """ data = job_data(**kwargs) return json.dumps(data), data['job']['job_guid'] def job_data(**kwargs): jobs_obj = { "revision_hash": kwargs.get("revision_hash", "24fd64b8251fac5cf60b54a915bffa7e51f636b5"), "job": { u"build_platform": build_platform(**kwargs.pop("build_platform", {})), u"submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()), u"start_timestamp": kwargs.pop("start_timestamp", start_timestamp()), u"name": kwargs.pop("name", u"mochitest-5"), u"option_collection": option_collection( **kwargs.pop("option_collection", {})), u"log_references": log_references(kwargs.pop("log_references", [])), u"who": kwargs.pop("who", u"sendchange-unittest"), u"reason": kwargs.pop("reason", u"scheduler"), u"artifact": kwargs.pop("artifact", {}), u"machine_platform": machine_platform( **kwargs.pop("machine_platform", {})), u"machine": kwargs.pop("machine", u"talos-r3-xp-088"), u"state": kwargs.pop("state", u"completed"), u"result": kwargs.pop("result", 0), u"job_guid": kwargs.pop( u"job_guid", u"f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"), u"product_name": kwargs.pop("product_name", u"firefox"), u"end_timestamp": kwargs.pop("end_timestamp", end_timestamp()), } } # defaults.update(kwargs) return jobs_obj def to_seconds(td): return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6 ) / 10 ** 6 def get_timestamp_days_ago(days_ago): now = int(time.time()) return now - to_seconds(timedelta(int(days_ago))) def submit_timestamp(): """3 days ago""" return get_timestamp_days_ago(3) def start_timestamp(): """2 days ago""" return get_timestamp_days_ago(2) 
def end_timestamp():
    """Return a timestamp from 1 day ago."""
    return get_timestamp_days_ago(1)


def option_collection(**kwargs):
    """Return a sample option-collection dict; kwargs override the defaults."""
    return dict({u"debug": True}, **kwargs)


def log_references(log_refs=None):
    """Return the given log references, or a single default entry if empty."""
    if log_refs:
        return log_refs
    return [
        {
            u"url": u"http://ftp.mozilla.org/pub/...",
            u"name": u"unittest"
        }
    ]


def build_platform(**kwargs):
    """Return a sample build-platform dict; kwargs override the defaults."""
    return dict(
        {
            u"platform": u"WINNT5.1",
            u"os_name": u"win",
            u"architecture": u"x86",
            u"vm": False,
        },
        **kwargs
    )


def machine_platform(**kwargs):
    """Return a sample machine-platform dict; kwargs override the defaults."""
    return dict(
        {
            u"platform": u"WINNT5.1",
            u"os_name": u"win",
            u"architecture": u"x86",
            u"vm": False,
        },
        **kwargs
    )
mpl-2.0
nttks/jenkins-test
cms/djangoapps/contentstore/features/video_handout.py
116
1458
# -*- coding: utf-8 -*-
# Lettuce step definitions for the Studio video-handout acceptance tests.
# pylint: disable=missing-docstring

from lettuce import world, step
from nose.tools import assert_true  # pylint: disable=no-name-in-module
from video_editor import RequestHandlerWithSessionId, success_upload_file


@step('I (?:upload|replace) handout file(?: by)? "([^"]*)"$')
def upload_handout(step, filename):
    # Trigger the upload dialog, then push the fixture file through it.
    world.css_click('.wrapper-comp-setting.file-uploader .upload-action')
    success_upload_file(filename)


@step('I can download handout file( in editor)? with mime type "([^"]*)"$')
def i_can_download_handout_with_mime_type(_step, is_editor, mime_type):
    # The download link lives in a different place in the editor vs. the LMS view.
    selector = (
        '.wrapper-comp-setting.file-uploader .download-action'
        if is_editor
        else '.video-handout.video-download-button a'
    )
    download_url = world.css_find(selector).first['href']
    session_request = RequestHandlerWithSessionId()
    assert_true(session_request.get(download_url).is_success())
    assert_true(session_request.check_header('content-type', mime_type))


@step('I clear handout$')
def clear_handout(_step):
    world.css_click('.wrapper-comp-setting.file-uploader .setting-clear')


@step('I have created a Video component with handout file "([^"]*)"')
def create_video_with_handout(_step, filename):
    # Compose the component-creation scenario out of existing steps.
    for precondition in (
        'I have created a Video component',
        'I edit the component',
        'I open tab "Advanced"',
        'I upload handout file "{0}"'.format(filename),
    ):
        _step.given(precondition)
agpl-3.0
ironbox360/django
tests/absolute_url_overrides/tests.py
284
2007
from django.db import models
from django.test import SimpleTestCase


class AbsoluteUrlOverrideTests(SimpleTestCase):
    """Exercise ABSOLUTE_URL_OVERRIDES interaction with get_absolute_url()."""

    def test_get_absolute_url(self):
        """
        get_absolute_url() functions as a normal method.
        """
        def get_absolute_url(obj):
            return '/test-a/%s/' % obj.pk

        TestA = self._create_model_class('TestA', get_absolute_url)
        self.assertTrue(hasattr(TestA, 'get_absolute_url'))
        obj = TestA(pk=1, name='Foo')
        self.assertEqual('/test-a/%s/' % obj.pk, obj.get_absolute_url())

    def test_override_get_absolute_url(self):
        """
        ABSOLUTE_URL_OVERRIDES should override get_absolute_url().
        """
        def get_absolute_url(obj):
            return '/test-b/%s/' % obj.pk

        overrides = {
            'absolute_url_overrides.testb': lambda o: '/overridden-test-b/%s/' % o.pk,
        }
        with self.settings(ABSOLUTE_URL_OVERRIDES=overrides):
            TestB = self._create_model_class('TestB', get_absolute_url)
            obj = TestB(pk=1, name='Foo')
            self.assertEqual('/overridden-test-b/%s/' % obj.pk, obj.get_absolute_url())

    def test_insert_get_absolute_url(self):
        """
        ABSOLUTE_URL_OVERRIDES should work even if the model doesn't have a
        get_absolute_url() method.
        """
        overrides = {
            'absolute_url_overrides.testc': lambda o: '/test-c/%s/' % o.pk,
        }
        with self.settings(ABSOLUTE_URL_OVERRIDES=overrides):
            TestC = self._create_model_class('TestC')
            obj = TestC(pk=1, name='Foo')
            self.assertEqual('/test-c/%s/' % obj.pk, obj.get_absolute_url())

    def _create_model_class(self, class_name, get_absolute_url_method=None):
        # Build a throwaway model class dynamically so each test gets a
        # distinct app-label/model-name key for ABSOLUTE_URL_OVERRIDES.
        attrs = {
            '__module__': 'absolute_url_overrides',
            'name': models.CharField(max_length=50),
        }
        if get_absolute_url_method is not None:
            attrs['get_absolute_url'] = get_absolute_url_method
        return type(class_name, (models.Model,), attrs)
bsd-3-clause
RafaelTorrealba/odoo
addons/l10n_multilang/l10n_multilang.py
378
8428
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
import os
from openerp.tools.translate import _

import logging

_logger = logging.getLogger(__name__)


class wizard_multi_charts_accounts(osv.osv_memory):
    """
    Multilang extension of the chart-of-accounts setup wizard.
        * Adds the option to install languages during the setup.
        * Copies translations for COA, Tax, Tax Code and Fiscal Position
          from the chart templates to the generated target records.
    """
    _inherit = 'wizard.multi.charts.accounts'

    # FIXME: in trunk, drop the force_write param entirely
    def process_translations(self, cr, uid, langs, in_obj, in_field, in_ids, out_obj, out_ids, force_write=False, context=None):
        """
        This method copies translations values of templates into new Accounts/Taxes/Journals for languages selected

        :param cr: A database cursor
        :param uid: ID of the user currently logged in
        :param langs: List of languages to load for new records
        :param in_field: Name of the translatable field of source templates
        :param in_obj: Name of source object of templates.
        :param in_ids: List of ids of source object
        :param out_obj: Destination object for which translation is to be copied
        :param out_ids: List of ids of destination object
        :param force_write: Deprecated as of 7.0, do not use
        :param context: usual context information. May contain the key 'lang',
            which is the language of the user running the wizard, that will be
            used if force_write is True
        :return: True
        """
        # NOTE(review): this pairs in_ids[j] with out_ids[j] positionally — it
        # assumes callers pass template and generated record ids in the same
        # relative order; verify against the _process_* helpers below.
        if context is None:
            context = {}
        src = {}
        xlat_obj = self.pool.get('ir.translation')
        # find the source from Account Template
        for x in in_obj.browse(cr, uid, in_ids):
            src.update({x.id: x.name})
        for lang in langs:
            # find the value from Translation
            value = xlat_obj._get_ids(cr, uid, in_obj._name + ',' + in_field, 'model', lang, in_ids)
            for j in range(len(in_ids)):
                in_id = in_ids[j]
                if value[in_id]:
                    # copy Translation from Source to Destination object
                    xlat_obj.create(cr, uid, {
                        'name': out_obj._name + ',' + in_field,
                        'type': 'model',
                        'res_id': out_ids[j],
                        'lang': lang,
                        'src': src[in_id],
                        'value': value[in_id],
                    })
                else:
                    _logger.info('Language: %s. Translation from template: there is no translation available for %s!'
                                 %(lang, src[in_id]))#out_obj._name))
        return True

    def execute(self, cr, uid, ids, context=None):
        # Run the standard chart installation, then copy template translations
        # for every spoken language of the template that is already installed.
        if not context:
            context = {}
        # remove the lang to get the untranslated value
        ctx = dict(context, lang=None)
        res = super(wizard_multi_charts_accounts, self).execute(cr, uid, ids, context=ctx)
        obj_multi = self.browse(cr, uid, ids[0], context=context)
        company_id = obj_multi.company_id.id

        # load languages
        langs = []
        res_lang_obj = self.pool.get('res.lang')
        installed_lang_ids = res_lang_obj.search(cr, uid, [])
        installed_langs = [x.code for x in res_lang_obj.browse(cr, uid, installed_lang_ids, context=context)]
        if obj_multi.chart_template_id.spoken_languages:
            for lang in obj_multi.chart_template_id.spoken_languages.split(';'):
                if lang not in installed_langs:
                    # the language is not installed, so we don't need to load its translations
                    continue
                else:
                    # the language was already installed, so the po files have been loaded at the installation time
                    # and now we need to copy the translations of templates to the right objects
                    langs.append(lang)
            if langs:
                # write account.account translations in the real COA
                self._process_accounts_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.tax.code translations
                self._process_tax_codes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.tax translations
                self._process_taxes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.fiscal.position translations
                self._process_fiscal_pos_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
        return res

    def _process_accounts_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        # Pair template accounts with the generated company accounts.
        # The [1:] slices drop the root account on both sides.
        obj_acc_template = self.pool.get('account.account.template')
        obj_acc = self.pool.get('account.account')
        acc_template_root_id = obj_multi.chart_template_id.account_root_id.id
        acc_root_id = obj_acc.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
        in_ids = obj_acc_template.search(cr, uid, [('id', 'child_of', [acc_template_root_id])], order='id')[1:]
        out_ids = obj_acc.search(cr, uid, [('id', 'child_of', [acc_root_id])], order='id')[1:]
        return self.process_translations(cr, uid, langs, obj_acc_template, field, in_ids, obj_acc, out_ids, context=context)

    def _process_tax_codes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        # Same positional pairing as accounts, for tax codes ([1:] drops the root).
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_tax_code = self.pool.get('account.tax.code')
        tax_code_template_root_id = obj_multi.chart_template_id.tax_code_root_id.id
        tax_code_root_id = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
        in_ids = obj_tax_code_template.search(cr, uid, [('id', 'child_of', [tax_code_template_root_id])], order='id')[1:]
        out_ids = obj_tax_code.search(cr, uid, [('id', 'child_of', [tax_code_root_id])], order='id')[1:]
        return self.process_translations(cr, uid, langs, obj_tax_code_template, field, in_ids, obj_tax_code, out_ids, context=context)

    def _process_taxes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        # Taxes: template ids come straight from the chart template record.
        obj_tax_template = self.pool.get('account.tax.template')
        obj_tax = self.pool.get('account.tax')
        in_ids = [x.id for x in obj_multi.chart_template_id.tax_template_ids]
        out_ids = obj_tax.search(cr, uid, [('company_id', '=', company_id)], order='id')
        return self.process_translations(cr, uid, langs, obj_tax_template, field, in_ids, obj_tax, out_ids, context=context)

    def _process_fiscal_pos_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        # Fiscal positions: both sides fetched ordered by id for positional pairing.
        obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
        obj_fiscal_position = self.pool.get('account.fiscal.position')
        in_ids = obj_fiscal_position_template.search(cr, uid, [('chart_template_id', '=', obj_multi.chart_template_id.id)], order='id')
        out_ids = obj_fiscal_position.search(cr, uid, [('company_id', '=', company_id)], order='id')
        return self.process_translations(cr, uid, langs, obj_fiscal_position_template, field, in_ids, obj_fiscal_position, out_ids, context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
RenaudParis/servo
tests/wpt/harness/wptrunner/browsers/__init__.py
134
1535
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. """Subpackage where each product is defined. Each product is created by adding a a .py file containing a __wptrunner__ variable in the global scope. This must be a dictionary with the fields "product": Name of the product, assumed to be unique. "browser": String indicating the Browser implementation used to launch that product. "executor": Dictionary with keys as supported test types and values as the name of the Executor implemantation that will be used to run that test type. "browser_kwargs": String naming function that takes product, binary, prefs_root and the wptrunner.run_tests kwargs dict as arguments and returns a dictionary of kwargs to use when creating the Browser class. "executor_kwargs": String naming a function that takes http server url and timeout multiplier and returns kwargs to use when creating the executor class. "env_options": String naming a funtion of no arguments that returns the arguments passed to the TestEnvironment. All classes and functions named in the above dict must be imported into the module global scope. """ product_list = ["b2g", "chrome", "firefox", "servo", "servodriver"]
mpl-2.0
grazcoin/mastercoin-tools
msc_utils_general.py
1
6299
#!/usr/bin/python ####################################################### # # # Copyright Masterchain Grazcoin Grimentz 2013-2014 # # https://github.com/grazcoin/mastercoin-tools # # https://masterchain.info # # masterchain@@bitmessage.ch # # License AGPLv3 # # # ####################################################### import subprocess import inspect import json import time import git import os import msc_globals LAST_BLOCK_NUMBER_FILE='last_block.txt' # needed for deciding on error format: exodus_address='1EXoDusjGwvnjZUyKkxZ4UHEf77z6A5S4P' def run_command(command, input_str=None, ignore_stderr=False): if ignore_stderr: if input_str!=None: p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) return p.communicate(input_str) else: p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) return p.communicate() else: if input_str!=None: p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return p.communicate(input_str) else: p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return p.communicate() def error(msg): last_block_msg='' func_name='unknown' try: func_name=inspect.stack()[1][3] except IndexError: pass # on parse: update last block if func_name.startswith('parse'): # store last parsed block try: if msc_globals.exodus_scan == exodus_address: f=open(LAST_BLOCK_NUMBER_FILE,'w') f.write(str(msc_globals.last_block)+'\n') f.close() last_block_msg=' ('+str(msc_globals.last_block)+')' else: last_block_msg='['+msc_globals.exodus_scan+'] ('+str(msc_globals.last_block)+')' except IOError: pass print '[E] '+func_name+': '+str(msg)+last_block_msg exit(1) def info(msg): func_name='unknown' try: func_name=inspect.stack()[1][3] except IndexError: pass print '[I] '+func_name+': '+str(msg) def debug(msg): if msc_globals.d == True: func_name='unknown' try: func_name=inspect.stack()[1][3] except IndexError: pass print '[D] 
'+func_name+': '+str(msg) def formatted_decimal(float_number): s=str("{0:.8f}".format(float_number)) if s.strip('0.') == '': # only zero and/or decimal point return '0.0' else: trimmed=s.rstrip('0') # remove zeros on the right if trimmed.endswith('.'): # make sure there is at least one zero on the right return trimmed+'0' else: if trimmed.find('.')==-1: return trimmed+'.0' else: return trimmed def format_time_from_struct(st, short=False): if short: return time.strftime('%Y%m%d',st) else: return time.strftime('%d %b %Y %H:%M:%S GMT',st) def format_time_from_epoch(epoch, short=False): return format_time_from_struct(time.localtime(int(epoch)), short) def get_git_details(directory="~/mastercoin-tools"): repo = git.Repo(directory) assert repo.bare == False head_commit=repo.head.commit timestamp=format_time_from_epoch(int(head_commit.authored_date), True) return(head_commit.hexsha,timestamp) def archive_repo(directory="~/mastercoin-tools"): (commit_hexsha, timestamp)=get_git_details( directory ) assert repo.bare == False archive_name='www/downloads/mastercoin-tools-src-'+timestamp+'-'+commit_hexsha[:8]+'-'+timestamp+'.tar' repo = git.Repo(directory) repo.archive(open(archive_name,'w')) def archive_parsed_data(directory="~/mastercoin-tools"): (commit_hexsha, timestamp)=get_git_details( directory ) archive_name='www/downloads/mastercoin-tools-parse-snapshot-'+timestamp+'-'+commit_hexsha[:8]+'.tar.gz' path_to_archive='www/revision.json tx addr general offers' out, err = run_command("tar cz "+path_to_archive+" -f "+archive_name) if err != None: return err else: return out def get_now(): return format_time_from_struct(time.gmtime()) def get_today(): return format_time_from_struct(time.gmtime(), True) def get_revision_dict( last_block, directory="~/mastercoin-tools" ): rev={} git_details=get_git_details( directory ) hexsha=git_details[0] commit_time=git_details[1] rev['commit_hexsha']=hexsha rev['commit_time']=commit_time 
rev['url']='https://github.com/grazcoin/mastercoin-tools/commit/'+hexsha rev['last_parsed']=get_now() rev['last_block']=last_block return rev def get_string_xor(s1,s2): result = int(s1, 16) ^ int(s2, 16) return '{:x}'.format(result) def load_dict_from_file(filename, all_list=False, skip_error=False): tmp_dict={} try: f=open(filename,'r') if all_list == False: tmp_dict=json.load(f)[0] else: tmp_dict=json.load(f) f.close() except IOError: # no such file? if skip_error: info('dict load failed. missing '+filename) else: error('dict load failed. missing '+filename) return tmp_dict # mkdir -p function def mkdirp(directory): if not os.path.isdir(directory): os.makedirs(directory) # dump json to a file, and replace it atomically def atomic_json_dump(tmp_dict, filename, add_brackets=True): # check if filename already exists # if exists, write to a tmp file first # then move atomically # make sure path exists path, only_filename = os.path.split(filename) mkdirp(path) f=open(filename,'w') if add_brackets: f.write('[') f.write(json.dumps(tmp_dict, sort_keys=True)) if add_brackets: f.write(']') f.write('\n') f.close()
agpl-3.0
nwjs/chromium.src
chrome/test/data/nacl/gdb_rsp.py
42
2542
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This file is based on gdb_rsp.py file from NaCl repository.
# NOTE: Python 2 code — socket data and protocol payloads are byte strings.

import re
import socket
import time


def RspChecksum(data):
    """Return the GDB RSP checksum: sum of payload bytes modulo 256."""
    checksum = 0
    for char in data:
        checksum = (checksum + ord(char)) % 0x100
    return checksum


class EofOnReplyException(Exception):
    # Raised when the peer closes the connection right after acknowledging
    # ('+') our packet — e.g. after a kill request.
    pass


class GdbRspConnection(object):
    """Client connection speaking GDB's Remote Serial Protocol.

    Packets are framed as '$<payload>#<2-hex-digit checksum>' and each
    received packet is acknowledged with '+'.
    """

    def __init__(self, addr):
        self._socket = self._Connect(addr)

    def _Connect(self, addr):
        # We have to poll because we do not know when sel_ldr has
        # successfully done bind() on the TCP port.  This is inherently
        # unreliable.
        # TODO(mseaborn): Add a more reliable connection mechanism to
        # sel_ldr's debug stub.
        timeout_in_seconds = 10
        poll_time_in_seconds = 0.1
        for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
            # On Mac OS X, we have to create a new socket FD for each retry.
            sock = socket.socket()
            try:
                sock.connect(addr)
            except socket.error:
                # Retry after a delay.
                time.sleep(poll_time_in_seconds)
            else:
                return sock
        raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
                        % timeout_in_seconds)

    def _GetReply(self):
        """Read one reply packet, verify its checksum, ack it, and return
        the payload (the text between '$' and '#')."""
        reply = ''
        while True:
            data = self._socket.recv(1024)
            if len(data) == 0:
                # Connection closed: a lone '+' means the request was
                # acknowledged before the stub exited.
                if reply == '+':
                    raise EofOnReplyException()
                raise AssertionError('EOF on socket reached with '
                                     'incomplete reply message: %r' % reply)
            reply += data
            # '#' starts the 2-digit checksum trailer, so the packet is
            # complete once we have seen it.
            if '#' in data:
                break
        # Expect the peer's ack ('+') followed by one framed packet.
        match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
        if match is None:
            raise AssertionError('Unexpected reply message: %r' % reply)
        reply_body = match.group(1)
        checksum = match.group(2)
        expected_checksum = '%02x' % RspChecksum(reply_body)
        if checksum != expected_checksum:
            raise AssertionError('Bad RSP checksum: %r != %r' %
                                 (checksum, expected_checksum))
        # Send acknowledgement.
        self._socket.send('+')
        return reply_body

    # Send an rsp message, but don't wait for or expect a reply.
    def RspSendOnly(self, data):
        msg = '$%s#%02x' % (data, RspChecksum(data))
        return self._socket.send(msg)

    def RspRequest(self, data):
        """Send a packet and return the payload of the peer's reply."""
        self.RspSendOnly(data)
        return self._GetReply()

    def RspInterrupt(self):
        # 0x03 is the out-of-band interrupt byte (no packet framing).
        self._socket.send('\x03')
        return self._GetReply()
bsd-3-clause
robbiet480/home-assistant
homeassistant/components/slide/cover.py
16
3736
"""Support for Slide slides.""" import logging from homeassistant.components.cover import ( ATTR_POSITION, DEVICE_CLASS_CURTAIN, STATE_CLOSED, STATE_CLOSING, STATE_OPENING, CoverEntity, ) from homeassistant.const import ATTR_ID from .const import API, DEFAULT_OFFSET, DOMAIN, SLIDES _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up cover(s) for Slide platform.""" if discovery_info is None: return entities = [] for slide in hass.data[DOMAIN][SLIDES].values(): _LOGGER.debug("Setting up Slide entity: %s", slide) entities.append(SlideCover(hass.data[DOMAIN][API], slide)) async_add_entities(entities) class SlideCover(CoverEntity): """Representation of a Slide cover.""" def __init__(self, api, slide): """Initialize the cover.""" self._api = api self._slide = slide self._id = slide["id"] self._unique_id = slide["mac"] self._name = slide["name"] self._invert = slide["invert"] @property def unique_id(self): """Return the device unique id.""" return self._unique_id @property def name(self): """Return the device name.""" return self._name @property def device_state_attributes(self): """Return device specific state attributes.""" return {ATTR_ID: self._id} @property def is_opening(self): """Return if the cover is opening or not.""" return self._slide["state"] == STATE_OPENING @property def is_closing(self): """Return if the cover is closing or not.""" return self._slide["state"] == STATE_CLOSING @property def is_closed(self): """Return None if status is unknown, True if closed, else False.""" if self._slide["state"] is None: return None return self._slide["state"] == STATE_CLOSED @property def available(self): """Return False if state is not available.""" return self._slide["online"] @property def assumed_state(self): """Let HA know the integration is assumed state.""" return True @property def device_class(self): """Return the device class of the cover.""" return DEVICE_CLASS_CURTAIN @property 
def current_cover_position(self): """Return the current position of cover shutter.""" pos = self._slide["pos"] if pos is not None: if (1 - pos) <= DEFAULT_OFFSET or pos <= DEFAULT_OFFSET: pos = round(pos) if not self._invert: pos = 1 - pos pos = int(pos * 100) return pos async def async_open_cover(self, **kwargs): """Open the cover.""" self._slide["state"] = STATE_OPENING await self._api.slide_open(self._id) async def async_close_cover(self, **kwargs): """Close the cover.""" self._slide["state"] = STATE_CLOSING await self._api.slide_close(self._id) async def async_stop_cover(self, **kwargs): """Stop the cover.""" await self._api.slide_stop(self._id) async def async_set_cover_position(self, **kwargs): """Move the cover to a specific position.""" position = kwargs[ATTR_POSITION] / 100 if not self._invert: position = 1 - position if self._slide["pos"] is not None: if position > self._slide["pos"]: self._slide["state"] = STATE_CLOSING else: self._slide["state"] = STATE_OPENING await self._api.slide_set_position(self._id, position)
apache-2.0
rschuitema/misra
src/misra/violation.py
1
1633
""" This module represents a violation against a MISRA guideline """ class Violation: """ Represents a violation against a MISRA guideline """ def __init__(self, file, line, column, guideline, entity): """ Construct a violation """ self.file = file self.line = line self.column = column self.guideline = guideline self.entity = entity def set_file(self, file): """ Set the file in which the violation ocurred """ self.file = file def get_file(self): """ Get the file in which the violation ocurred """ return self.file def set_line(self, line): """ Set the line number in which the violation ocurred """ self.line = line def get_line(self): """ Get the line number in which the violation ocurred """ return self.line def set_column(self, column): """ Set the column number of the line in which the violation ocurred """ self.column = column def get_column(self): """ Get the column number of the line in which the violation ocurred """ return self.column def set_entity(self, entity): """ Set the entity in which violated the guideline """ self.entity = entity def get_entity(self): """ Get the entity in which violated the guideline """ return self.entity def set_guideline(self, guideline): """ Set the guideline that is violated """ self.guideline = guideline def get_guideline(self): """ Get the guideline that is violated """ return self.guideline
mit
wangshijin/cappuccino
Tools/XcodeCapp/XcodeCapp/Scripts/mod_pbxproj.py
18
43282
# Copyright 2012 Calvin Rien # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A pbxproj file is an OpenStep format plist # {} represents dictionary of key=value pairs delimited by ; # () represents list of values delimited by , # file starts with a comment specifying the character type # // !$*UTF8*$! # when adding a file to a project, create the PBXFileReference # add the PBXFileReference's guid to a group # create a PBXBuildFile with the PBXFileReference's guid # add the PBXBuildFile to the appropriate build phase # when adding a header search path add # HEADER_SEARCH_PATHS = "path/**"; # to each XCBuildConfiguration object # Xcode4 will read either a OpenStep or XML plist. # this script uses `plutil` to validate, read and write # the pbxproj file. 
Plutil is available in OS X 10.2 and higher # Plutil can't write OpenStep plists, so I save as XML import datetime import json import ntpath import os import plistlib import re import shutil import subprocess import uuid from UserDict import IterableUserDict from UserList import UserList regex = '[a-zA-Z0-9\\._/-]*' class PBXEncoder(json.JSONEncoder): def default(self, obj): """Tests the input object, obj, to encode as JSON.""" if isinstance(obj, (PBXList, PBXDict)): return obj.data return json.JSONEncoder.default(self, obj) class PBXDict(IterableUserDict): def __init__(self, d=None): if d: d = dict([(PBXType.Convert(k), PBXType.Convert(v)) for k, v in d.items()]) IterableUserDict.__init__(self, d) def __setitem__(self, key, value): IterableUserDict.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value)) def remove(self, key): self.data.pop(PBXType.Convert(key), None) class PBXList(UserList): def __init__(self, l=None): if isinstance(l, basestring): UserList.__init__(self) self.add(l) return elif l: l = [PBXType.Convert(v) for v in l] UserList.__init__(self, l) def add(self, value): value = PBXType.Convert(value) if value in self.data: return False self.data.append(value) return True def remove(self, value): value = PBXType.Convert(value) if value in self.data: self.data.remove(value) def __setitem__(self, key, value): UserList.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value)) class PBXType(PBXDict): def __init__(self, d=None): PBXDict.__init__(self, d) if 'isa' not in self: self['isa'] = self.__class__.__name__ self.id = None @staticmethod def Convert(o): if isinstance(o, list): return PBXList(o) elif isinstance(o, dict): isa = o.get('isa') if not isa: return PBXDict(o) cls = globals().get(isa) if cls and issubclass(cls, PBXType): return cls(o) print 'warning: unknown PBX type: %s' % isa return PBXDict(o) else: return o @staticmethod def IsGuid(o): return re.match('^[A-F0-9]{24}$', str(o)) @classmethod def GenerateId(cls): return 
''.join(str(uuid.uuid4()).upper().split('-')[1:]) @classmethod def Create(cls, *args, **kwargs): return cls(*args, **kwargs) class PBXFileReference(PBXType): def __init__(self, d=None): PBXType.__init__(self, d) self.build_phase = None types = { '.a': ('archive.ar', 'PBXFrameworksBuildPhase'), '.app': ('wrapper.application', None), '.s': ('sourcecode.asm', 'PBXSourcesBuildPhase'), '.c': ('sourcecode.c.c', 'PBXSourcesBuildPhase'), '.cpp': ('sourcecode.cpp.cpp', 'PBXSourcesBuildPhase'), '.framework': ('wrapper.framework', 'PBXFrameworksBuildPhase'), '.h': ('sourcecode.c.h', None), '.icns': ('image.icns', 'PBXResourcesBuildPhase'), '.m': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'), '.j': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'), '.mm': ('sourcecode.cpp.objcpp', 'PBXSourcesBuildPhase'), '.nib': ('wrapper.nib', 'PBXResourcesBuildPhase'), '.plist': ('text.plist.xml', 'PBXResourcesBuildPhase'), '.json': ('text.json', 'PBXResourcesBuildPhase'), '.png': ('image.png', 'PBXResourcesBuildPhase'), '.rtf': ('text.rtf', 'PBXResourcesBuildPhase'), '.tiff': ('image.tiff', 'PBXResourcesBuildPhase'), '.txt': ('text', 'PBXResourcesBuildPhase'), '.xcodeproj': ('wrapper.pb-project', None), '.xib': ('file.xib', 'PBXResourcesBuildPhase'), '.strings': ('text.plist.strings', 'PBXResourcesBuildPhase'), '.bundle': ('wrapper.plug-in', 'PBXResourcesBuildPhase'), '.dylib': ('compiled.mach-o.dylib', 'PBXFrameworksBuildPhase') } trees = [ '<absolute>', '<group>', 'BUILT_PRODUCTS_DIR', 'DEVELOPER_DIR', 'SDKROOT', 'SOURCE_ROOT', ] def guess_file_type(self, ignore_unknown_type=False): self.remove('explicitFileType') self.remove('lastKnownFileType') if os.path.isdir(self.get('path')): f_type = 'folder' build_phase = None ext = '' else: ext = os.path.splitext(self.get('name', ''))[1] f_type, build_phase = PBXFileReference.types.get(ext, ('?', None)) self['lastKnownFileType'] = f_type self.build_phase = build_phase if f_type == '?' 
and not ignore_unknown_type: print 'unknown file extension: %s' % ext print 'please add extension and Xcode type to PBXFileReference.types' return f_type def set_file_type(self, ft): self.remove('explicitFileType') self.remove('lastKnownFileType') self['explicitFileType'] = ft @classmethod def Create(cls, os_path, tree='SOURCE_ROOT', ignore_unknown_type=False): if tree not in cls.trees: print 'Not a valid sourceTree type: %s' % tree return None fr = cls() fr.id = cls.GenerateId() fr['path'] = os_path fr['name'] = os.path.split(os_path)[1] fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree fr.guess_file_type(ignore_unknown_type=ignore_unknown_type) return fr class PBXBuildFile(PBXType): def set_weak_link(self, weak=False): k_settings = 'settings' k_attributes = 'ATTRIBUTES' s = self.get(k_settings) if not s: if weak: self[k_settings] = PBXDict({k_attributes: PBXList(['Weak'])}) return True atr = s.get(k_attributes) if not atr: if weak: atr = PBXList() else: return False if weak: atr.add('Weak') else: atr.remove('Weak') self[k_settings][k_attributes] = atr return True def add_compiler_flag(self, flag): k_settings = 'settings' k_attributes = 'COMPILER_FLAGS' if k_settings not in self: self[k_settings] = PBXDict() if k_attributes not in self[k_settings]: self[k_settings][k_attributes] = flag return True flags = self[k_settings][k_attributes].split(' ') if flag in flags: return False flags.append(flag) self[k_settings][k_attributes] = ' '.join(flags) @classmethod def Create(cls, file_ref, weak=False): if isinstance(file_ref, PBXFileReference): file_ref = file_ref.id bf = cls() bf.id = cls.GenerateId() bf['fileRef'] = file_ref if weak: bf.set_weak_link(True) return bf class PBXGroup(PBXType): def add_child(self, ref): if not isinstance(ref, PBXDict): return None isa = ref.get('isa') if isa != 'PBXFileReference' and isa != 'PBXGroup': return None if 'children' not in self: self['children'] = PBXList() self['children'].add(ref.id) return ref.id def 
remove_child(self, id): if 'children' not in self: self['children'] = PBXList() return if not PBXType.IsGuid(id): id = id.id self['children'].remove(id) def has_child(self, id): if 'children' not in self: self['children'] = PBXList() return False if not PBXType.IsGuid(id): id = id.id return id in self['children'] def get_name(self): path_name = os.path.split(self.get('path', ''))[1] return self.get('name', path_name) @classmethod def Create(cls, name, path=None, tree='SOURCE_ROOT'): grp = cls() grp.id = cls.GenerateId() grp['name'] = name grp['children'] = PBXList() if path: grp['path'] = path grp['sourceTree'] = tree else: grp['sourceTree'] = '<group>' return grp class PBXNativeTarget(PBXType): pass class PBXProject(PBXType): pass class PBXContainerItemProxy(PBXType): pass class PBXReferenceProxy(PBXType): pass class PBXVariantGroup(PBXType): pass class PBXTargetDependency(PBXType): pass class PBXBuildPhase(PBXType): def add_build_file(self, bf): if bf.get('isa') != 'PBXBuildFile': return False if 'files' not in self: self['files'] = PBXList() self['files'].add(bf.id) return True def remove_build_file(self, id): if 'files' not in self: self['files'] = PBXList() return self['files'].remove(id) def has_build_file(self, id): if 'files' not in self: self['files'] = PBXList() return False if not PBXType.IsGuid(id): id = id.id return id in self['files'] class PBXFrameworksBuildPhase(PBXBuildPhase): pass class PBXResourcesBuildPhase(PBXBuildPhase): pass class PBXShellScriptBuildPhase(PBXBuildPhase): pass class PBXSourcesBuildPhase(PBXBuildPhase): pass class PBXCopyFilesBuildPhase(PBXBuildPhase): pass class XCBuildConfiguration(PBXType): def add_search_paths(self, paths, base, key, recursive=True, escape=True): modified = False if not isinstance(paths, list): paths = [paths] if base not in self: self[base] = PBXDict() for path in paths: if recursive and not path.endswith('/**'): path = os.path.join(path, '**') if key not in self[base]: self[base][key] = PBXList() elif 
isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if escape: if self[base][key].add('\\"%s\\"' % path): # '\\"%s\\"' % path modified = True else: if self[base][key].add(path): # '\\"%s\\"' % path modified = True return modified def add_header_search_paths(self, paths, recursive=True): return self.add_search_paths(paths, 'buildSettings', 'HEADER_SEARCH_PATHS', recursive=recursive) def add_library_search_paths(self, paths, recursive=True): return self.add_search_paths(paths, 'buildSettings', 'LIBRARY_SEARCH_PATHS', recursive=recursive) def add_framework_search_paths(self, paths, recursive=True): return self.add_search_paths(paths, 'buildSettings', 'FRAMEWORK_SEARCH_PATHS', recursive=recursive, escape=False) def add_other_cflags(self, flags): modified = False base = 'buildSettings' key = 'OTHER_CFLAGS' if isinstance(flags, basestring): flags = PBXList(flags) if base not in self: self[base] = PBXDict() for flag in flags: if key not in self[base]: self[base][key] = PBXList() elif isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if self[base][key].add(flag): self[base][key] = [e for e in self[base][key] if e] modified = True return modified def add_other_ldflags(self, flags): modified = False base = 'buildSettings' key = 'OTHER_LDFLAGS' if isinstance(flags, basestring): flags = PBXList(flags) if base not in self: self[base] = PBXDict() for flag in flags: if key not in self[base]: self[base][key] = PBXList() elif isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if self[base][key].add(flag): self[base][key] = [e for e in self[base][key] if e] modified = True return modified def remove_other_ldflags(self, flags): modified = False base = 'buildSettings' key = 'OTHER_LDFLAGS' if isinstance(flags, basestring): flags = PBXList(flags) if base in self: # there are flags, so we can "remove" something for flag in flags: if key not in self[base]: return False elif 
isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if self[base][key].remove(flag): self[base][key] = [e for e in self[base][key] if e] modified = True return modified class XCConfigurationList(PBXType): pass class XcodeProject(PBXDict): plutil_path = 'plutil' special_folders = ['.bundle', '.framework', '.xcodeproj'] def __init__(self, d=None, path=None): if not path: path = os.path.join(os.getcwd(), 'project.pbxproj') self.pbxproj_path = os.path.abspath(path) self.source_root = os.path.abspath(os.path.join(os.path.split(path)[0], '..')) IterableUserDict.__init__(self, d) self.data = PBXDict(self.data) self.objects = self.get('objects') self.modified = False root_id = self.get('rootObject') if root_id: self.root_object = self.objects[root_id] root_group_id = self.root_object.get('mainGroup') self.root_group = self.objects[root_group_id] else: print "error: project has no root object" self.root_object = None self.root_group = None for k, v in self.objects.iteritems(): v.id = k def add_other_cflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_other_cflags(flags): self.modified = True def add_other_ldflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_other_ldflags(flags): self.modified = True def remove_other_ldflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.remove_other_ldflags(flags): self.modified = True def add_header_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_header_search_paths(paths, recursive): self.modified = True def add_framework_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() 
if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_framework_search_paths(paths, recursive): self.modified = True def add_library_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_library_search_paths(paths, recursive): self.modified = True # TODO: need to return value if project has been modified def get_obj(self, id): return self.objects.get(id) def get_ids(self): return self.objects.keys() def get_files_by_os_path(self, os_path, tree='SOURCE_ROOT'): files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('path') == os_path and f.get('sourceTree') == tree] return files def get_files_by_name(self, name, parent=None): if parent: files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get(name) == name and parent.has_child(f)] else: files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get(name) == name] return files def get_build_files(self, id): files = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile' and f.get('fileRef') == id] return files def get_groups_by_name(self, name, parent=None): if parent: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name and parent.has_child(g)] else: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name] return groups def get_or_create_group(self, name, path=None, parent=None): if not name: return None if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) groups = self.get_groups_by_name(name) for grp in groups: if parent.has_child(grp.id): return grp grp = PBXGroup.Create(name, path) parent.add_child(grp) self.objects[grp.id] = grp self.modified = True return grp def get_groups_by_os_path(self, 
path): path = os.path.abspath(path) groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and os.path.abspath(g.get('path', '/dev/null')) == path] return groups def get_build_phases(self, phase_name): phases = [p for p in self.objects.values() if p.get('isa') == phase_name] return phases def get_relative_path(self, os_path): return os.path.relpath(os_path, self.source_root) def verify_files(self, file_list, parent=None): # returns list of files not in the current project. if not file_list: return [] if parent: exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list and parent.has_child(f)] else: exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list] return set(file_list).difference(exists_list) def add_folder(self, os_path, parent=None, excludes=None, recursive=True, create_build_files=True): if not os.path.isdir(os_path): return [] if not excludes: excludes = [] results = [] if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) path_dict = {os.path.split(os_path)[0]: parent} special_list = [] for (grp_path, subdirs, files) in os.walk(os_path): parent_folder, folder_name = os.path.split(grp_path) parent = path_dict.get(parent_folder, parent) if [sp for sp in special_list if parent_folder.startswith(sp)]: continue if folder_name.startswith('.'): special_list.append(grp_path) continue if os.path.splitext(grp_path)[1] in XcodeProject.special_folders: # if this file has a special extension (bundle or framework mainly) treat it as a file special_list.append(grp_path) new_files = self.verify_files([folder_name], parent=parent) if new_files: results.extend(self.add_file(grp_path, parent, create_build_files=create_build_files)) continue # create group grp = self.get_or_create_group(folder_name, 
path=self.get_relative_path(grp_path), parent=parent) path_dict[grp_path] = grp results.append(grp) file_dict = {} for f in files: if f[0] == '.' or [m for m in excludes if re.match(m, f)]: continue kwds = { 'create_build_files': create_build_files, 'parent': grp, 'name': f } f_path = os.path.join(grp_path, f) file_dict[f_path] = kwds new_files = self.verify_files([n.get('name') for n in file_dict.values()], parent=grp) add_files = [(k, v) for k, v in file_dict.items() if v.get('name') in new_files] for path, kwds in add_files: kwds.pop('name', None) self.add_file(path, **kwds) if not recursive: break for r in results: self.objects[r.id] = r return results def path_leaf(self, path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def add_file_if_doesnt_exist(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False): for obj in self.objects.values(): if 'path' in obj: if self.path_leaf(f_path) == self.path_leaf(obj.get('path')): return [] return self.add_file(f_path, parent, tree, create_build_files, weak, ignore_unknown_type=ignore_unknown_type) def add_file(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False): results = [] abs_path = '' if os.path.isabs(f_path): abs_path = f_path if not os.path.exists(f_path): return results elif tree == 'SOURCE_ROOT': f_path = os.path.relpath(f_path, self.source_root) else: tree = '<absolute>' if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) file_ref = PBXFileReference.Create(f_path, tree, ignore_unknown_type=ignore_unknown_type) parent.add_child(file_ref) results.append(file_ref) # create a build file for the file ref if file_ref.build_phase and create_build_files: phases = self.get_build_phases(file_ref.build_phase) for phase in phases: build_file = PBXBuildFile.Create(file_ref, weak=weak) 
phase.add_build_file(build_file) results.append(build_file) if abs_path and tree == 'SOURCE_ROOT' \ and os.path.isfile(abs_path) \ and file_ref.build_phase == 'PBXFrameworksBuildPhase': library_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0]) self.add_library_search_paths([library_path], recursive=False) if abs_path and tree == 'SOURCE_ROOT' \ and not os.path.isfile(abs_path) \ and file_ref.build_phase == 'PBXFrameworksBuildPhase': framework_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0]) self.add_framework_search_paths([framework_path, '$(inherited)'], recursive=False) for r in results: self.objects[r.id] = r if results: self.modified = True return results def check_and_repair_framework(self, base): name = os.path.basename(base) if ".framework" in name: basename = name[:-len(".framework")] finalHeaders = os.path.join(base, "Headers") finalCurrent = os.path.join(base, "Versions/Current") finalLib = os.path.join(base, basename) srcHeaders = "Versions/A/Headers" srcCurrent = "A" srcLib = "Versions/A/" + basename if not os.path.exists(finalHeaders): os.symlink(srcHeaders, finalHeaders) if not os.path.exists(finalCurrent): os.symlink(srcCurrent, finalCurrent) if not os.path.exists(finalLib): os.symlink(srcLib, finalLib) def remove_group(self, grp): pass def remove_file(self, id, recursive=True): if not PBXType.IsGuid(id): id = id.id if id in self.objects: self.objects.remove(id) if recursive: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup'] for group in groups: if id in group['children']: group.remove_child(id) self.modified = True def move_file(self, id, dest_grp=None): pass def apply_patch(self, patch_path, xcode_path): if not os.path.isfile(patch_path) or not os.path.isdir(xcode_path): print 'ERROR: couldn\'t apply "%s" to "%s"' % (patch_path, xcode_path) return print 'applying "%s" to "%s"' % (patch_path, xcode_path) return subprocess.call(['patch', '-p1', '--forward', '--directory=%s' % xcode_path, '--input=%s' % 
patch_path]) def apply_mods(self, mod_dict, default_path=None): if not default_path: default_path = os.getcwd() keys = mod_dict.keys() for k in keys: v = mod_dict.pop(k) mod_dict[k.lower()] = v parent = mod_dict.pop('group', None) if parent: parent = self.get_or_create_group(parent) excludes = mod_dict.pop('excludes', []) if excludes: excludes = [re.compile(e) for e in excludes] compiler_flags = mod_dict.pop('compiler_flags', {}) for k, v in mod_dict.items(): if k == 'patches': for p in v: if not os.path.isabs(p): p = os.path.join(default_path, p) self.apply_patch(p, self.source_root) elif k == 'folders': # get and compile excludes list # do each folder individually for folder in v: kwds = {} # if path contains ':' remove it and set recursive to False if ':' in folder: args = folder.split(':') kwds['recursive'] = False folder = args.pop(0) if os.path.isabs(folder) and os.path.isdir(folder): pass else: folder = os.path.join(default_path, folder) if not os.path.isdir(folder): continue if parent: kwds['parent'] = parent if excludes: kwds['excludes'] = excludes self.add_folder(folder, **kwds) elif k == 'headerpaths' or k == 'librarypaths': paths = [] for p in v: if p.endswith('/**'): p = os.path.split(p)[0] if not os.path.isabs(p): p = os.path.join(default_path, p) if not os.path.exists(p): continue p = self.get_relative_path(p) paths.append(os.path.join('$(SRCROOT)', p, "**")) if k == 'headerpaths': self.add_header_search_paths(paths) else: self.add_library_search_paths(paths) elif k == 'other_cflags': self.add_other_cflags(v) elif k == 'other_ldflags': self.add_other_ldflags(v) elif k == 'libs' or k == 'frameworks' or k == 'files': paths = {} for p in v: kwds = {} if ':' in p: args = p.split(':') p = args.pop(0) if 'weak' in args: kwds['weak'] = True file_path = os.path.join(default_path, p) search_path, file_name = os.path.split(file_path) if [m for m in excludes if re.match(m, file_name)]: continue try: expr = re.compile(file_name) except re.error: expr = None if 
expr and os.path.isdir(search_path): file_list = os.listdir(search_path) for f in file_list: if [m for m in excludes if re.match(m, f)]: continue if re.search(expr, f): kwds['name'] = f paths[os.path.join(search_path, f)] = kwds p = None if k == 'libs': kwds['parent'] = self.get_or_create_group('Libraries', parent=parent) elif k == 'frameworks': kwds['parent'] = self.get_or_create_group('Frameworks', parent=parent) if p: kwds['name'] = file_name if k == 'libs': p = os.path.join('usr', 'lib', p) kwds['tree'] = 'SDKROOT' elif k == 'frameworks': p = os.path.join('System', 'Library', 'Frameworks', p) kwds['tree'] = 'SDKROOT' elif k == 'files' and not os.path.exists(file_path): # don't add non-existent files to the project. continue paths[p] = kwds new_files = self.verify_files([n.get('name') for n in paths.values()]) add_files = [(k, v) for k, v in paths.items() if v.get('name') in new_files] for path, kwds in add_files: kwds.pop('name', None) if 'parent' not in kwds and parent: kwds['parent'] = parent self.add_file(path, **kwds) if compiler_flags: for k, v in compiler_flags.items(): filerefs = [] for f in v: filerefs.extend([fr.id for fr in self.objects.values() if fr.get('isa') == 'PBXFileReference' and fr.get('name') == f]) buildfiles = [bf for bf in self.objects.values() if bf.get('isa') == 'PBXBuildFile' and bf.get('fileRef') in filerefs] for bf in buildfiles: if bf.add_compiler_flag(k): self.modified = True def backup(self, file_name=None, backup_name=None): if not file_name: file_name = self.pbxproj_path if not backup_name: backup_name = "%s.%s.backup" % (file_name, datetime.datetime.now().strftime('%d%m%y-%H%M%S')) shutil.copy2(file_name, backup_name) def save(self, file_name=None): """Saves in old (xml) format""" if not file_name: file_name = self.pbxproj_path # This code is adapted from plistlib.writePlist with open(file_name, "w") as f: writer = PBXWriter(f) writer.writeln("<plist version=\"1.0\">") writer.writeValue(self.data) writer.writeln("</plist>") def 
saveFormat3_2(self, file_name=None): """Save in Xcode 3.2 compatible (new) format""" if not file_name: file_name = self.pbxproj_path # process to get the section's info and names objs = self.data.get('objects') sections = dict() uuids = dict() for key in objs: l = list() if objs.get(key).get('isa') in sections: l = sections.get(objs.get(key).get('isa')) l.append(tuple([key, objs.get(key)])) sections[objs.get(key).get('isa')] = l if 'name' in objs.get(key): uuids[key] = objs.get(key).get('name') elif 'path' in objs.get(key): uuids[key] = objs.get(key).get('path') else: if objs.get(key).get('isa') == 'PBXProject': uuids[objs.get(key).get('buildConfigurationList')] = 'Build configuration list for PBXProject "Unity-iPhone"' elif objs.get(key).get('isa')[0:3] == 'PBX': uuids[key] = objs.get(key).get('isa')[3:-10] else: uuids[key] = 'Build configuration list for PBXNativeTarget "TARGET_NAME"' ro = self.data.get('rootObject') uuids[ro] = 'Project Object' for key in objs: # transitive references (used in the BuildFile section) if 'fileRef' in objs.get(key) and objs.get(key).get('fileRef') in uuids: uuids[key] = uuids[objs.get(key).get('fileRef')] # transitive reference to the target name (used in the Native target section) if objs.get(key).get('isa') == 'PBXNativeTarget': uuids[objs.get(key).get('buildConfigurationList')] = uuids[objs.get(key).get('buildConfigurationList')].replace('TARGET_NAME', uuids[key]) self.uuids = uuids self.sections = sections out = open(file_name, 'w') out.write('// !$*UTF8*$!\n') self._printNewXCodeFormat(out, self.data, '', enters=True) out.close() @classmethod def addslashes(cls, s): d = {'"': '\\"', "'": "\\'", "\0": "\\\0", "\\": "\\\\"} return ''.join(d.get(c, c) for c in s) def _printNewXCodeFormat(self, out, root, deep, enters=True): if isinstance(root, IterableUserDict): out.write('{') if enters: out.write('\n') isa = root.pop('isa', '') if isa != '': # keep the isa in the first spot if enters: out.write('\t' + deep) out.write('isa = ') 
self._printNewXCodeFormat(out, isa, '\t' + deep, enters=enters) out.write(';') if enters: out.write('\n') else: out.write(' ') for key in sorted(root.iterkeys()): # keep the same order as Apple. if enters: out.write('\t' + deep) if re.match(regex, key).group(0) == key: out.write(key.encode("utf-8") + ' = ') else: out.write('"' + key.encode("utf-8") + '" = ') if key == 'objects': out.write('{') # open the objects section if enters: out.write('\n') #root.remove('objects') # remove it to avoid problems sections = [ ('PBXBuildFile', False), ('PBXCopyFilesBuildPhase', True), ('PBXFileReference', False), ('PBXFrameworksBuildPhase', True), ('PBXGroup', True), ('PBXNativeTarget', True), ('PBXProject', True), ('PBXResourcesBuildPhase', True), ('PBXShellScriptBuildPhase', True), ('PBXSourcesBuildPhase', True), ('XCBuildConfiguration', True), ('XCConfigurationList', True), ('PBXTargetDependency', True), ('PBXVariantGroup', True), ('PBXReferenceProxy', True), ('PBXContainerItemProxy', True)] for section in sections: # iterate over the sections if self.sections.get(section[0]) is None: continue out.write('\n/* Begin %s section */' % section[0].encode("utf-8")) self.sections.get(section[0]).sort(cmp=lambda x, y: cmp(x[0], y[0])) for pair in self.sections.get(section[0]): key = pair[0] value = pair[1] out.write('\n') if enters: out.write('\t\t' + deep) out.write(key.encode("utf-8")) if key in self.uuids: out.write(" /* " + self.uuids[key].encode("utf-8") + " */") out.write(" = ") self._printNewXCodeFormat(out, value, '\t\t' + deep, enters=section[1]) out.write(';') out.write('\n/* End %s section */\n' % section[0].encode("utf-8")) out.write(deep + '\t}') # close of the objects section else: self._printNewXCodeFormat(out, root[key], '\t' + deep, enters=enters) out.write(';') if enters: out.write('\n') else: out.write(' ') root['isa'] = isa # restore the isa for further calls if enters: out.write(deep) out.write('}') elif isinstance(root, UserList): out.write('(') if enters: 
out.write('\n') for value in root: if enters: out.write('\t' + deep) self._printNewXCodeFormat(out, value, '\t' + deep, enters=enters) out.write(',') if enters: out.write('\n') if enters: out.write(deep) out.write(')') else: if len(root) > 0 and re.match(regex, root).group(0) == root: out.write(root.encode("utf-8")) else: out.write('"' + XcodeProject.addslashes(root.encode("utf-8")) + '"') if root in self.uuids: out.write(" /* " + self.uuids[root].encode("utf-8") + " */") @classmethod def Load(cls, path): cls.plutil_path = os.path.join(os.path.split(__file__)[0], 'plutil') if not os.path.isfile(XcodeProject.plutil_path): cls.plutil_path = 'plutil' # load project by converting to xml and then convert that using plistlib p = subprocess.Popen([XcodeProject.plutil_path, '-convert', 'xml1', '-o', '-', path], stdout=subprocess.PIPE) stdout, stderr = p.communicate() # If the plist was malformed, returncode will be non-zero if p.returncode != 0: print stdout return None tree = plistlib.readPlistFromString(stdout) return XcodeProject(tree, path) # The code below was adapted from plistlib.py. class PBXWriter(plistlib.PlistWriter): def writeValue(self, value): if isinstance(value, (PBXList, PBXDict)): plistlib.PlistWriter.writeValue(self, value.data) else: plistlib.PlistWriter.writeValue(self, value) def simpleElement(self, element, value=None): """ We have to override this method to deal with Unicode text correctly. Non-ascii characters have to get encoded as character references. 
""" if value is not None: value = _escapeAndEncode(value) self.writeln("<%s>%s</%s>" % (element, value, element)) else: self.writeln("<%s/>" % element) # Regex to find any control chars, except for \t \n and \r _controlCharPat = re.compile( r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f" r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]") def _escapeAndEncode(text): m = _controlCharPat.search(text) if m is not None: raise ValueError("strings can't contains control characters; " "use plistlib.Data instead") text = text.replace("\r\n", "\n") # convert DOS line endings text = text.replace("\r", "\n") # convert Mac line endings text = text.replace("&", "&amp;") # escape '&' text = text.replace("<", "&lt;") # escape '<' text = text.replace(">", "&gt;") # escape '>' return text.encode("ascii", "xmlcharrefreplace") # encode as ascii with xml character references
lgpl-2.1
jskrzypek/flask-video-streaming
video_deque.py
1
2258
# video_deque.py from PIL import Image from fs.memoryfs import MemoryFS import fs.path import PIL import io import numpy as np import cv2 import collections import datetime import tempfile import string import random def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) class VideoDeque(object): def __init__(self): self.frame_deque = collections.deque(maxlen=100) self.face_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv3/3.0.0/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml') self.eye_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv3/3.0.0/share/OpenCV/haarcascades/haarcascade_eye.xml') def enque_frame(self, frame): frame_io = io.BytesIO() frame.save(frame_io) self.frame_deque.append(frame_io) print('new length is '+str(len(self.frame_deque))) def deque_frame(self): print('deque length is '+str(len(self.frame_deque))) if len(self.frame_deque) <= 0: return None print('dequeing image') frame_io = self.frame_deque.popleft() nparr = np.array(frame_io.getbuffer(), np.ubyte) image = cv2.imdecode(nparr,1) print(image.size) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if self.face_cascade.empty() != True: faces = self.face_cascade.detectMultiScale(gray, 1.3, 5) print('faces length is '+str(len(self.frame_deque))) for (x,y,w,h) in faces: cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2) if self.face_cascade.empty() != True: roi_gray = gray[y:y+h, x:x+w] roi_color = image[y:y+h, x:x+w] eyes = self.eye_cascade.detectMultiScale(roi_gray) for (ex,ey,ew,eh) in eyes: cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) else: print('face cascade not open') imagergb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) ret, jpeg = cv2.imencode('.jpg', imagergb) if ret == True: return jpeg.tobytes() print('cv2 failed') return b''
mit
ClayDowling/GURPSChase
gtest/test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. 
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. 
environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' + test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. 
def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. 
Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. 
Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) 
self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' 
in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. 
self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
diofeher/django-nfa
build/lib/django/utils/decorators.py
38
2464
"Functions that help with dynamically creating decorators for views."

import types

try:
    from functools import wraps
except ImportError:
    from django.utils.functional import wraps  # Python fallback.


def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis.

    The returned decorator supports two historical call forms:
    ``dec(view_func, *init_args)`` and ``dec(*init_args)(view_func)``.  Any
    extra positional/keyword arguments are forwarded to ``middleware_class``
    when it is instantiated.
    """
    def _decorator_from_middleware(*args, **kwargs):
        # For historical reasons, these "decorators" are also called as
        # dec(func, *args) instead of dec(*args)(func). We handle both forms
        # for backwards compatibility.
        has_func = True
        try:
            view_func = kwargs.pop('view_func')
        except KeyError:
            if len(args):
                view_func, args = args[0], args[1:]
            else:
                has_func = False
        if not (has_func and isinstance(view_func, types.FunctionType)):
            # We are being called as a decorator factory: return a decorator
            # that re-enters this function once the view function is known.
            if has_func:
                args = (view_func,) + args
            # Bug fix: the middleware used to be instantiated here as well and
            # then discarded (it is built again below once the view function
            # arrives), needlessly running __init__ side effects twice.
            def decorator_func(fn):
                return _decorator_from_middleware(fn, *args, **kwargs)
            return decorator_func

        # The view function is known; build the middleware instance once.
        middleware = middleware_class(*args, **kwargs)

        def _wrapped_view(request, *args, **kwargs):
            # Mirror Django's request/response middleware pipeline around a
            # single view: request -> view -> exception -> response hooks.
            if hasattr(middleware, 'process_request'):
                result = middleware.process_request(request)
                if result is not None:
                    return result
            if hasattr(middleware, 'process_view'):
                result = middleware.process_view(request, view_func, args,
                                                 kwargs)
                if result is not None:
                    return result
            try:
                response = view_func(request, *args, **kwargs)
            # NOTE(review): changed from the pre-2.6 "except Exception, e"
            # spelling to the "as" form (Python 2.6+/3 compatible).
            except Exception as e:
                if hasattr(middleware, 'process_exception'):
                    result = middleware.process_exception(request, e)
                    if result is not None:
                        return result
                raise
            if hasattr(middleware, 'process_response'):
                result = middleware.process_response(request, response)
                if result is not None:
                    return result
            return response
        return wraps(view_func)(_wrapped_view)
    return _decorator_from_middleware
bsd-3-clause
sauloal/pycluster
pypy-1.9_64/lib-python/2.7/hashlib.py
110
5013
# $Id$
#
#  Copyright (C) 2005   Gregory P. Smith (greg@krypto.org)
#  Licensed to PSF under a Contributor Agreement.
#

__doc__ = """hashlib module - A common interface to many hash functions.

new(name, string='') - returns a new hash object implementing the given
                       hash function; initializing the hash using the
                       given string data.

Named constructor functions are also available, these are much faster
than using new(): md5(), sha1(), sha224(), sha256(), sha384(), and
sha512().

More algorithms may be available on your platform but the above are
guaranteed to exist.

NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.

Choose your hash function wisely.  Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.

Hash objects have these methods:
 - update(arg): Update the hash object with the string arg. Repeated calls
                are equivalent to a single call with the concatenation of all
                the arguments.
 - digest():    Return the digest of the strings passed to the update() method
                so far. This may contain non-ASCII characters, including
                NUL bytes.
 - hexdigest(): Like digest() except the digest is returned as a string of
                double length, containing only hexadecimal digits.
 - copy():      Return a copy (clone) of the hash object. This can be used to
                efficiently compute the digests of strings that share a common
                initial substring.
"""

# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')

algorithms = __always_supported

__all__ = __always_supported + ('new', 'algorithms')


def __get_builtin_constructor(name):
    """Returns the pure-extension-module constructor for 'name'.

    Raises ValueError when the algorithm is unknown or its extension
    module is unavailable.  Imports are deliberately lazy so that missing
    extension modules only fail the algorithms that need them.
    """
    try:
        if name in ('SHA1', 'sha1'):
            import _sha
            return _sha.new
        if name in ('MD5', 'md5'):
            import _md5
            return _md5.new
        if name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
            import _sha256
            if name.endswith('256'):
                return _sha256.sha256
            return _sha256.sha224
        if name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
            import _sha512
            if name.endswith('512'):
                return _sha512.sha512
            return _sha512.sha384
    except ImportError:
        pass  # no extension module, this hash is unsupported.

    raise ValueError('unsupported hash type %s' % name)


def __get_openssl_constructor(name):
    """Returns the fast OpenSSL-backed constructor for 'name' when usable,
    falling back to the builtin extension-module constructor."""
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # The function may be defined yet the hash unavailable in the
        # linked OpenSSL; probing it raises ValueError in that case.
        f()
        # Use the C function directly (very fast).
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)


def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named
    algorithm; optionally initialized with a string.
    """
    return __get_builtin_constructor(name)(string)


def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named
    algorithm; optionally initialized with a string.
    """
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # The _hashlib module (OpenSSL) may lack the named hash -- e.g.
        # SHA224/256 and SHA384/512 before OpenSSL 0.9.8 -- so fall back
        # to our builtin implementations.
        return __get_builtin_constructor(name)(string)


try:
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
except ImportError:
    new = __py_new
    __get_hash = __get_builtin_constructor

for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)

# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
mit
PermutaTriangle/PermStruct
scratch/test_avoiders_of_short_patterns.py
1
2398
import permstruct
import permstruct.dag

# Since we usually don't want overlays:
overlays = False

# ------------------------------------------------ #
# Experiment: avoidance of the two classical patterns 12 and 21.
#
# (Earlier experiments in this script -- avoiding a single classical
# pattern of length 1 or 2 using the N_P, N_P_X, N_P_X1 and N_P_X2 dags --
# were commented out; only this configuration is active.)
#
# As expected this finds the unique rule that only generates the
# permutation 1.
perm_prop = lambda p: p.avoids([1, 2]) and p.avoids([2, 1])
perm_bound = 6
inp_dag = permstruct.dag.N_P(perm_bound)
max_rule_size = (2, 2)
max_non_empty = 3
max_rules = 100
ignored = 1
# ------------------------------------------------ #

if not overlays:
    permstruct.exhaustive(perm_prop,
                          perm_bound,
                          inp_dag,
                          max_rule_size,
                          max_non_empty,
                          max_rules,
                          ignore_first=ignored)
else:
    # NOTE(review): overlay_dag, max_overlay_cnt and max_overlay_size are
    # not defined anywhere in this script, so flipping 'overlays' to True
    # raises NameError -- confirm their intended values before enabling.
    permstruct.exhaustive_with_overlays(perm_prop,
                                        perm_bound,
                                        inp_dag,
                                        max_rule_size,
                                        max_non_empty,
                                        max_rules,
                                        overlay_dag,
                                        max_overlay_cnt,
                                        max_overlay_size,
                                        min_rule_size=(1, 1))
bsd-3-clause
imsparsh/python-for-android
python3-alpha/python3-src/Lib/doctest.py
45
99902
# Module doctest. # Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). # Major enhancements and refactoring by: # Jim Fulton # Edward Loper # Provided as-is; use at your own risk; no warranty; no promises; enjoy! r"""Module doctest -- a framework for running examples in docstrings. In simplest use, end each module M to be tested with: def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() Then running the module as a script will cause the examples in the docstrings to get executed and verified: python M.py This won't display anything unless an example fails, in which case the failing example(s) and the cause(s) of the failure(s) are printed to stdout (why not stderr? because stderr is a lame hack <0.2 wink>), and the final line of output is "Test failed.". Run it with the -v switch instead: python M.py -v and a detailed report of all examples tried is printed to stdout, along with assorted summaries at the end. You can force verbose mode by passing "verbose=True" to testmod, or prohibit it by passing "verbose=False". In either of those cases, sys.argv is not examined by testmod. There are a variety of other ways to run doctests, including integration with the unittest framework, and support for running non-Python text files containing doctests. There are also many ways to override parts of doctest's default behaviors. See the Library Reference Manual for details. """ __docformat__ = 'reStructuredText en' __all__ = [ # 0, Option Flags 'register_optionflag', 'DONT_ACCEPT_TRUE_FOR_1', 'DONT_ACCEPT_BLANKLINE', 'NORMALIZE_WHITESPACE', 'ELLIPSIS', 'SKIP', 'IGNORE_EXCEPTION_DETAIL', 'COMPARISON_FLAGS', 'REPORT_UDIFF', 'REPORT_CDIFF', 'REPORT_NDIFF', 'REPORT_ONLY_FIRST_FAILURE', 'REPORTING_FLAGS', # 1. Utility Functions # 2. Example & DocTest 'Example', 'DocTest', # 3. Doctest Parser 'DocTestParser', # 4. Doctest Finder 'DocTestFinder', # 5. 
Doctest Runner 'DocTestRunner', 'OutputChecker', 'DocTestFailure', 'UnexpectedException', 'DebugRunner', # 6. Test Functions 'testmod', 'testfile', 'run_docstring_examples', # 7. Unittest Support 'DocTestSuite', 'DocFileSuite', 'set_unittest_reportflags', # 8. Debugging Support 'script_from_examples', 'testsource', 'debug_src', 'debug', ] import __future__ import difflib import inspect import linecache import os import pdb import re import sys import traceback import unittest from io import StringIO from collections import namedtuple TestResults = namedtuple('TestResults', 'failed attempted') # There are 4 basic classes: # - Example: a <source, want> pair, plus an intra-docstring line number. # - DocTest: a collection of examples, parsed from a docstring, plus # info about where the docstring came from (name, filename, lineno). # - DocTestFinder: extracts DocTests from a given object's docstring and # its contained objects' docstrings. # - DocTestRunner: runs DocTest cases, and accumulates statistics. # # So the basic picture is: # # list of: # +------+ +---------+ +-------+ # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| # +------+ +---------+ +-------+ # | Example | # | ... | # | Example | # +---------+ # Option constants. OPTIONFLAGS_BY_NAME = {} def register_optionflag(name): # Create a new flag unless `name` is already known. 
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') ELLIPSIS = register_optionflag('ELLIPSIS') SKIP = register_optionflag('SKIP') IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | DONT_ACCEPT_BLANKLINE | NORMALIZE_WHITESPACE | ELLIPSIS | SKIP | IGNORE_EXCEPTION_DETAIL) REPORT_UDIFF = register_optionflag('REPORT_UDIFF') REPORT_CDIFF = register_optionflag('REPORT_CDIFF') REPORT_NDIFF = register_optionflag('REPORT_NDIFF') REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') REPORTING_FLAGS = (REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF | REPORT_ONLY_FIRST_FAILURE) # Special string markers for use in `want` strings: BLANKLINE_MARKER = '<BLANKLINE>' ELLIPSIS_MARKER = '...' ###################################################################### ## Table of Contents ###################################################################### # 1. Utility Functions # 2. Example & DocTest -- store test cases # 3. DocTest Parser -- extracts examples from strings # 4. DocTest Finder -- extracts test cases from objects # 5. DocTest Runner -- runs test cases # 6. Test Functions -- convenient wrappers for testing # 7. Unittest Support # 8. Debugging Support # 9. Example Usage ###################################################################### ## 1. Utility Functions ###################################################################### def _extract_future_flags(globs): """ Return the compiler-flags associated with the future features that have been imported into the given namespace (globs). 
""" flags = 0 for fname in __future__.all_feature_names: feature = globs.get(fname, None) if feature is getattr(__future__, fname): flags |= feature.compiler_flag return flags def _normalize_module(module, depth=2): """ Return the module specified by `module`. In particular: - If `module` is a module, then return module. - If `module` is a string, then import and return the module with that name. - If `module` is None, then return the calling module. The calling module is assumed to be the module of the stack frame at the given depth in the call stack. """ if inspect.ismodule(module): return module elif isinstance(module, str): return __import__(module, globals(), locals(), ["*"]) elif module is None: return sys.modules[sys._getframe(depth).f_globals['__name__']] else: raise TypeError("Expected a module, string, or None") def _load_testfile(filename, package, module_relative, encoding): if module_relative: package = _normalize_module(package, 3) filename = _module_relative_path(package, filename) if hasattr(package, '__loader__'): if hasattr(package.__loader__, 'get_data'): file_contents = package.__loader__.get_data(filename) file_contents = file_contents.decode(encoding) # get_data() opens files as 'rb', so one must do the equivalent # conversion as universal newlines would do. return file_contents.replace(os.linesep, '\n'), filename with open(filename, encoding=encoding) as f: return f.read(), filename def _indent(s, indent=4): """ Add the given number of space characters to the beginning of every non-blank line in `s`, and return the result. """ # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) def _exception_traceback(exc_info): """ Return a string containing a traceback message for the given exc_info tuple (as returned by sys.exc_info()). """ # Get a traceback message. 
excout = StringIO() exc_type, exc_val, exc_tb = exc_info traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) return excout.getvalue() # Override some StringIO methods. class _SpoofOut(StringIO): def getvalue(self): result = StringIO.getvalue(self) # If anything at all was written, make sure there's a trailing # newline. There's no way for the expected output to indicate # that a trailing newline is missing. if result and not result.endswith("\n"): result += "\n" return result def truncate(self, size=None): self.seek(size) StringIO.truncate(self) # Worst-case linear-time ellipsis matching. def _ellipsis_match(want, got): """ Essentially the only subtle case: >>> _ellipsis_match('aa...aa', 'aaa') False """ if ELLIPSIS_MARKER not in want: return want == got # Find "the real" strings. ws = want.split(ELLIPSIS_MARKER) assert len(ws) >= 2 # Deal with exact matches possibly needed at one or both ends. startpos, endpos = 0, len(got) w = ws[0] if w: # starts with exact match if got.startswith(w): startpos = len(w) del ws[0] else: return False w = ws[-1] if w: # ends with exact match if got.endswith(w): endpos -= len(w) del ws[-1] else: return False if startpos > endpos: # Exact end matches required more characters than we have, as in # _ellipsis_match('aa...aa', 'aaa') return False # For the rest, we only need to find the leftmost non-overlapping # match for each piece. If there's no overall match that way alone, # there's no overall match period. for w in ws: # w may be '' at times, if there are consecutive ellipses, or # due to an ellipsis at the start or end of `want`. That's OK. # Search for an empty string succeeds, and doesn't change startpos. 
# --- tail of a definition that begins before this chunk (ellipsis matcher) ---
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True

def _comment_line(line):
    "Return a commented form of the given line"
    line = line.rstrip()
    if line:
        return '# '+line
    else:
        return '#'

class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        self.__out = out
        self.__debugger_used = False
        # do not play signal games in the pdb
        pdb.Pdb.__init__(self, stdout=out, nosigint=True)
        # still use input() to get user input
        self.use_rawinput = 1

    def set_trace(self, frame=None):
        self.__debugger_used = True
        if frame is None:
            # Caller's frame, so the debugger stops where set_trace was called.
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream while the debugger (not the
        # traced code) is running.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout

# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """Resolve a '/'-separated `path` relative to `module`'s directory."""
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv)>0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins)
        # NOTE(review): BUG — concatenating a str with a module object raises
        # TypeError here, masking the intended ValueError.  Should use
        # "... %r ..." % module (fixed upstream in later CPython).
        raise ValueError("Can't resolve paths relative to the module " +
                         module +
                         " (it has no __file__)")

    # Combine the base directory and the path.
    return os.path.join(basedir, *(path.split('/')))

######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
##   fragment of source code, and "want" is the expected output for
##   "source."  The Example class also includes information about
##   where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
##   a string (such as an object's docstring).  The DocTest class also
##   includes information about where the string was extracted from.

class Example:
    """
    A single doctest example, consisting of source code and expected
    output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that preceed the
        example's first prompt.

      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize inputs: source/want/exc_msg are guaranteed to end
        # with a newline (or be empty / None, per the class docstring).
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        if options is None: options = {}
        self.options = options
        self.exc_msg = exc_msg

class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno,
                 docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, str), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # A private copy, so examples can't leak bindings into the caller's
        # namespace.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name:
    def __lt__(self, other):
        if not isinstance(other, DocTest):
            return NotImplemented
        # id() as a final tie-breaker gives a total order even for tests
        # with identical name/filename/lineno.
        return ((self.name, self.filename, self.lineno, id(self))
                <
                (other.name, other.filename, other.lineno, id(other)))

######################################################################
## 3. DocTestParser
######################################################################

class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)      # Not a blank line
                     (?![ ]*>>>)    # Not a line starting with PS1
                     .*$\n?         # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-char prompt ('>>> ').
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.
    # NOTE(review): non-raw string literal with '\S' — an invalid escape
    # sequence that newer Pythons warn about; should be a raw string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))

######################################################################
## 4. DocTest Finder
######################################################################

class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                        "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj)
        except TypeError:
            source_lines = None
        else:
            if not file:
                # Check to see if it's one of our special internal "files"
                # (see __patched_linecache_getlines).
                file = inspect.getfile(obj)
                if not file[0]+file[-2:] == '<]>': file = None
            if file is None:
                source_lines = None
            else:
                if module is not None:
                    # Supply the module globals in case the module was
                    # originally loaded via a PEP 302 loader and
                    # file is not a valid filesystem path
                    source_lines = linecache.getlines(file, module.__dict__)
                else:
                    # No access to a loader, so assume it's a normal
                    # filesystem path
                    source_lines = linecache.getlines(file)
                if not source_lines:
                    source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'  # provide a default module name

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            return module.__dict__ is object.__globals__
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        # (`seen` maps id(obj) -> 1 and doubles as a cycle guard.)
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod: unwrap to
                # the underlying function so it can be recursed into.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                      isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, str):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source rather than a compiled .pyc/.pyo.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj): obj = obj.__code__
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        # NOTE(review): non-raw pattern with '\s'/'\w' — invalid escape
        # sequences that newer Pythons warn about; should be a raw string.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None

######################################################################
## 5. DocTest Runner
######################################################################

class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print(test.name, '->', runner.run(test))
        _TestClass -> TestResults(failed=0, attempted=2)
        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
        _TestClass.get -> TestResults(failed=0, attempted=2)
        _TestClass.square -> TestResults(failed=0, attempted=1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        TestResults(failed=0, attempted=7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}  # maps test name -> (failures, tries)

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the common "File ..., line ..., in ..." header used by
        # report_failure and report_unexpected_exception.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception: check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
                    m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
                    if m1 and m2 and check(m1.group(1), m2.group(1),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve example source for the synthetic '<doctest ...[N]>' filenames
        # used by __run's compile(); fall back to the real linecache.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Restore every global we patched, even if __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
                import builtins
                builtins._ = None

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return TestResults(totalf, totalt)

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) counts into ours.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                #     so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                #    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t

class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def _toAscii(self, s):
        """
        Convert string to hex-escaped ASCII string.
""" return str(s.encode('ASCII', 'backslashreplace'), "ASCII") def check_output(self, want, got, optionflags): """ Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. """ # If `want` contains hex-escaped character such as "\u1234", # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). # On the other hand, `got` could be an another sequence of # characters such as [\u1234], so `want` and `got` should # be folded to hex-escaped ASCII string to compare. got = self._toAscii(got) want = self._toAscii(want) # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # The values True and False replaced 1 and 0 as the return # value for boolean comparisons in Python 2.3. if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): if (got,want) == ("True\n", "1\n"): return True if (got,want) == ("False\n", "0\n"): return True # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub('(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. 
if optionflags & ELLIPSIS: if _ellipsis_match(want, got): return True # We didn't find any match; return false. return False # Should we do a fancy diff? def _do_a_fancy_diff(self, want, got, optionflags): # Not unless they asked for a fancy diff. if not optionflags & (REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF): return False # If expected output uses ellipsis, a meaningful fancy diff is # too hard ... or maybe not. In two real-life failures Tim saw, # a diff was a major help anyway, so this is commented out. # [todo] _ellipsis_match() knows which pieces do and don't match, # and could be the basis for a kick-ass diff in this case. ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: ## return False # ndiff does intraline difference marking, so can be useful even # for 1-line differences. if optionflags & REPORT_NDIFF: return True # The other diff types need at least a few lines to be helpful. return want.count('\n') > 2 and got.count('\n') > 2 def output_difference(self, example, got, optionflags): """ Return a string describing the differences between the expected output for a given example (`example`) and the actual output (`got`). `optionflags` is the set of option flags used to compare `want` and `got`. """ want = example.want # If <BLANKLINE>s are being used, then replace blank lines # with <BLANKLINE> in the actual output string. if not (optionflags & DONT_ACCEPT_BLANKLINE): got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) # Check if we should use diff. if self._do_a_fancy_diff(want, got, optionflags): # Split want & got into lines. want_lines = want.splitlines(True) # True == keep line ends got_lines = got.splitlines(True) # Use difflib to find their differences. 
if optionflags & REPORT_UDIFF: diff = difflib.unified_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'unified diff with -expected +actual' elif optionflags & REPORT_CDIFF: diff = difflib.context_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'context diff with expected followed by actual' elif optionflags & REPORT_NDIFF: engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) diff = list(engine.compare(want_lines, got_lines)) kind = 'ndiff with -expected +actual' else: assert 0, 'Bad diff option' # Remove trailing whitespace on diff output. diff = [line.rstrip() + '\n' for line in diff] return 'Differences (%s):\n' % kind + _indent(''.join(diff)) # If we're not using diff, then simply list the expected # output followed by the actual output. if want and got: return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) elif want: return 'Expected:\n%sGot nothing\n' % _indent(want) elif got: return 'Expected nothing\nGot:\n%s' % _indent(got) else: return 'Expected nothing\nGot nothing\n' class DocTestFailure(Exception): """A DocTest example has failed in debugging mode. The exception instance has variables: - test: the DocTest object being run - example: the Example object that failed - got: the actual output """ def __init__(self, test, example, got): self.test = test self.example = example self.got = got def __str__(self): return str(self.test) class UnexpectedException(Exception): """A DocTest example has encountered an unexpected exception The exception instance has variables: - test: the DocTest object being run - example: the Example object that failed - exc_info: the exception info """ def __init__(self, test, example, exc_info): self.test = test self.example = example self.exc_info = exc_info def __str__(self): return str(self.test) class DebugRunner(DocTestRunner): r"""Run doc tests but raise an exception as soon as there is a failure. 
If an unexpected exception occurs, an UnexpectedException is raised. It contains the test, the example, and the original exception: >>> runner = DebugRunner(verbose=False) >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', ... {}, 'foo', 'foo.py', 0) >>> try: ... runner.run(test) ... except UnexpectedException as f: ... failure = f >>> failure.test is test True >>> failure.example.want '42\n' >>> exc_info = failure.exc_info >>> raise exc_info[1] # Already has the traceback Traceback (most recent call last): ... KeyError We wrap the original exception to give the calling application access to the test and example information. If the output doesn't match, then a DocTestFailure is raised: >>> test = DocTestParser().get_doctest(''' ... >>> x = 1 ... >>> x ... 2 ... ''', {}, 'foo', 'foo.py', 0) >>> try: ... runner.run(test) ... except DocTestFailure as f: ... failure = f DocTestFailure objects provide access to the test: >>> failure.test is test True As well as to the example: >>> failure.example.want '2\n' and the actual output: >>> failure.got '1\n' If a failure or error occurs, the globals are left intact: >>> del test.globs['__builtins__'] >>> test.globs {'x': 1} >>> test = DocTestParser().get_doctest(''' ... >>> x = 2 ... >>> raise KeyError ... ''', {}, 'foo', 'foo.py', 0) >>> runner.run(test) Traceback (most recent call last): ... doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)> >>> del test.globs['__builtins__'] >>> test.globs {'x': 2} But the globals are cleared if there is no error: >>> test = DocTestParser().get_doctest(''' ... >>> x = 2 ... 
''', {}, 'foo', 'foo.py', 0) >>> runner.run(test) TestResults(failed=0, attempted=1) >>> test.globs {} """ def run(self, test, compileflags=None, out=None, clear_globs=True): r = DocTestRunner.run(self, test, compileflags, out, False) if clear_globs: test.globs.clear() return r def report_unexpected_exception(self, out, test, example, exc_info): raise UnexpectedException(test, example, exc_info) def report_failure(self, out, test, example, got): raise DocTestFailure(test, example, got) ###################################################################### ## 6. Test Functions ###################################################################### # These should be backwards compatible. # For backward compatibility, a global instance of a DocTestRunner # class, updated by testmod. master = None def testmod(m=None, name=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, exclude_empty=False): """m=None, name=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, exclude_empty=False Test examples in docstrings in functions and classes reachable from module m (or the current module if m is not supplied), starting with m.__doc__. Also test examples reachable from dict m.__test__ if it exists and is not None. m.__test__ maps names to functions, classes and strings; function and class docstrings are tested even if the name is private; strings are tested directly, as if they were docstrings. Return (#failures, #tests). See help(doctest) for an overview. Optional keyword arg "name" gives the name of the module; by default use m.__name__. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use m.__dict__. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. 
Optional keyword arg "extraglobs" gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. This is new in 2.4. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. This is new in 2.3. Possible values (see the docs for details): DONT_ACCEPT_TRUE_FOR_1 DONT_ACCEPT_BLANKLINE NORMALIZE_WHITESPACE ELLIPSIS SKIP IGNORE_EXCEPTION_DETAIL REPORT_UDIFF REPORT_CDIFF REPORT_NDIFF REPORT_ONLY_FIRST_FAILURE Optional keyword arg "raise_on_error" raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ global master # If no module was given, then use __main__. if m is None: # DWA - m will still be None if this wasn't invoked from the command # line, in which case the following TypeError is about as good an error # as we should expect m = sys.modules.get('__main__') # Check that we were actually given a module. if not inspect.ismodule(m): raise TypeError("testmod: module required; %r" % (m,)) # If no name was given, then use the module's name. if name is None: name = m.__name__ # Find, parse, and run all tests in the given module. 
finder = DocTestFinder(exclude_empty=exclude_empty) if raise_on_error: runner = DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): runner.run(test) if report: runner.summarize() if master is None: master = runner else: master.merge(runner) return TestResults(runner.failures, runner.tries) def testfile(filename, module_relative=True, name=None, package=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, parser=DocTestParser(), encoding=None): """ Test examples in the given file. Return (#failures, #tests). Optional keyword arg "module_relative" specifies how filenames should be interpreted: - If "module_relative" is True (the default), then "filename" specifies a module-relative path. By default, this path is relative to the calling module's directory; but if the "package" argument is specified, then it is relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and should not be an absolute path (i.e., it may not begin with "/"). - If "module_relative" is False, then "filename" specifies an os-specific path. The path may be absolute or relative (to the current working directory). Optional keyword arg "name" gives the name of the test; by default use the file's basename. Optional keyword argument "package" is a Python package or the name of a Python package whose directory should be used as the base directory for a module relative filename. If no package is specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use {}. 
A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg "extraglobs" gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. Possible values (see the docs for details): DONT_ACCEPT_TRUE_FOR_1 DONT_ACCEPT_BLANKLINE NORMALIZE_WHITESPACE ELLIPSIS SKIP IGNORE_EXCEPTION_DETAIL REPORT_UDIFF REPORT_CDIFF REPORT_NDIFF REPORT_ONLY_FIRST_FAILURE Optional keyword arg "raise_on_error" raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Optional keyword arg "parser" specifies a DocTestParser (or subclass) that should be used to extract tests from the files. Optional keyword arg "encoding" specifies an encoding that should be used to convert the file to unicode. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. 
""" global master if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path text, filename = _load_testfile(filename, package, module_relative, encoding or "utf-8") # If no name was given, then use the file's name. if name is None: name = os.path.basename(filename) # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: globs['__name__'] = '__main__' if raise_on_error: runner = DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = DocTestRunner(verbose=verbose, optionflags=optionflags) # Read the file, convert it to a test, and run it. test = parser.get_doctest(text, globs, name, filename, 0) runner.run(test) if report: runner.summarize() if master is None: master = runner else: master.merge(runner) return TestResults(runner.failures, runner.tries) def run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0): """ Test examples in the given object's docstring (`f`), using `globs` as globals. Optional argument `name` is used in failure messages. If the optional argument `verbose` is true, then generate output even if there are no failures. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. Optional keyword arg `optionflags` specifies options for the testing and output. See the documentation for `testmod` for more information. """ # Find, parse, and run all tests in the given module. finder = DocTestFinder(verbose=verbose, recurse=False) runner = DocTestRunner(verbose=verbose, optionflags=optionflags) for test in finder.find(f, name, globs=globs): runner.run(test, compileflags=compileflags) ###################################################################### ## 7. 
Unittest Support ###################################################################### _unittest_reportflags = 0 def set_unittest_reportflags(flags): """Sets the unittest option flags. The old flag is returned so that a runner could restore the old value if it wished to: >>> import doctest >>> old = doctest._unittest_reportflags >>> doctest.set_unittest_reportflags(REPORT_NDIFF | ... REPORT_ONLY_FIRST_FAILURE) == old True >>> doctest._unittest_reportflags == (REPORT_NDIFF | ... REPORT_ONLY_FIRST_FAILURE) True Only reporting flags can be set: >>> doctest.set_unittest_reportflags(ELLIPSIS) Traceback (most recent call last): ... ValueError: ('Only reporting flags allowed', 8) >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | ... REPORT_ONLY_FIRST_FAILURE) True """ global _unittest_reportflags if (flags & REPORTING_FLAGS) != flags: raise ValueError("Only reporting flags allowed", flags) old = _unittest_reportflags _unittest_reportflags = flags return old class DocTestCase(unittest.TestCase): def __init__(self, test, optionflags=0, setUp=None, tearDown=None, checker=None): unittest.TestCase.__init__(self) self._dt_optionflags = optionflags self._dt_checker = checker self._dt_test = test self._dt_setUp = setUp self._dt_tearDown = tearDown def setUp(self): test = self._dt_test if self._dt_setUp is not None: self._dt_setUp(test) def tearDown(self): test = self._dt_test if self._dt_tearDown is not None: self._dt_tearDown(test) test.globs.clear() def runTest(self): test = self._dt_test old = sys.stdout new = StringIO() optionflags = self._dt_optionflags if not (optionflags & REPORTING_FLAGS): # The option flags don't include any reporting flags, # so add the default reporting flags optionflags |= _unittest_reportflags runner = DocTestRunner(optionflags=optionflags, checker=self._dt_checker, verbose=False) try: runner.DIVIDER = "-"*70 failures, tries = runner.run( test, out=new.write, clear_globs=False) finally: sys.stdout = old if failures: raise 
self.failureException(self.format_failure(new.getvalue())) def format_failure(self, err): test = self._dt_test if test.lineno is None: lineno = 'unknown line number' else: lineno = '%s' % test.lineno lname = '.'.join(test.name.split('.')[-1:]) return ('Failed doctest test for %s\n' ' File "%s", line %s, in %s\n\n%s' % (test.name, test.filename, lineno, lname, err) ) def debug(self): r"""Run the test case without results and without catching exceptions The unit test framework includes a debug method on test cases and test suites to support post-mortem debugging. The test code is run in such a way that errors are not caught. This way a caller can catch the errors and initiate post-mortem debugging. The DocTestCase provides a debug method that raises UnexpectedException errors if there is an unexpected exception: >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', ... {}, 'foo', 'foo.py', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except UnexpectedException as f: ... failure = f The UnexpectedException contains the test, the example, and the original exception: >>> failure.test is test True >>> failure.example.want '42\n' >>> exc_info = failure.exc_info >>> raise exc_info[1] # Already has the traceback Traceback (most recent call last): ... KeyError If the output doesn't match, then a DocTestFailure is raised: >>> test = DocTestParser().get_doctest(''' ... >>> x = 1 ... >>> x ... 2 ... ''', {}, 'foo', 'foo.py', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except DocTestFailure as f: ... 
failure = f DocTestFailure objects provide access to the test: >>> failure.test is test True As well as to the example: >>> failure.example.want '2\n' and the actual output: >>> failure.got '1\n' """ self.setUp() runner = DebugRunner(optionflags=self._dt_optionflags, checker=self._dt_checker, verbose=False) runner.run(self._dt_test, clear_globs=False) self.tearDown() def id(self): return self._dt_test.name def __repr__(self): name = self._dt_test.name.split('.') return "%s (%s)" % (name[-1], '.'.join(name[:-1])) __str__ = __repr__ def shortDescription(self): return "Doctest: " + self._dt_test.name class SkipDocTestCase(DocTestCase): def __init__(self): DocTestCase.__init__(self, None) def setUp(self): self.skipTest("DocTestSuite will not work with -O2 and above") def test_skip(self): pass def shortDescription(self): return "Skipping tests from %s" % module.__name__ def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, **options): """ Convert doctest tests for a module to a unittest test suite. This converts each documentation string in a module that contains doctest tests to a unittest test case. If any of the tests in a doc string fail, then the test case fails. An exception is raised showing the name of the file containing the test and a (sometimes approximate) line number. The `module` argument provides the module to be tested. The argument can be either a module or a module name. If no argument is given, the calling module is used. A number of options may be provided as keyword arguments: setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. 
globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. """ if test_finder is None: test_finder = DocTestFinder() module = _normalize_module(module) tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) if not tests and sys.flags.optimize >=2: # Skip doctests when running with -O2 suite = unittest.TestSuite() suite.addTest(SkipDocTestCase()) return suite elif not tests: # Why do we want to do this? Because it reveals a bug that might # otherwise be hidden. raise ValueError(module, "has no tests") tests.sort() suite = unittest.TestSuite() for test in tests: if len(test.examples) == 0: continue if not test.filename: filename = module.__file__ if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] test.filename = filename suite.addTest(DocTestCase(test, **options)) return suite class DocFileCase(DocTestCase): def id(self): return '_'.join(self._dt_test.name.split('.')) def __repr__(self): return self._dt_test.filename __str__ = __repr__ def format_failure(self, err): return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' % (self._dt_test.name, self._dt_test.filename, err) ) def DocFileTest(path, module_relative=True, package=None, globs=None, parser=DocTestParser(), encoding=None, **options): if globs is None: globs = {} else: globs = globs.copy() if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path. doc, path = _load_testfile(path, package, module_relative, encoding or "utf-8") if "__file__" not in globs: globs["__file__"] = path # Find the file and read it. name = os.path.basename(path) # Convert it to a test, and wrap it in a DocFileCase. test = parser.get_doctest(doc, globs, name, path, 0) return DocFileCase(test, **options) def DocFileSuite(*paths, **kw): """A unittest suite for one or more doctest files. 
The path to each doctest file is given as a string; the interpretation of that string depends on the keyword argument "module_relative". A number of options may be provided as keyword arguments: module_relative If "module_relative" is True, then the given file paths are interpreted as os-independent module-relative paths. By default, these paths are relative to the calling module's directory; but if the "package" argument is specified, then they are relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and may not be an absolute path (i.e., it may not begin with "/"). If "module_relative" is False, then the given file paths are interpreted as os-specific paths. These paths may be absolute or relative (to the current working directory). package A Python package or the name of a Python package whose directory should be used as the base directory for module relative paths. If "package" is not specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. parser A DocTestParser (or subclass) that should be used to extract tests from the files. encoding An encoding that will be used to convert the files to unicode. 
""" suite = unittest.TestSuite() # We do this here so that _normalize_module is called at the right # level. If it were called in DocFileTest, then this function # would be the caller and we might guess the package incorrectly. if kw.get('module_relative', True): kw['package'] = _normalize_module(kw.get('package')) for path in paths: suite.addTest(DocFileTest(path, **kw)) return suite ###################################################################### ## 8. Debugging Support ###################################################################### def script_from_examples(s): r"""Extract script from text with examples. Converts text with examples to a Python script. Example input is converted to regular code. Example output and all other words are converted to comments: >>> text = ''' ... Here are examples of simple math. ... ... Python has super accurate integer addition ... ... >>> 2 + 2 ... 5 ... ... And very friendly error messages: ... ... >>> 1/0 ... To Infinity ... And ... Beyond ... ... You can use logic if you want: ... ... >>> if 0: ... ... blah ... ... blah ... ... ... ... Ho hum ... ''' >>> print(script_from_examples(text)) # Here are examples of simple math. # # Python has super accurate integer addition # 2 + 2 # Expected: ## 5 # # And very friendly error messages: # 1/0 # Expected: ## To Infinity ## And ## Beyond # # You can use logic if you want: # if 0: blah blah # # Ho hum <BLANKLINE> """ output = [] for piece in DocTestParser().parse(s): if isinstance(piece, Example): # Add the example's source code (strip trailing NL) output.append(piece.source[:-1]) # Add the expected output: want = piece.want if want: output.append('# Expected:') output += ['## '+l for l in want.split('\n')[:-1]] else: # Add non-example text. output += [_comment_line(l) for l in piece.split('\n')[:-1]] # Trim junk on both ends. while output and output[-1] == '#': output.pop() while output and output[0] == '#': output.pop(0) # Combine the output, and return it. 
# Add a courtesy newline to prevent exec from choking (see bug #1172785) return '\n'.join(output) + '\n' def testsource(module, name): """Extract the test sources from a doctest docstring as a script. Provide the module (or dotted name of the module) containing the test to be debugged and the name (within the module) of the object with the doc string with tests to be debugged. """ module = _normalize_module(module) tests = DocTestFinder().find(module) test = [t for t in tests if t.name == name] if not test: raise ValueError(name, "not found in tests") test = test[0] testsrc = script_from_examples(test.docstring) return testsrc def debug_src(src, pm=False, globs=None): """Debug a single doctest docstring, in argument `src`'""" testsrc = script_from_examples(src) debug_script(testsrc, pm, globs) def debug_script(src, pm=False, globs=None): "Debug a test script. `src` is the script, as a string." import pdb if globs: globs = globs.copy() else: globs = {} if pm: try: exec(src, globs, globs) except: print(sys.exc_info()[1]) p = pdb.Pdb(nosigint=True) p.reset() p.interaction(None, sys.exc_info()[2]) else: pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) def debug(module, name, pm=False): """Debug a single doctest docstring. Provide the module (or dotted name of the module) containing the test to be debugged and the name (within the module) of the object with the docstring with tests to be debugged. """ module = _normalize_module(module) testsrc = testsource(module, name) debug_script(testsrc, pm, module.__dict__) ###################################################################### ## 9. Example Usage ###################################################################### class _TestClass: """ A pointless class, for sanity-checking of docstring testing. Methods: square() get() >>> _TestClass(13).get() + _TestClass(-12).get() 1 >>> hex(_TestClass(13).square().get()) '0xa9' """ def __init__(self, val): """val -> _TestClass object with associated value val. 
>>> t = _TestClass(123) >>> print(t.get()) 123 """ self.val = val def square(self): """square() -> square TestClass's associated value >>> _TestClass(13).square().get() 169 """ self.val = self.val ** 2 return self def get(self): """get() -> return TestClass's associated value. >>> x = _TestClass(-42) >>> print(x.get()) -42 """ return self.val __test__ = {"_TestClass": _TestClass, "string": r""" Example of a string object, searched as-is. >>> x = 1; y = 2 >>> x + y, x * y (3, 2) """, "bool-int equivalence": r""" In 2.2, boolean expressions displayed 0 or 1. By default, we still accept them. This can be disabled by passing DONT_ACCEPT_TRUE_FOR_1 to the new optionflags argument. >>> 4 == 4 1 >>> 4 == 4 True >>> 4 > 4 0 >>> 4 > 4 False """, "blank lines": r""" Blank lines can be marked with <BLANKLINE>: >>> print('foo\n\nbar\n') foo <BLANKLINE> bar <BLANKLINE> """, "ellipsis": r""" If the ellipsis flag is used, then '...' can be used to elide substrings in the desired output: >>> print(list(range(1000))) #doctest: +ELLIPSIS [0, 1, 2, ..., 999] """, "whitespace normalization": r""" If the whitespace normalization flag is used, then differences in whitespace are ignored. >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] """, } def _test(): testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-'] if not testfiles: name = os.path.basename(sys.argv[0]) if '__loader__' in globals(): # python -m name, _ = os.path.splitext(name) print("usage: {0} [-v] file ...".format(name)) return 2 for filename in testfiles: if filename.endswith(".py"): # It is a module -- insert its dir into sys.path and try to # import it. If it is part of a package, that possibly # won't work because of package imports. 
dirname, filename = os.path.split(filename) sys.path.insert(0, dirname) m = __import__(filename[:-3]) del sys.path[0] failures, _ = testmod(m) else: failures, _ = testfile(filename, module_relative=False) if failures: return 1 return 0 if __name__ == "__main__": sys.exit(_test())
apache-2.0
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/boto/dynamodb/schema.py
185
3978
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#


class Schema(object):
    """
    Represents a DynamoDB schema.

    :ivar hash_key_name: The name of the hash key of the schema.
    :ivar hash_key_type: The DynamoDB type specification for the
        hash key of the schema.
    :ivar range_key_name: The name of the range key of the schema
        or None if no range key is defined.
    :ivar range_key_type: The DynamoDB type specification for the
        range key of the schema or None if no range key is defined.
    :ivar dict: The underlying Python dictionary that needs to be
        passed to Layer1 methods.
    """

    def __init__(self, schema_dict):
        # ``schema_dict`` is the Layer1-style schema mapping, e.g.
        # {'HashKeyElement': {'AttributeName': ..., 'AttributeType': ...},
        #  'RangeKeyElement': {...}}; 'RangeKeyElement' is optional.
        self._dict = schema_dict

    def __repr__(self):
        # Include the range key in the representation only when present.
        if self.range_key_name:
            return 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name)
        return 'Schema(%s)' % self.hash_key_name

    @classmethod
    def create(cls, hash_key, range_key=None):
        """Convenience method to create a schema object.

        Example usage::

            schema = Schema.create(hash_key=('foo', 'N'))
            schema2 = Schema.create(hash_key=('foo', 'N'),
                                    range_key=('bar', 'S'))

        :type hash_key: tuple
        :param hash_key: A tuple of (hash_key_name, hash_key_type)

        :type range_key: tuple
        :param range_key: A tuple of (range_key_name, range_key_type)
        """
        schema_dict = {
            'HashKeyElement': {
                'AttributeName': hash_key[0],
                'AttributeType': hash_key[1],
            }
        }
        if range_key is not None:
            schema_dict['RangeKeyElement'] = {
                'AttributeName': range_key[0],
                'AttributeType': range_key[1],
            }
        # Construct directly from the finished dict rather than mutating
        # the private ``_dict`` of a half-initialized ``cls(None)``.
        return cls(schema_dict)

    @property
    def dict(self):
        # The raw dictionary in the form expected by Layer1 calls.
        return self._dict

    @property
    def hash_key_name(self):
        return self._dict['HashKeyElement']['AttributeName']

    @property
    def hash_key_type(self):
        return self._dict['HashKeyElement']['AttributeType']

    @property
    def range_key_name(self):
        # None when the schema has no range key.
        if 'RangeKeyElement' in self._dict:
            return self._dict['RangeKeyElement']['AttributeName']
        return None

    @property
    def range_key_type(self):
        # None when the schema has no range key.
        if 'RangeKeyElement' in self._dict:
            return self._dict['RangeKeyElement']['AttributeType']
        return None

    def __eq__(self, other):
        # Schemas are equal when all four key attributes match.
        return (self.hash_key_name == other.hash_key_name and
                self.hash_key_type == other.hash_key_type and
                self.range_key_name == other.range_key_name and
                self.range_key_type == other.range_key_type)

    def __ne__(self, other):
        # Explicit __ne__ so ``!=`` agrees with ``==`` under Python 2 as
        # well; Python 2 does not derive __ne__ from __eq__.
        return not self.__eq__(other)
agpl-3.0
yewang15215/django
tests/template_tests/syntax_tests/test_invalid_string.py
440
2310
from django.test import SimpleTestCase

from ..utils import setup


class InvalidStringTests(SimpleTestCase):
    """Rendering of unresolvable variables, with and without string_if_invalid.

    Engines configured with ``string_if_invalid`` substitute 'INVALID' for
    unresolvable variables; otherwise the empty string (or a filter's
    fallback) is used.
    """

    libraries = {'i18n': 'django.templatetags.i18n'}

    @setup({'invalidstr01': '{{ var|default:"Foo" }}'})
    def test_invalidstr01(self):
        # |default applies to the invalid variable unless string_if_invalid
        # replaces it first.
        rendered = self.engine.render_to_string('invalidstr01')
        expected = 'INVALID' if self.engine.string_if_invalid else 'Foo'
        self.assertEqual(rendered, expected)

    @setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
    def test_invalidstr02(self):
        # An invalid variable is not None, so |default_if_none does not fire.
        rendered = self.engine.render_to_string('invalidstr02')
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)

    @setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
    def test_invalidstr03(self):
        # Looping over an invalid variable produces no iterations.
        rendered = self.engine.render_to_string('invalidstr03')
        self.assertEqual(rendered, '')

    @setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
    def test_invalidstr04(self):
        # An invalid variable is falsy in {% if %}.
        rendered = self.engine.render_to_string('invalidstr04')
        self.assertEqual(rendered, 'No')

    @setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
    def test_invalidstr04_2(self):
        # |default inside {% if %} supplies a truthy fallback.
        rendered = self.engine.render_to_string('invalidstr04_2')
        self.assertEqual(rendered, 'Yes')

    @setup({'invalidstr05': '{{ var }}'})
    def test_invalidstr05(self):
        rendered = self.engine.render_to_string('invalidstr05')
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)

    @setup({'invalidstr06': '{{ var.prop }}'})
    def test_invalidstr06(self):
        # Attribute lookup on an invalid variable behaves like the variable.
        rendered = self.engine.render_to_string('invalidstr06')
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)

    @setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
    def test_invalidstr07(self):
        # Invalid variables inside {% blocktrans %} follow the same rules.
        rendered = self.engine.render_to_string('invalidstr07')
        expected = 'INVALID' if self.engine.string_if_invalid else ''
        self.assertEqual(rendered, expected)
bsd-3-clause
elitak/catalyst
catalyst/base/stagebase.py
1
61101
import os import imp import shutil import sys from stat import ST_UID, ST_GID, ST_MODE # for convienience pjoin = os.path.join from snakeoil import fileutils from DeComp.compress import CompressMap from catalyst import log from catalyst.defaults import (SOURCE_MOUNT_DEFAULTS, TARGET_MOUNT_DEFAULTS, PORT_LOGDIR_CLEAN) from catalyst.support import (CatalystError, file_locate, normpath, cmd, list_bashify, read_makeconf, ismount, file_check) from catalyst.base.targetbase import TargetBase from catalyst.base.clearbase import ClearBase from catalyst.base.genbase import GenBase from catalyst.lock import LockDir, LockInUse from catalyst.fileops import ensure_dirs, pjoin from catalyst.base.resume import AutoResume if sys.version_info[0] >= 3: py_input = input else: py_input = raw_input # pylint: disable=undefined-variable class StageBase(TargetBase, ClearBase, GenBase): """ This class does all of the chroot setup, copying of files, etc. It is the driver class for pretty much everything that Catalyst does. """ def __init__(self,myspec,addlargs): self.required_values.extend(["version_stamp","target","subarch",\ "rel_type","profile","snapshot","source_subpath"]) self.valid_values.extend(["version_stamp","target","subarch", "rel_type","profile","snapshot","source_subpath","portage_confdir", "cflags","cxxflags","fcflags","fflags","ldflags","asflags","cbuild","hostuse","portage_overlay", "distcc_hosts","makeopts","pkgcache_path","kerncache_path", "compression_mode", "decompression_mode"]) self.set_valid_build_kernel_vars(addlargs) TargetBase.__init__(self, myspec, addlargs) GenBase.__init__(self, myspec) ClearBase.__init__(self, myspec) # The semantics of subarchmap and machinemap changed a bit in 2.0.3 to # work better with vapier's CBUILD stuff. I've removed the "monolithic" # machinemap from this file and split up its contents amongst the # various arch/foo.py files. 
# # When register() is called on each module in the arch/ dir, it now # returns a tuple instead of acting on the subarchmap dict that is # passed to it. The tuple contains the values that were previously # added to subarchmap as well as a new list of CHOSTs that go along # with that arch. This allows us to build machinemap on the fly based # on the keys in subarchmap and the values of the 2nd list returned # (tmpmachinemap). # # Also, after talking with vapier. I have a slightly better idea of what # certain variables are used for and what they should be set to. Neither # 'buildarch' or 'hostarch' are used directly, so their value doesn't # really matter. They are just compared to determine if we are # cross-compiling. Because of this, they are just set to the name of the # module in arch/ that the subarch is part of to make things simpler. # The entire build process is still based off of 'subarch' like it was # previously. -agaffney self.makeconf = {} self.archmap = {} self.subarchmap = {} machinemap = {} arch_dir = self.settings["archdir"] + "/" for x in [x[:-3] for x in os.listdir(arch_dir) if x.endswith(".py") and x != "__init__.py"]: try: fh=open(arch_dir + x + ".py") # This next line loads the plugin as a module and assigns it to # archmap[x] self.archmap[x]=imp.load_module(x,fh, arch_dir + x + ".py", (".py", "r", imp.PY_SOURCE)) # This next line registers all the subarches supported in the # plugin tmpsubarchmap, tmpmachinemap = self.archmap[x].register() self.subarchmap.update(tmpsubarchmap) for machine in tmpmachinemap: machinemap[machine] = x for subarch in tmpsubarchmap: machinemap[subarch] = x fh.close() except IOError: # This message should probably change a bit, since everything in # the dir should load just fine. 
If it doesn't, it's probably a # syntax error in the module log.warning("Can't find/load %s.py plugin in %s", x, arch_dir) if "chost" in self.settings: hostmachine = self.settings["chost"].split("-")[0] if hostmachine not in machinemap: raise CatalystError("Unknown host machine type "+hostmachine) self.settings["hostarch"]=machinemap[hostmachine] else: hostmachine = self.settings["subarch"] if hostmachine in machinemap: hostmachine = machinemap[hostmachine] self.settings["hostarch"]=hostmachine if "cbuild" in self.settings: buildmachine = self.settings["cbuild"].split("-")[0] else: buildmachine = os.uname()[4] if buildmachine not in machinemap: raise CatalystError("Unknown build machine type "+buildmachine) self.settings["buildarch"]=machinemap[buildmachine] self.settings["crosscompile"]=(self.settings["hostarch"]!=\ self.settings["buildarch"]) # Call arch constructor, pass our settings try: self.arch=self.subarchmap[self.settings["subarch"]](self.settings) except KeyError: log.critical( 'Invalid subarch: %s\n' 'Choose one of the following:\n' ' %s', self.settings['subarch'], ' '.join(self.subarchmap)) log.notice('Using target: %s', self.settings['target']) # Print a nice informational message if self.settings["buildarch"]==self.settings["hostarch"]: log.info('Building natively for %s', self.settings['hostarch']) elif self.settings["crosscompile"]: log.info('Cross-compiling on %s for different machine type %s', self.settings['buildarch'], self.settings['hostarch']) else: log.info('Building on %s for alternate personality type %s', self.settings['buildarch'], self.settings['hostarch']) # This must be set first as other set_ options depend on this self.set_spec_prefix() # Initialize our (de)compressor's) self.decompressor = CompressMap(self.settings["decompress_definitions"], env=self.env, search_order=self.settings["decompressor_search_order"]) self.accepted_extensions = self.decompressor.search_order_extensions( self.settings["decompressor_search_order"]) 
log.notice("Source file specification matching setting is: %s", self.settings["source_matching"]) log.notice("Accepted source file extensions search order: %s", self.accepted_extensions) # save resources, it is not always needed self.compressor = None # Define all of our core variables self.set_target_profile() self.set_target_subpath() self.set_source_subpath() # Set paths self.set_snapshot_path() self.set_root_path() self.set_source_path() self.set_snapcache_path() self.set_chroot_path() self.set_autoresume_path() self.set_dest_path() self.set_stage_path() self.set_target_path() self.set_controller_file() self.set_default_action_sequence() self.set_use() self.set_cleanables() self.set_iso_volume_id() self.set_build_kernel_vars() self.set_fsscript() self.set_install_mask() self.set_rcadd() self.set_rcdel() self.set_cdtar() self.set_fstype() self.set_fsops() self.set_iso() self.set_packages() self.set_rm() self.set_linuxrc() self.set_busybox_config() self.set_overlay() self.set_portage_overlay() self.set_root_overlay() # This next line checks to make sure that the specified variables exist # on disk. #pdb.set_trace() file_locate(self.settings,["distdir"],\ expand=0) # If we are using portage_confdir, check that as well. if "portage_confdir" in self.settings: file_locate(self.settings,["portage_confdir"],expand=0) # Setup our mount points. # initialize our target mounts. 
self.target_mounts = TARGET_MOUNT_DEFAULTS.copy() self.mounts = ["proc", "dev", "portdir", "distdir", "port_tmpdir"] # initialize our source mounts self.mountmap = SOURCE_MOUNT_DEFAULTS.copy() # update them from settings self.mountmap["distdir"] = self.settings["distdir"] if "snapcache" not in self.settings["options"]: self.mounts.remove("portdir") self.mountmap["portdir"] = None else: self.mountmap["portdir"] = normpath("/".join([ self.settings["snapshot_cache_path"], self.settings["repo_name"], ])) if os.uname()[0] == "Linux": self.mounts.append("devpts") self.mounts.append("shm") self.set_mounts() # Configure any user specified options (either in catalyst.conf or on # the command line). if "pkgcache" in self.settings["options"]: self.set_pkgcache_path() log.info('Location of the package cache is %s', self.settings['pkgcache_path']) self.mounts.append("packagedir") self.mountmap["packagedir"] = self.settings["pkgcache_path"] if "kerncache" in self.settings["options"]: self.set_kerncache_path() log.info('Location of the kerncache is %s', self.settings['kerncache_path']) self.mounts.append("kerncache") self.mountmap["kerncache"] = self.settings["kerncache_path"] if "ccache" in self.settings["options"]: if "CCACHE_DIR" in os.environ: ccdir=os.environ["CCACHE_DIR"] del os.environ["CCACHE_DIR"] else: ccdir="/root/.ccache" if not os.path.isdir(ccdir): raise CatalystError( "Compiler cache support can't be enabled (can't find "+\ ccdir+")") self.mounts.append("ccache") self.mountmap["ccache"] = ccdir # for the chroot: self.env["CCACHE_DIR"] = self.target_mounts["ccache"] if "icecream" in self.settings["options"]: self.mounts.append("icecream") self.mountmap["icecream"] = self.settings["icecream"] self.env["PATH"] = self.target_mounts["icecream"] + ":" + \ self.env["PATH"] if "port_logdir" in self.settings: self.mounts.append("port_logdir") self.mountmap["port_logdir"] = self.settings["port_logdir"] self.env["PORT_LOGDIR"] = self.settings["port_logdir"] 
self.env["PORT_LOGDIR_CLEAN"] = PORT_LOGDIR_CLEAN def override_cbuild(self): if "CBUILD" in self.makeconf: self.settings["CBUILD"]=self.makeconf["CBUILD"] def override_chost(self): if "CHOST" in self.makeconf: self.settings["CHOST"]=self.makeconf["CHOST"] def override_cflags(self): if "CFLAGS" in self.makeconf: self.settings["CFLAGS"]=self.makeconf["CFLAGS"] def override_cxxflags(self): if "CXXFLAGS" in self.makeconf: self.settings["CXXFLAGS"]=self.makeconf["CXXFLAGS"] def override_fcflags(self): if "FCFLAGS" in self.makeconf: self.settings["FCFLAGS"]=self.makeconf["FCFLAGS"] def override_fflags(self): if "FFLAGS" in self.makeconf: self.settings["FFLAGS"]=self.makeconf["FFLAGS"] def override_ldflags(self): if "LDFLAGS" in self.makeconf: self.settings["LDFLAGS"]=self.makeconf["LDFLAGS"] def override_asflags(self): if "ASFLAGS" in self.makeconf: self.settings["ASFLAGS"]=self.makeconf["ASFLAGS"] def set_install_mask(self): if "install_mask" in self.settings: if not isinstance(self.settings['install_mask'], str): self.settings["install_mask"]=\ ' '.join(self.settings["install_mask"]) def set_spec_prefix(self): self.settings["spec_prefix"]=self.settings["target"] def set_target_profile(self): self.settings["target_profile"]=self.settings["profile"] def set_target_subpath(self): self.settings["target_subpath"]=self.settings["rel_type"]+"/"+\ self.settings["target"]+"-"+self.settings["subarch"]+"-"+\ self.settings["version_stamp"] +'/' def set_source_subpath(self): if not isinstance(self.settings['source_subpath'], str): raise CatalystError( "source_subpath should have been a string. 
Perhaps you have " +\ "something wrong in your spec file?") def set_pkgcache_path(self): if "pkgcache_path" in self.settings: if not isinstance(self.settings['pkgcache_path'], str): self.settings["pkgcache_path"]=\ normpath(self.settings["pkgcache_path"]) else: self.settings["pkgcache_path"]=\ normpath(self.settings["storedir"]+"/packages/"+\ self.settings["target_subpath"]+"/") def set_kerncache_path(self): if "kerncache_path" in self.settings: if not isinstance(self.settings['kerncache_path'], str): self.settings["kerncache_path"]=\ normpath(self.settings["kerncache_path"]) else: self.settings["kerncache_path"]=normpath(self.settings["storedir"]+\ "/kerncache/"+self.settings["target_subpath"]) def set_target_path(self): self.settings["target_path"]=normpath(self.settings["storedir"]+\ "/builds/"+self.settings["target_subpath"]) if "autoresume" in self.settings["options"]\ and self.resume.is_enabled("setup_target_path"): log.notice('Resume point detected, skipping target path setup operation...') else: self.resume.enable("setup_target_path") ensure_dirs(self.settings["storedir"] + "/builds") def set_fsscript(self): if self.settings["spec_prefix"]+"/fsscript" in self.settings: self.settings["fsscript"]=\ self.settings[self.settings["spec_prefix"]+"/fsscript"] del self.settings[self.settings["spec_prefix"]+"/fsscript"] def set_rcadd(self): if self.settings["spec_prefix"]+"/rcadd" in self.settings: self.settings["rcadd"]=\ self.settings[self.settings["spec_prefix"]+"/rcadd"] del self.settings[self.settings["spec_prefix"]+"/rcadd"] def set_rcdel(self): if self.settings["spec_prefix"]+"/rcdel" in self.settings: self.settings["rcdel"]=\ self.settings[self.settings["spec_prefix"]+"/rcdel"] del self.settings[self.settings["spec_prefix"]+"/rcdel"] def set_cdtar(self): if self.settings["spec_prefix"]+"/cdtar" in self.settings: self.settings["cdtar"]=\ normpath(self.settings[self.settings["spec_prefix"]+"/cdtar"]) del self.settings[self.settings["spec_prefix"]+"/cdtar"] def 
set_iso(self): if self.settings["spec_prefix"]+"/iso" in self.settings: if self.settings[self.settings["spec_prefix"]+"/iso"].startswith('/'): self.settings["iso"]=\ normpath(self.settings[self.settings["spec_prefix"]+"/iso"]) else: # This automatically prepends the build dir to the ISO output path # if it doesn't start with a / self.settings["iso"] = normpath(self.settings["storedir"] + \ "/builds/" + self.settings["rel_type"] + "/" + \ self.settings[self.settings["spec_prefix"]+"/iso"]) del self.settings[self.settings["spec_prefix"]+"/iso"] def set_fstype(self): if self.settings["spec_prefix"]+"/fstype" in self.settings: self.settings["fstype"]=\ self.settings[self.settings["spec_prefix"]+"/fstype"] del self.settings[self.settings["spec_prefix"]+"/fstype"] if "fstype" not in self.settings: self.settings["fstype"]="normal" for x in self.valid_values: if x == self.settings["spec_prefix"]+"/fstype": log.info('%s/fstype is being set to the default of "normal"', self.settings['spec_prefix']) def set_fsops(self): if "fstype" in self.settings: self.valid_values.append("fsops") if self.settings["spec_prefix"]+"/fsops" in self.settings: self.settings["fsops"]=\ self.settings[self.settings["spec_prefix"]+"/fsops"] del self.settings[self.settings["spec_prefix"]+"/fsops"] def set_source_path(self): if "seedcache" in self.settings["options"]\ and os.path.isdir(normpath(self.settings["storedir"]+"/tmp/"+\ self.settings["source_subpath"]+"/")): self.settings["source_path"] = normpath(self.settings["storedir"] + "/tmp/" + self.settings["source_subpath"] + "/") else: log.debug('Checking source path existence and ' 'get the final filepath. 
subpath: %s', self.settings["source_subpath"]) self.settings["source_path"] = file_check( normpath(self.settings["storedir"] + "/builds/" + self.settings["source_subpath"]), self.accepted_extensions, self.settings["source_matching"] in ["strict"] ) log.debug('Source path returned from file_check is: %s', self.settings["source_path"]) if os.path.isfile(self.settings["source_path"]): # XXX: Is this even necessary if the previous check passes? if os.path.exists(self.settings["source_path"]): self.settings["source_path_hash"] = \ self.settings["hash_map"].generate_hash( self.settings["source_path"], hash_=self.settings["hash_function"]) log.notice('Source path set to %s', self.settings['source_path']) def set_dest_path(self): if "root_path" in self.settings: self.settings["destpath"]=normpath(self.settings["chroot_path"]+\ self.settings["root_path"]) else: self.settings["destpath"]=normpath(self.settings["chroot_path"]) def set_cleanables(self): self.settings["cleanables"]=["/etc/resolv.conf","/var/tmp/*","/tmp/*",\ "/root/*", self.settings["portdir"]] def set_snapshot_path(self): self.settings["snapshot_path"]= file_check( normpath(self.settings["storedir"]+\ "/snapshots/" + self.settings["snapshot_name"] + self.settings["snapshot"]), self.accepted_extensions, self.settings["source_matching"] is "strict" ) log.info('SNAPSHOT_PATH set to: %s', self.settings['snapshot_path']) self.settings["snapshot_path_hash"] = \ self.settings["hash_map"].generate_hash( self.settings["snapshot_path"], hash_=self.settings["hash_function"]) def set_snapcache_path(self): self.settings["snapshot_cache_path"]=\ normpath(pjoin(self.settings["snapshot_cache"], self.settings["snapshot"])) if "snapcache" in self.settings["options"]: self.settings["snapshot_cache_path"] = \ normpath(pjoin(self.settings["snapshot_cache"], self.settings["snapshot"])) self.snapcache_lock=\ LockDir(self.settings["snapshot_cache_path"]) log.info('Setting snapshot cache to %s', self.settings['snapshot_cache_path']) 
def set_chroot_path(self):
    """
    NOTE: the trailing slash has been removed
    Things *could* break if you don't use a proper join()
    """
    self.settings["chroot_path"] = normpath(self.settings["storedir"] +
        "/tmp/" + self.settings["target_subpath"].rstrip('/'))
    # Lock the chroot so concurrent catalyst runs cannot clobber each other.
    self.chroot_lock = LockDir(self.settings["chroot_path"])

def set_autoresume_path(self):
    # Per-target hidden directory holding resume-point markers.
    self.settings["autoresume_path"] = normpath(pjoin(
        self.settings["storedir"], "tmp", self.settings["rel_type"],
        ".autoresume-%s-%s-%s"
        % (self.settings["target"], self.settings["subarch"],
            self.settings["version_stamp"])
        ))
    if "autoresume" in self.settings["options"]:
        log.info('The autoresume path is %s',
            self.settings['autoresume_path'])
    # Always created: other actions query self.resume even when the
    # autoresume option is off.
    self.resume = AutoResume(self.settings["autoresume_path"], mode=0o755)

def set_controller_file(self):
    # Target-specific shell script that drives work inside the chroot.
    self.settings["controller_file"] = normpath(self.settings["sharedir"] +
        "/targets/" + self.settings["target"] + "/" + self.settings["target"] +
        "-controller.sh")

def set_iso_volume_id(self):
    # ISO9660 volume IDs are capped at 32 characters.
    if self.settings["spec_prefix"] + "/volid" in self.settings:
        self.settings["iso_volume_id"] = \
            self.settings[self.settings["spec_prefix"] + "/volid"]
        if len(self.settings["iso_volume_id"]) > 32:
            raise CatalystError(
                "ISO volume ID must not exceed 32 characters.")
    else:
        self.settings["iso_volume_id"] = "catalyst " + self.settings["snapshot"]

def set_default_action_sequence(self):
    """ Default action sequence for run method.

    This method sets the optional purgeonly action sequence and returns.
    Or it calls the normal set_action_sequence() for the target stage.
    """
    if "purgeonly" in self.settings["options"]:
        self.settings["action_sequence"] = ["remove_chroot"]
        return
    self.set_action_sequence()

def set_action_sequence(self):
    """Set basic stage1, 2, 3 action sequences"""
    self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
        "setup_confdir", "portage_overlay",
        "base_dirs", "bind", "chroot_setup", "setup_environment",
        "run_local", "preclean", "unbind", "clean"]
    self.set_completion_action_sequences()

def set_completion_action_sequences(self):
    # "fetch" targets produce no tarball; every other target captures one.
    if "fetch" not in self.settings["options"]:
        self.settings["action_sequence"].append("capture")
    if "keepwork" in self.settings["options"]:
        self.settings["action_sequence"].append("clear_autoresume")
    elif "seedcache" in self.settings["options"]:
        # Keep the chroot around so it can seed the next build.
        self.settings["action_sequence"].append("remove_autoresume")
    else:
        self.settings["action_sequence"].append("remove_autoresume")
        self.settings["action_sequence"].append("remove_chroot")
    return

def set_use(self):
    # Normalize USE flags to a list; may come from <spec_prefix>/use.
    if self.settings["spec_prefix"] + "/use" in self.settings:
        self.settings["use"] = \
            self.settings[self.settings["spec_prefix"] + "/use"]
        del self.settings[self.settings["spec_prefix"] + "/use"]
    if "use" not in self.settings:
        self.settings["use"] = ""
    if isinstance(self.settings['use'], str):
        self.settings["use"] = self.settings["use"].split()

    # Force bindist when options ask for it
    if "BINDIST" in self.settings:
        self.settings["use"].append("bindist")

def set_stage_path(self):
    self.settings["stage_path"] = normpath(self.settings["chroot_path"])

def set_mounts(self):
    # Hook for subclasses; base target mounts nothing extra.
    pass

def set_packages(self):
    # Hook for subclasses; base target installs no extra packages.
    pass

def set_rm(self):
    # Normalize <spec_prefix>/rm to a list of paths.
    if self.settings["spec_prefix"] + "/rm" in self.settings:
        if isinstance(self.settings[self.settings['spec_prefix'] + '/rm'], str):
            self.settings[self.settings["spec_prefix"] + "/rm"] = \
                self.settings[self.settings["spec_prefix"] + "/rm"].split()

def set_linuxrc(self):
    if self.settings["spec_prefix"] + "/linuxrc" in self.settings:
        if isinstance(self.settings[self.settings['spec_prefix'] + '/linuxrc'], str):
            self.settings["linuxrc"] = \
                self.settings[self.settings["spec_prefix"] + "/linuxrc"]
            del self.settings[self.settings["spec_prefix"] + "/linuxrc"]

def set_busybox_config(self):
    if self.settings["spec_prefix"] + "/busybox_config" in self.settings:
        if isinstance(self.settings[self.settings['spec_prefix'] + '/busybox_config'], str):
            self.settings["busybox_config"] = \
                self.settings[self.settings["spec_prefix"] + "/busybox_config"]
            del self.settings[self.settings["spec_prefix"] + "/busybox_config"]

def set_portage_overlay(self):
    # Normalize overlay list and announce it.
    if "portage_overlay" in self.settings:
        if isinstance(self.settings['portage_overlay'], str):
            self.settings["portage_overlay"] = \
                self.settings["portage_overlay"].split()
        log.info('portage_overlay directories are set to: %s',
            ' '.join(self.settings['portage_overlay']))

def set_overlay(self):
    if self.settings["spec_prefix"] + "/overlay" in self.settings:
        if isinstance(self.settings[self.settings['spec_prefix'] + '/overlay'], str):
            self.settings[self.settings["spec_prefix"] + "/overlay"] = \
                self.settings[self.settings["spec_prefix"] +
                "/overlay"].split()

def set_root_overlay(self):
    if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
        if isinstance(self.settings[self.settings['spec_prefix'] + '/root_overlay'], str):
            self.settings[self.settings["spec_prefix"] + "/root_overlay"] = \
                self.settings[self.settings["spec_prefix"] +
                "/root_overlay"].split()

def set_root_path(self):
    """ ROOT= variable for emerges """
    self.settings["root_path"] = "/"

def set_valid_build_kernel_vars(self, addlargs):
    # Whitelist per-kernel spec keys for every kernel named in boot/kernel.
    if "boot/kernel" in addlargs:
        if isinstance(addlargs['boot/kernel'], str):
            loopy = [addlargs["boot/kernel"]]
        else:
            loopy = addlargs["boot/kernel"]

        for x in loopy:
            self.valid_values.append("boot/kernel/" + x + "/aliases")
            self.valid_values.append("boot/kernel/" + x + "/config")
            self.valid_values.append("boot/kernel/" + x + "/console")
            self.valid_values.append("boot/kernel/" + x + "/extraversion")
            self.valid_values.append("boot/kernel/" + x + "/gk_action")
            self.valid_values.append("boot/kernel/" + x + "/gk_kernargs")
self.valid_values.append("boot/kernel/" + x + "/initramfs_overlay")
self.valid_values.append("boot/kernel/" + x + "/machine_type")
self.valid_values.append("boot/kernel/" + x + "/sources")
self.valid_values.append("boot/kernel/" + x + "/softlevel")
self.valid_values.append("boot/kernel/" + x + "/use")
self.valid_values.append("boot/kernel/" + x + "/packages")
self.valid_values.append("boot/kernel/" + x + "/kernelopts")
if "boot/kernel/" + x + "/packages" in addlargs:
    # Normalize the per-kernel package list to a list.
    if isinstance(addlargs['boot/kernel/' + x + '/packages'], str):
        addlargs["boot/kernel/" + x + "/packages"] = \
            [addlargs["boot/kernel/" + x + "/packages"]]

def set_build_kernel_vars(self):
    # Hoist <spec_prefix>/gk_mainargs up to the generic "gk_mainargs" key.
    if self.settings["spec_prefix"] + "/gk_mainargs" in self.settings:
        self.settings["gk_mainargs"] = \
            self.settings[self.settings["spec_prefix"] + "/gk_mainargs"]
        del self.settings[self.settings["spec_prefix"] + "/gk_mainargs"]

def kill_chroot_pids(self):
    log.info('Checking for processes running in chroot and killing them.')

    # Force environment variables to be exported so script can see them
    self.setup_environment()

    killcmd = normpath(self.settings["sharedir"] + self.settings["shdir"] +
        "/support/kill-chroot-pids.sh")
    if os.path.exists(killcmd):
        cmd(killcmd, "kill-chroot-pids script failed.", env=self.env)

def mount_safety_check(self):
    """
    Check and verify that none of our paths in mypath are mounted. We don't
    want to clean up with things still mounted, and this allows us to check.
    Returns 1 on ok, 0 on "something is still mounted" case.
    """
    if not os.path.exists(self.settings["chroot_path"]):
        return

    log.debug('self.mounts = %s', self.mounts)
    for x in self.mounts:
        target = normpath(self.settings["chroot_path"] + self.target_mounts[x])
        log.debug('mount_safety_check() x = %s %s', x, target)
        if not os.path.exists(target):
            continue

        if ismount(target):
            # Something is still mounted
            try:
                log.warning('%s is still mounted; performing auto-bind-umount...', target)
                # Try to umount stuff ourselves
                self.unbind()
                if ismount(target):
                    raise CatalystError("Auto-unbind failed for " + target)
                else:
                    log.notice('Auto-unbind successful...')
            except CatalystError:
                raise CatalystError("Unable to auto-unbind " + target)

def unpack(self):
    # Decide whether the seed stage must be (re)extracted, honoring the
    # seedcache and autoresume options; then perform the extraction.
    _unpack = True
    clst_unpack_hash = self.resume.get("unpack")

    unpack_info = {
        'source': self.settings["source_path"],
        "destination": self.settings["chroot_path"],
        'mode': None,
        'auto-ext': False,
        }
    display_msg = (
        'Starting %(mode)s from %(source)s\nto '
        '%(destination)s (this may take some time) ..')

    error_msg = "'%(mode)s' extraction of %(source)s to %(destination)s failed."

    if "seedcache" in self.settings["options"]:
        if os.path.isdir(unpack_info["source"]):
            # SEEDCACHE Is a directory, use rsync
            unpack_info['mode'] = "rsync"
        else:
            # SEEDCACHE is a not a directory, try untar'ing
            log.notice('Referenced SEEDCACHE does not appear to be a directory, trying to untar...')
            unpack_info['source'] = file_check(unpack_info['source'])
    else:
        # No SEEDCACHE, use tar
        unpack_info['source'] = file_check(unpack_info['source'])
    # endif "seedcache"

    if "autoresume" in self.settings["options"]:
        if os.path.isdir(self.settings["source_path"]) \
                and self.resume.is_enabled("unpack"):
            # Autoresume is valid, SEEDCACHE is valid
            _unpack = False
            invalid_snapshot = False
        elif os.path.isfile(self.settings["source_path"]) \
                and self.settings["source_path_hash"] == clst_unpack_hash:
            # Autoresume is valid, tarball is valid
            _unpack = False
            invalid_snapshot = False
        elif os.path.isdir(self.settings["source_path"]) \
                and self.resume.is_disabled("unpack"):
            # Autoresume is invalid, SEEDCACHE
            _unpack = True
            invalid_snapshot = True
            # check and reset the unpack_info['source']
            unpack_info['source'] = file_check(unpack_info['source'])
        elif os.path.isfile(self.settings["source_path"]) \
                and self.settings["source_path_hash"] != clst_unpack_hash:
            # Autoresume is invalid, tarball
            _unpack = True
            invalid_snapshot = True
            unpack_info['source'] = file_check(unpack_info['source'])
    else:
        # No autoresume, SEEDCACHE
        if "seedcache" in self.settings["options"]:
            # SEEDCACHE so let's run rsync and let it clean up
            if os.path.isdir(self.settings["source_path"]):
                _unpack = True
                invalid_snapshot = False
            elif os.path.isfile(self.settings["source_path"]):
                # Tarball so unpack and remove anything already there
                _unpack = True
                invalid_snapshot = True
        # No autoresume, no SEEDCACHE
        else:
            # Tarball so unpack and remove anything already there
            if os.path.isfile(self.settings["source_path"]):
                _unpack = True
                invalid_snapshot = True
            elif os.path.isdir(self.settings["source_path"]):
                # We should never reach this, so something is very wrong
                raise CatalystError(
                    "source path is a dir but seedcache is not enabled: %s"
                    % self.settings["source_path"])

    if _unpack:
        self.mount_safety_check()

        if invalid_snapshot:
            if "autoresume" in self.settings["options"]:
                log.notice('No Valid Resume point detected, cleaning up...')

            self.clear_autoresume()
            self.clear_chroot()

        ensure_dirs(self.settings["chroot_path"])
        # World-writable sticky /tmp inside the chroot.
        ensure_dirs(self.settings["chroot_path"] + "/tmp", mode=1777)

        if "pkgcache" in self.settings["options"]:
            ensure_dirs(self.settings["pkgcache_path"], mode=0o755)

        if "kerncache" in self.settings["options"]:
            ensure_dirs(self.settings["kerncache_path"], mode=0o755)

        log.notice('%s', display_msg % unpack_info)

        # now run the decompressor
        if not self.decompressor.extract(unpack_info):
            log.error('%s', error_msg % unpack_info)

        # Record the resume point (with the source hash when we have one).
        if "source_path_hash" in self.settings:
            self.resume.enable("unpack",
                data=self.settings["source_path_hash"])
        else:
            self.resume.enable("unpack")
    else:
        log.notice('Resume point detected, skipping unpack operation...')

def unpack_snapshot(self):
    # Extract the portage snapshot into the snapshot cache (when enabled)
    # or directly into the chroot; continued in the next chunk.
    unpack = True
    snapshot_hash = self.resume.get("unpack_portage")

    unpack_errmsg = "Error unpacking snapshot using mode %(mode)s"

    unpack_info = {
        'source': self.settings["snapshot_path"],
        'destination': self.settings["snapshot_cache_path"],
        'mode': None,
        'auto-ext': False,
        }

    target_portdir = normpath(self.settings["chroot_path"] +
        self.settings["repo_basedir"] + "/" + self.settings["repo_name"])
    log.info('%s', self.settings['chroot_path'])
    log.info('unpack(), target_portdir = %s', target_portdir)
    if "snapcache" in self.settings["options"]:
        snapshot_cache_hash_path = pjoin(
            self.settings['snapshot_cache_path'], 'catalyst-hash')
        snapshot_cache_hash = fileutils.readfile(snapshot_cache_hash_path, True)
        unpack_info['mode'] = self.decompressor.determine_mode(
            unpack_info['source'])

        cleanup_msg = "Cleaning up invalid snapshot cache at \n\t" + \
            self.settings["snapshot_cache_path"] + \
            " (this can take a long time)..."
        cleanup_errmsg = "Error removing existing snapshot cache directory."
if self.settings["snapshot_path_hash"] == snapshot_cache_hash:
    log.info('Valid snapshot cache, skipping unpack of portage tree...')
    unpack = False
else:
    # NOTE(review): this 'else' pairs with the `if "snapcache" ...` branch
    # opened in the previous chunk — no snapcache, unpack into the chroot.
    cleanup_errmsg = "Error removing existing snapshot directory."
    cleanup_msg = \
        'Cleaning up existing portage tree (this can take a long time)...'
    unpack_info['destination'] = normpath(
        self.settings["chroot_path"] + self.settings["repo_basedir"])
    unpack_info['mode'] = self.decompressor.determine_mode(
        unpack_info['source'])

    if "autoresume" in self.settings["options"] \
            and os.path.exists(target_portdir) \
            and self.resume.is_enabled("unpack_portage") \
            and self.settings["snapshot_path_hash"] == snapshot_hash:
        log.notice('Valid Resume point detected, skipping unpack of portage tree...')
        unpack = False

if unpack:
    if "snapcache" in self.settings["options"]:
        self.snapcache_lock.write_lock()
    if os.path.exists(target_portdir):
        log.info('%s', cleanup_msg)
        cleanup_cmd = "rm -rf " + target_portdir
        log.info('unpack() cleanup_cmd = %s', cleanup_cmd)
        cmd(cleanup_cmd, cleanup_errmsg, env=self.env)
    ensure_dirs(target_portdir, mode=0o755)

    log.notice('Unpacking portage tree (this can take a long time) ...')
    if not self.decompressor.extract(unpack_info):
        log.error('%s', unpack_errmsg % unpack_info)

    if "snapcache" in self.settings["options"]:
        # Persist the snapshot hash so the next run can reuse the cache.
        with open(snapshot_cache_hash_path, 'w') as myf:
            myf.write(self.settings["snapshot_path_hash"])
    else:
        log.info('Setting snapshot autoresume point')
        self.resume.enable("unpack_portage",
            data=self.settings["snapshot_path_hash"])

    if "snapcache" in self.settings["options"]:
        self.snapcache_lock.unlock()

def config_profile_link(self):
    # (Re)create the make.profile symlink inside the chroot.
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("config_profile_link"):
        log.notice('Resume point detected, skipping config_profile_link operation...')
    else:
        # TODO: zmedico and I discussed making this a directory and pushing
        # in a parent file, as well as other user-specified configuration.
        log.info('Configuring profile link...')
        cmd("rm -f " + self.settings["chroot_path"] +
            self.settings["port_conf"] + "/make.profile",
            "Error zapping profile link", env=self.env)
        cmd("mkdir -p " + self.settings["chroot_path"] +
            self.settings["port_conf"])
        cmd("ln -sf ../.." + self.settings["portdir"] + "/profiles/" +
            self.settings["target_profile"] + " " +
            self.settings["chroot_path"] + self.settings["port_conf"] +
            "/make.profile",
            "Error creating profile link", env=self.env)
        self.resume.enable("config_profile_link")

def setup_confdir(self):
    # Sync the user-provided portage config directory into the chroot.
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("setup_confdir"):
        log.notice('Resume point detected, skipping setup_confdir operation...')
    else:
        if "portage_confdir" in self.settings:
            log.info('Configuring %s...', self.settings['port_conf'])
            dest = normpath(self.settings['chroot_path'] + '/' +
                self.settings['port_conf'])
            ensure_dirs(dest)
            # The trailing slashes on both paths are important:
            # We want to make sure rsync copies the dirs into each
            # other and not as subdirs.
            cmd('rsync -a %s/ %s/' % (self.settings['portage_confdir'], dest),
                "Error copying %s" % self.settings["port_conf"],
                env=self.env)
            self.resume.enable("setup_confdir")

def portage_overlay(self):
    """ We copy the contents of our overlays to /usr/local/portage """
    if "portage_overlay" in self.settings:
        for x in self.settings["portage_overlay"]:
            if os.path.exists(x):
                log.info('Copying overlay dir %s', x)
                cmd("mkdir -p " + self.settings["chroot_path"] +
                    self.settings["local_overlay"],
                    "Could not make portage_overlay dir", env=self.env)
                cmd("cp -a " + x + "/* " + self.settings["chroot_path"] +
                    self.settings["local_overlay"],
                    "Could not copy portage_overlay", env=self.env)

def root_overlay(self):
    """ Copy over the root_overlay """
    if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
        for x in self.settings[self.settings["spec_prefix"] +
                "/root_overlay"]:
            if os.path.exists(x):
                log.info('Copying root_overlay: %s', x)
                cmd("rsync -a " + x + "/ " +
                    self.settings["chroot_path"],
                    self.settings["spec_prefix"] + "/root_overlay: " + x +
                    " copy failed.", env=self.env)

def base_dirs(self):
    # Hook for subclasses that need extra directories before binding.
    pass

def bind(self):
    # Bind-mount (or tmpfs-mount) every entry of self.mounts into the chroot.
    for x in self.mounts:
        _cmd = ''
        log.debug('bind(); x = %s', x)
        target = normpath(self.settings["chroot_path"] +
            self.target_mounts[x])
        ensure_dirs(target, mode=0o755)

        if not os.path.exists(self.mountmap[x]):
            if self.mountmap[x] not in ["tmpfs", "shmfs"]:
                ensure_dirs(self.mountmap[x], mode=0o755)

        src = self.mountmap[x]
        log.debug('bind(); src = %s', src)
        if "snapcache" in self.settings["options"] and x == "portdir":
            self.snapcache_lock.read_lock()
        if os.uname()[0] == "FreeBSD":
            if src == "/dev":
                _cmd = "mount -t devfs none " + target
            else:
                _cmd = "mount_nullfs " + src + " " + target
        else:
            if src == "tmpfs":
                if "var_tmpfs_portage" in self.settings:
                    _cmd = "mount -t tmpfs -o size=" + \
                        self.settings["var_tmpfs_portage"] + "G " + \
                        src + " " + target
            elif src == "shmfs":
                _cmd = "mount -t tmpfs -o noexec,nosuid,nodev shm " + target
            else:
                _cmd = "mount --bind " + src + " " + target
        log.debug('bind(); _cmd = %s', _cmd)
        # On failure, unbind whatever was already mounted.
        cmd(_cmd, "Bind mounting Failed", env=self.env,
            fail_func=self.unbind)
    log.debug('bind(); finished :D')

def unbind(self):
    # Unmount everything bind() mounted; retries once after killing
    # lingering chroot processes.  Continued (the final raise) in the
    # next chunk.
    ouch = 0
    mypath = self.settings["chroot_path"]
    myrevmounts = self.mounts[:]
    myrevmounts.reverse()
    # Unmount in reverse order for nested bind-mounts
    for x in myrevmounts:
        target = normpath(mypath + self.target_mounts[x])
        if not os.path.exists(target):
            continue

        if not ismount(target):
            continue

        retval = os.system("umount " + target)

        if retval != 0:
            log.warning('First attempt to unmount failed: %s', target)
            log.warning('Killing any pids still running in the chroot')

            self.kill_chroot_pids()

            retval2 = os.system("umount " + target)
            if retval2 != 0:
                ouch = 1
                log.warning("Couldn't umount bind mount: %s", target)

        if "snapcache" in self.settings["options"] and x == "/usr/portage":
            try:
                # It's possible the snapshot lock object isn't created yet.
                # This is because mount safety check calls unbind before the
                # target is fully initialized
                self.snapcache_lock.unlock()
            except Exception:
                pass
    if ouch:
        # if any bind mounts really failed, then we need to raise
        # this to potentially prevent an upcoming bash stage cleanup script
        # from wiping our bind mounts.
raise CatalystError( "Couldn't umount one or more bind-mounts; aborting for safety.") def chroot_setup(self): self.makeconf=read_makeconf(normpath(self.settings["chroot_path"]+ self.settings["make_conf"])) self.override_cbuild() self.override_chost() self.override_cflags() self.override_cxxflags() self.override_fcflags() self.override_fflags() self.override_ldflags() self.override_asflags() if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("chroot_setup"): log.notice('Resume point detected, skipping chroot_setup operation...') else: log.notice('Setting up chroot...') cmd("cp /etc/resolv.conf " + self.settings["chroot_path"] + "/etc/", "Could not copy resolv.conf into place.",env=self.env) # Copy over the envscript, if applicable if "envscript" in self.settings: if not os.path.exists(self.settings["envscript"]): raise CatalystError( "Can't find envscript " + self.settings["envscript"], print_traceback=True) log.warning( 'Overriding certain env variables may cause catastrophic failure.\n' 'If your build fails look here first as the possible problem.\n' 'Catalyst assumes you know what you are doing when setting these variables.\n' 'Catalyst Maintainers use VERY minimal envscripts, if used at all.\n' 'You have been warned.') cmd("cp "+self.settings["envscript"]+" "+\ self.settings["chroot_path"]+"/tmp/envscript",\ "Could not copy envscript into place.",env=self.env) # Copy over /etc/hosts from the host in case there are any # specialties in there if os.path.exists(self.settings["chroot_path"]+"/etc/hosts"): cmd("mv "+self.settings["chroot_path"]+"/etc/hosts "+\ self.settings["chroot_path"]+"/etc/hosts.catalyst",\ "Could not backup /etc/hosts",env=self.env) cmd("cp /etc/hosts "+self.settings["chroot_path"]+"/etc/hosts",\ "Could not copy /etc/hosts",env=self.env) # Modify and write out make.conf (for the chroot) makepath = normpath(self.settings["chroot_path"] + self.settings["make_conf"]) cmd("rm -f " + makepath,\ "Could not remove " + makepath, 
env=self.env) myf=open(makepath, "w") myf.write("# These settings were set by the catalyst build script " "that automatically\n# built this stage.\n") myf.write("# Please consult " "/usr/share/portage/config/make.conf.example " "for a more\n# detailed example.\n") for flags in ["CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS", "LDFLAGS", "ASFLAGS"]: if not flags in self.settings: continue if flags in ["LDFLAGS", "ASFLAGS"]: myf.write("# %s is unsupported. USE AT YOUR OWN RISK!\n" % flags) if (flags is not "CFLAGS" and self.settings[flags] == self.settings["CFLAGS"]): myf.write('%s="${CFLAGS}"\n' % flags) elif isinstance(self.settings[flags], list): myf.write('%s="%s"\n' % (flags, ' '.join(self.settings[flags]))) else: myf.write('%s="%s"\n' % (flags, self.settings[flags])) if "CBUILD" in self.settings: myf.write("# This should not be changed unless you know exactly" " what you are doing. You\n# should probably be " "using a different stage, instead.\n") myf.write('CBUILD="'+self.settings["CBUILD"]+'"\n') if "CHOST" in self.settings: myf.write("# WARNING: Changing your CHOST is not something " "that should be done lightly.\n# Please consult " "https://wiki.gentoo.org/wiki/Changing_the_CHOST_variable " "before changing.\n") myf.write('CHOST="'+self.settings["CHOST"]+'"\n') # Figure out what our USE vars are for building myusevars=[] if "HOSTUSE" in self.settings: myusevars.extend(self.settings["HOSTUSE"]) if "use" in self.settings: myusevars.extend(self.settings["use"]) if myusevars: myf.write("# These are the USE and USE_EXPAND flags that were " "used for\n# building in addition to what is provided " "by the profile.\n") myusevars = sorted(set(myusevars)) myf.write('USE="' + ' '.join(myusevars) + '"\n') if '-*' in myusevars: log.warning( 'The use of -* in %s/use will cause portage to ignore\n' 'package.use in the profile and portage_confdir.\n' "You've been warned!", self.settings['spec_prefix']) myuseexpandvars={} if "HOSTUSEEXPAND" in self.settings: for hostuseexpand in 
self.settings["HOSTUSEEXPAND"]: myuseexpandvars.update({hostuseexpand:self.settings["HOSTUSEEXPAND"][hostuseexpand]}) if myuseexpandvars: for hostuseexpand in myuseexpandvars: myf.write(hostuseexpand + '="' + ' '.join(myuseexpandvars[hostuseexpand]) + '"\n') myf.write('PORTDIR="%s"\n' % self.settings['portdir']) myf.write('DISTDIR="%s"\n' % self.settings['distdir']) myf.write('PKGDIR="%s"\n' % self.settings['packagedir']) # Setup the portage overlay if "portage_overlay" in self.settings: myf.write('PORTDIR_OVERLAY="/usr/local/portage"\n') # Set default locale for system responses. #478382 myf.write( '\n' '# This sets the language of build output to English.\n' '# Please keep this setting intact when reporting bugs.\n' 'LC_MESSAGES=C\n') myf.close() self.resume.enable("chroot_setup") def fsscript(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("fsscript"): log.notice('Resume point detected, skipping fsscript operation...') else: if "fsscript" in self.settings: if os.path.exists(self.settings["controller_file"]): cmd(self.settings["controller_file"]+\ " fsscript","fsscript script failed.",env=self.env) self.resume.enable("fsscript") def rcupdate(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("rcupdate"): log.notice('Resume point detected, skipping rcupdate operation...') else: if os.path.exists(self.settings["controller_file"]): cmd(self.settings["controller_file"]+" rc-update",\ "rc-update script failed.",env=self.env) self.resume.enable("rcupdate") def clean(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("clean"): log.notice('Resume point detected, skipping clean operation...') else: for x in self.settings["cleanables"]: log.notice('Cleaning chroot: %s', x) cmd("rm -rf "+self.settings["destpath"]+x,"Couldn't clean "+\ x,env=self.env) # Put /etc/hosts back into place if os.path.exists(self.settings["chroot_path"]+"/etc/hosts.catalyst"): cmd("mv -f 
"+self.settings["chroot_path"]+"/etc/hosts.catalyst "+\ self.settings["chroot_path"]+"/etc/hosts",\ "Could not replace /etc/hosts",env=self.env) # Remove our overlay if os.path.exists(self.settings["chroot_path"] + self.settings["local_overlay"]): cmd("rm -rf " + self.settings["chroot_path"] + self.settings["local_overlay"], "Could not remove " + self.settings["local_overlay"], env=self.env) cmd("sed -i '/^PORTDIR_OVERLAY/d' "+self.settings["chroot_path"]+\ self.settings["make_conf"],\ "Could not remove PORTDIR_OVERLAY from make.conf",env=self.env) # Clean up old and obsoleted files in /etc if os.path.exists(self.settings["stage_path"]+"/etc"): cmd("find "+self.settings["stage_path"]+\ "/etc -maxdepth 1 -name \"*-\" | xargs rm -f",\ "Could not remove stray files in /etc",env=self.env) if os.path.exists(self.settings["controller_file"]): cmd(self.settings["controller_file"]+" clean",\ "clean script failed.",env=self.env) self.resume.enable("clean") def empty(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("empty"): log.notice('Resume point detected, skipping empty operation...') else: if self.settings["spec_prefix"]+"/empty" in self.settings: if isinstance(self.settings[self.settings['spec_prefix']+'/empty'], str): self.settings[self.settings["spec_prefix"]+"/empty"]=\ self.settings[self.settings["spec_prefix"]+\ "/empty"].split() for x in self.settings[self.settings["spec_prefix"]+"/empty"]: myemp=self.settings["destpath"]+x if not os.path.isdir(myemp) or os.path.islink(myemp): log.warning('not a directory or does not exist, skipping "empty" operation: %s', x) continue log.info('Emptying directory %s', x) # stat the dir, delete the dir, recreate the dir and set # the proper perms and ownership mystat=os.stat(myemp) shutil.rmtree(myemp) ensure_dirs(myemp, mode=0o755) os.chown(myemp,mystat[ST_UID],mystat[ST_GID]) os.chmod(myemp,mystat[ST_MODE]) self.resume.enable("empty") def remove(self): if "autoresume" in self.settings["options"] \ 
and self.resume.is_enabled("remove"):
    log.notice('Resume point detected, skipping remove operation...')
else:
    if self.settings["spec_prefix"] + "/rm" in self.settings:
        for x in self.settings[self.settings["spec_prefix"] + "/rm"]:
            # We're going to shell out for all these cleaning
            # operations, so we get easy glob handling.
            log.notice('livecd: removing %s', x)
            os.system("rm -rf " + self.settings["chroot_path"] + x)
    try:
        if os.path.exists(self.settings["controller_file"]):
            cmd(self.settings["controller_file"] +
                " clean", "Clean failed.", env=self.env)
            self.resume.enable("remove")
    except:
        # Bare except preserved: unbind the chroot, then re-raise as-is.
        self.unbind()
        raise

def preclean(self):
    # Run the controller's preclean step before the chroot is torn down.
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("preclean"):
        log.notice('Resume point detected, skipping preclean operation...')
    else:
        try:
            if os.path.exists(self.settings["controller_file"]):
                cmd(self.settings["controller_file"] +
                    " preclean", "preclean script failed.", env=self.env)
                self.resume.enable("preclean")
        except:
            self.unbind()
            raise CatalystError("Build failed, could not execute preclean")

def capture(self):
    # Pack the finished stage into a tarball and generate CONTENTS/digests.
    # initialize it here so it doesn't use
    # resources if it is not needed
    if not self.compressor:
        self.compressor = CompressMap(self.settings["compress_definitions"],
            env=self.env, default_mode=self.settings['compression_mode'])

    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("capture"):
        log.notice('Resume point detected, skipping capture operation...')
    else:
        log.notice('Capture target in a tarball')
        # Remove filename from path
        mypath = os.path.dirname(self.settings["target_path"].rstrip('/'))

        # Now make sure path exists
        ensure_dirs(mypath)

        pack_info = self.compressor.create_infodict(
            source=".",
            basedir=self.settings["stage_path"],
            filename=self.settings["target_path"].rstrip('/'),
            mode=self.settings["compression_mode"],
            auto_extension=True
            )
        target_filename = ".".join([self.settings["target_path"].rstrip('/'),
            self.compressor.extension(pack_info['mode'])])

        log.notice('Creating stage tarball... mode: %s',
            self.settings['compression_mode'])

        if self.compressor.compress(pack_info):
            self.gen_contents_file(target_filename)
            self.gen_digest_file(target_filename)
            self.resume.enable("capture")
        else:
            log.warning("Couldn't create stage tarball: %s", target_filename)

def run_local(self):
    # Hand control to the target's controller script inside the chroot.
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("run_local"):
        log.notice('Resume point detected, skipping run_local operation...')
    else:
        try:
            if os.path.exists(self.settings["controller_file"]):
                log.info('run_local() starting controller script...')
                cmd(self.settings["controller_file"] + " run",
                    "run script failed.", env=self.env)
                self.resume.enable("run_local")
            else:
                log.info('run_local() no controller_file found... %s',
                    self.settings['controller_file'])
        except CatalystError:
            self.unbind()
            raise CatalystError("Stage build aborting due to error.",
                print_traceback=False)

def setup_environment(self):
    """
    Modify the current environment. This is an ugly hack that should be
    fixed. We need this to use the os.system() call since we can't
    specify our own environ
    """
    log.debug('setup_environment(); settings = %r', self.settings)
    for x in list(self.settings):
        log.debug('setup_environment(); processing: %s', x)
        if x == "options":
            #self.env['clst_' + x] = ' '.join(self.settings[x])
            for opt in self.settings[x]:
                self.env['clst_' + opt.upper()] = "true"
            continue
        # Sanitize var names by doing "s|/-.|_|g"
        varname = "clst_" + x.replace("/", "_")
        varname = varname.replace("-", "_")
        varname = varname.replace(".", "_")
        if isinstance(self.settings[x], str):
            # Prefix to prevent namespace clashes
            #os.environ[varname] = self.settings[x]
            if "path" in x:
                self.env[varname] = self.settings[x].rstrip("/")
            else:
                self.env[varname] = self.settings[x]
        elif isinstance(self.settings[x], list):
            #os.environ[varname] = ' '.join(self.settings[x])
            self.env[varname] = ' '.join(self.settings[x])
        elif isinstance(self.settings[x], bool):
            if self.settings[x]:
                self.env[varname] = "true"
            else:
                self.env[varname] = "false"
        # This handles a dictionary of objects just one level deep and no deeper!
        # Its currently used only for USE_EXPAND flags which are dictionaries of
        # lists in arch/amd64.py and friends. If we wanted self.settigs[var]
        # of any depth, we should make this function recursive.
        elif isinstance(self.settings[x], dict):
            if x in ["compress_definitions", "decompress_definitions"]:
                continue
            self.env[varname] = ' '.join(self.settings[x].keys())
            for y in self.settings[x].keys():
                varname2 = "clst_" + y.replace("/", "_")
                varname2 = varname2.replace("-", "_")
                varname2 = varname2.replace(".", "_")
                if isinstance(self.settings[x][y], str):
                    self.env[varname2] = self.settings[x][y]
                elif isinstance(self.settings[x][y], list):
                    self.env[varname2] = ' '.join(self.settings[x][y])
                elif isinstance(self.settings[x][y], bool):
                    # NOTE(review): these two assignments write to 'varname'
                    # (the outer key), not 'varname2' — looks like a latent
                    # bug; preserved unchanged here.
                    if self.settings[x][y]:
                        self.env[varname] = "true"
                    else:
                        self.env[varname] = "false"

    if "makeopts" in self.settings:
        self.env["MAKEOPTS"] = self.settings["makeopts"]
    log.debug('setup_environment(); env = %r', self.env)

def run(self):
    # Top-level driver: lock, sanity-check mounts, honor purge options,
    # then execute each action in action_sequence, unbinding on failure.
    self.chroot_lock.write_lock()

    # Kill any pids in the chroot
    self.kill_chroot_pids()

    # Check for mounts right away and abort if we cannot unmount them
    self.mount_safety_check()

    if "clear-autoresume" in self.settings["options"]:
        self.clear_autoresume()

    if "purgetmponly" in self.settings["options"]:
        self.purge()
        return

    if "purgeonly" in self.settings["options"]:
        log.info('StageBase: run() purgeonly')
        self.purge()

    if "purge" in self.settings["options"]:
        log.info('StageBase: run() purge')
        self.purge()

    failure = False
    for x in self.settings["action_sequence"]:
        log.notice('--- Running action sequence: %s', x)
        sys.stdout.flush()
        try:
            getattr(self, x)()
        except LockInUse:
            log.error('Unable to aquire the lock...')
            failure = True
            break
        except Exception:
            log.error('Exception running action sequence %s', x,
                exc_info=True)
            failure = True
            break
    if failure:
        log.notice('Cleaning up... Running unbind()')
        self.unbind()
        return False
    return True

def unmerge(self):
    # Unmerge the packages listed in <spec_prefix>/unmerge via the controller.
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("unmerge"):
        log.notice('Resume point detected, skipping unmerge operation...')
    else:
        if self.settings["spec_prefix"] + "/unmerge" in self.settings:
            if isinstance(self.settings[self.settings['spec_prefix'] +
                    '/unmerge'], str):
                self.settings[self.settings["spec_prefix"] + "/unmerge"] = \
                    [self.settings[self.settings["spec_prefix"] + "/unmerge"]]
            myunmerge = \
                self.settings[self.settings["spec_prefix"] + "/unmerge"][:]

            for x in range(0, len(myunmerge)):
                # Surround args with quotes for passing to bash, allows
                # things like "<" to remain intact
                myunmerge[x] = "'" + myunmerge[x] + "'"
            myunmerge = ' '.join(myunmerge)

            # Before cleaning, unmerge stuff
            try:
                cmd(self.settings["controller_file"] +
                    " unmerge " + myunmerge, "Unmerge script failed.",
                    env=self.env)
                log.info('unmerge shell script')
            except CatalystError:
                self.unbind()
                raise
            self.resume.enable("unmerge")

def target_setup(self):
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("target_setup"):
        log.notice('Resume point detected, skipping target_setup operation...')
    else:
        log.notice('Setting up filesystems per filesystem type')
        cmd(self.settings["controller_file"] +
            " target_image_setup " + self.settings["target_path"],
            "target_image_setup script failed.", env=self.env)
        self.resume.enable("target_setup")

def setup_overlay(self):
    if "autoresume" in self.settings["options"] \
            and self.resume.is_enabled("setup_overlay"):
        log.notice('Resume point detected, skipping setup_overlay operation...')
    else:
        if self.settings["spec_prefix"] + "/overlay" in self.settings:
            for x in self.settings[self.settings["spec_prefix"] + "/overlay"]:
                if os.path.exists(x):
                    cmd("rsync -a " + x + "/ " +
                        self.settings["target_path"],
                        self.settings["spec_prefix"] + "overlay: " + x +
                        " copy failed.", env=self.env)
            self.resume.enable("setup_overlay")

def create_iso(self):
    # NOTE(review): body continues in the next chunk of this file.
    if "autoresume" in self.settings["options"] \
and self.resume.is_enabled("create_iso"): log.notice('Resume point detected, skipping create_iso operation...') else: # Create the ISO if "iso" in self.settings: cmd(self.settings["controller_file"]+" iso "+\ self.settings["iso"],"ISO creation script failed.",\ env=self.env) self.gen_contents_file(self.settings["iso"]) self.gen_digest_file(self.settings["iso"]) self.resume.enable("create_iso") else: log.warning('livecd/iso was not defined. An ISO Image will not be created.') def build_packages(self): build_packages_resume = pjoin(self.settings["autoresume_path"], "build_packages") if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("build_packages"): log.notice('Resume point detected, skipping build_packages operation...') else: if self.settings["spec_prefix"]+"/packages" in self.settings: if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("build_packages"): log.notice('Resume point detected, skipping build_packages operation...') else: mypack=\ list_bashify(self.settings[self.settings["spec_prefix"]\ +"/packages"]) try: cmd(self.settings["controller_file"]+\ " build_packages "+mypack,\ "Error in attempt to build packages",env=self.env) fileutils.touch(build_packages_resume) self.resume.enable("build_packages") except CatalystError: self.unbind() raise CatalystError(self.settings["spec_prefix"]+\ "build aborting due to error.") def build_kernel(self): '''Build all configured kernels''' if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("build_kernel"): log.notice('Resume point detected, skipping build_kernel operation...') else: if "boot/kernel" in self.settings: try: mynames=self.settings["boot/kernel"] if isinstance(mynames, str): mynames=[mynames] # Execute the script that sets up the kernel build environment cmd(self.settings["controller_file"]+\ " pre-kmerge ","Runscript pre-kmerge failed",\ env=self.env) for kname in mynames: self._build_kernel(kname=kname) self.resume.enable("build_kernel") 
except CatalystError: self.unbind() raise CatalystError( "build aborting due to kernel build error.", print_traceback=True) def _build_kernel(self, kname): "Build a single configured kernel by name" if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("build_kernel_"+kname): log.notice('Resume point detected, skipping build_kernel for %s operation...', kname) return self._copy_kernel_config(kname=kname) # If we need to pass special options to the bootloader # for this kernel put them into the environment if "boot/kernel/"+kname+"/kernelopts" in self.settings: myopts=self.settings["boot/kernel/"+kname+\ "/kernelopts"] if not isinstance(myopts, str): myopts = ' '.join(myopts) self.env[kname+"_kernelopts"]=myopts else: self.env[kname+"_kernelopts"]="" if "boot/kernel/"+kname+"/extraversion" not in self.settings: self.settings["boot/kernel/"+kname+\ "/extraversion"]="" self.env["clst_kextraversion"]=\ self.settings["boot/kernel/"+kname+\ "/extraversion"] self._copy_initramfs_overlay(kname=kname) # Execute the script that builds the kernel cmd("/bin/bash "+self.settings["controller_file"]+\ " kernel "+kname,\ "Runscript kernel build failed",env=self.env) if "boot/kernel/"+kname+"/initramfs_overlay" in self.settings: if os.path.exists(self.settings["chroot_path"]+\ "/tmp/initramfs_overlay/"): log.notice('Cleaning up temporary overlay dir') cmd("rm -R "+self.settings["chroot_path"]+\ "/tmp/initramfs_overlay/",env=self.env) self.resume.is_enabled("build_kernel_"+kname) # Execute the script that cleans up the kernel build environment cmd("/bin/bash "+self.settings["controller_file"]+\ " post-kmerge ", "Runscript post-kmerge failed",env=self.env) def _copy_kernel_config(self, kname): if "boot/kernel/"+kname+"/config" in self.settings: if not os.path.exists(self.settings["boot/kernel/"+kname+"/config"]): self.unbind() raise CatalystError( "Can't find kernel config: "+\ self.settings["boot/kernel/"+kname+\ "/config"]) try: cmd("cp 
"+self.settings["boot/kernel/"+kname+\ "/config"]+" "+\ self.settings["chroot_path"]+"/var/tmp/"+\ kname+".config",\ "Couldn't copy kernel config: "+\ self.settings["boot/kernel/"+kname+\ "/config"],env=self.env) except CatalystError: self.unbind() def _copy_initramfs_overlay(self, kname): if "boot/kernel/"+kname+"/initramfs_overlay" in self.settings: if os.path.exists(self.settings["boot/kernel/"+\ kname+"/initramfs_overlay"]): log.notice('Copying initramfs_overlay dir %s', self.settings['boot/kernel/' + kname + '/initramfs_overlay']) cmd("mkdir -p "+\ self.settings["chroot_path"]+\ "/tmp/initramfs_overlay/"+\ self.settings["boot/kernel/"+kname+\ "/initramfs_overlay"],env=self.env) cmd("cp -R "+self.settings["boot/kernel/"+\ kname+"/initramfs_overlay"]+"/* "+\ self.settings["chroot_path"]+\ "/tmp/initramfs_overlay/"+\ self.settings["boot/kernel/"+kname+\ "/initramfs_overlay"],env=self.env) def bootloader(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("bootloader"): log.notice('Resume point detected, skipping bootloader operation...') else: try: cmd(self.settings["controller_file"]+\ " bootloader " + self.settings["target_path"].rstrip('/'),\ "Bootloader script failed.",env=self.env) self.resume.enable("bootloader") except CatalystError: self.unbind() raise CatalystError("Script aborting due to error.") def livecd_update(self): if "autoresume" in self.settings["options"] \ and self.resume.is_enabled("livecd_update"): log.notice('Resume point detected, skipping build_packages operation...') else: try: cmd(self.settings["controller_file"]+\ " livecd-update","livecd-update failed.",env=self.env) self.resume.enable("livecd_update") except CatalystError: self.unbind() raise CatalystError("build aborting due to livecd_update error.") @staticmethod def _debug_pause_(): py_input("press any key to continue: ") # vim: ts=4 sw=4 sta et sts=4 ai
gpl-2.0
translate/virtaal
virtaal/views/widgets/label_expander.py
6
2409
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

import gtk
import gtk.gdk
import gobject
import pango

from virtaal.views import markup


class LabelExpander(gtk.EventBox):
    """Container that shows either a markup label or its wrapped widget.

    While collapsed, a label (rendered from ``get_text()``) is displayed;
    once expanded, the label is swapped for the real ``widget``.  The state
    is exposed both as the ``expanded`` GObject property and as a plain
    Python property of the same name.
    """

    __gproperties__ = {
        "expanded": (gobject.TYPE_BOOLEAN,
                     "expanded",
                     "A boolean indicating whether this widget has been expanded to show its contained widget",
                     False,
                     gobject.PARAM_READWRITE),
    }

    def __init__(self, widget, get_text, expanded=False):
        """
        :param widget: the real widget revealed when expanded.
        :param get_text: zero-argument callable returning the (markup-able)
            text shown while collapsed.
        :param expanded: initial expansion state.
        """
        super(LabelExpander, self).__init__()

        label_text = gtk.Label()
        label_text.set_single_line_mode(False)
        label_text.set_line_wrap(True)
        label_text.set_justify(gtk.JUSTIFY_FILL)
        label_text.set_use_markup(True)

        # The label lives in its own EventBox so it could receive click
        # events (see the commented-out connect below).
        self.label = gtk.EventBox()
        self.label.add(label_text)

        self.widget = widget
        self.get_text = get_text

        # Assigning through the property installs the correct child.
        self.expanded = expanded

        #self.label.connect('button-release-event', lambda widget, *args: setattr(self, 'expanded', True))

    def do_get_property(self, prop):
        # GObject property accessor; delegates to the Python attribute.
        return getattr(self, prop.name)

    def do_set_property(self, prop, value):
        # GObject property mutator; delegates to the Python attribute.
        setattr(self, prop.name, value)

    def _get_expanded(self):
        # Expanded iff the contained widget (not the label) is our child.
        return self.child == self.widget

    def _set_expanded(self, value):
        # FIX: compare to None with "is not" (PEP 8); "!= None" invokes
        # rich comparison, which is both unidiomatic and potentially wrong
        # for objects overriding __eq__.
        if self.child is not None:
            self.remove(self.child)

        if value:
            self.add(self.widget)
        else:
            self.add(self.label)
            # Refresh the label text each time we collapse.
            self.label.child.set_markup(markup.markuptext(self.get_text(), fancyspaces=False, markupescapes=False))

        self.child.show()

    expanded = property(_get_expanded, _set_expanded)
gpl-2.0
arbrandes/edx-platform
lms/djangoapps/ccx/migrations/0001_initial.py
4
1564
# Initial schema migration for the CCX (Custom Courses for edX) app.
from django.conf import settings
from django.db import migrations, models

from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField


class Migration(migrations.Migration):
    """Create the CustomCourseForEdX and CcxFieldOverride tables."""

    dependencies = [
        # The coach FK below points at the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Per-CCX override of a single XBlock field value.
        migrations.CreateModel(
            name='CcxFieldOverride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('location', UsageKeyField(max_length=255, db_index=True)),
                ('field', models.CharField(max_length=255)),
                # Stored as serialized JSON; 'null' is the JSON null literal.
                ('value', models.TextField(default='null')),
            ],
        ),
        # A custom course derived from an existing course run.
        migrations.CreateModel(
            name='CustomCourseForEdX',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', CourseKeyField(max_length=255, db_index=True)),
                ('display_name', models.CharField(max_length=255)),
                ('coach', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
        ),
        # FK added after both tables exist to avoid a forward reference.
        migrations.AddField(
            model_name='ccxfieldoverride',
            name='ccx',
            field=models.ForeignKey(to='ccx.CustomCourseForEdX', on_delete=models.CASCADE),
        ),
        # At most one override per (ccx, block, field) triple.
        migrations.AlterUniqueTogether(
            name='ccxfieldoverride',
            unique_together={('ccx', 'location', 'field')},
        ),
    ]
agpl-3.0
brchiu/tensorflow
tensorflow/contrib/util/loader.py
41
2063
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading op libraries.

@@load_op_library
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re

from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader


def load_op_library(path):
  """Loads a contrib op library from the given path.

  NOTE(mrry): On Windows, we currently assume that some contrib op
  libraries are statically linked into the main TensorFlow Python
  extension DLL - use dynamically linked ops if the .so is present.

  Args:
    path: An absolute path to a shared object file.

  Returns:
    A Python module containing the Python wrappers for Ops defined in the
    plugin, or `None` on Windows when no dynamically linked library exists.
  """
  if os.name == 'nt':
    # On Windows, try swapping a missing .so for its .dll sibling; if the
    # .dll is missing too, the ops are assumed to be statically linked and
    # we silently load nothing.
    # TODO(mrry): Once we have all of them this check should be removed.
    if not os.path.exists(path):
      path = re.sub(r'\.so$', '.dll', path)
      if not os.path.exists(path):
        return None
  resolved = resource_loader.get_path_to_datafile(path)
  library = load_library.load_op_library(resolved)
  assert library, 'Could not load %s' % resolved
  return library
apache-2.0
user-none/calibre
src/calibre/devices/mtp/filesystem_cache.py
14
8300
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

# In-memory cache of the filesystem tree exposed by an MTP device.

import weakref, sys, json
from collections import deque
from operator import attrgetter
from future_builtins import map
from datetime import datetime

from calibre import human_readable, prints, force_unicode
from calibre.utils.date import local_tz, as_utc
from calibre.utils.icu import sort_key, lower
from calibre.ebooks import BOOK_EXTENSIONS

# File extensions treated as ebooks (calibre's list minus non-book formats).
bexts = frozenset(BOOK_EXTENSIONS) - {'mbp', 'tan', 'rar', 'zip', 'xml'}


class FileOrFolder(object):
    """A single node (file, folder, or storage root) in the device tree.

    Constructed from a plain ``entry`` dict as returned by the MTP driver
    layer; registers itself in the owning cache's ``id_map``.
    """

    def __init__(self, entry, fs_cache):
        self.all_storage_ids = fs_cache.all_storage_ids

        self.object_id = entry['id']
        self.is_folder = entry['is_folder']
        self.storage_id = entry['storage_id']
        # self.parent_id is None for storage objects
        self.parent_id = entry.get('parent_id', None)
        n = entry.get('name', None)
        if not n:
            n = '___'
        self.name = force_unicode(n, 'utf-8')
        self.persistent_id = entry.get('persistent_id', self.object_id)
        self.size = entry.get('size', 0)
        md = entry.get('modified', 0)
        # 'modified' may be a struct-time-like tuple or a POSIX timestamp;
        # fall back to the epoch on any parse failure.
        try:
            if isinstance(md, tuple):
                self.last_modified = datetime(*(list(md)+[local_tz]))
            else:
                self.last_modified = datetime.fromtimestamp(md, local_tz)
        except:
            self.last_modified = datetime.fromtimestamp(0, local_tz)
        self.last_mod_string = self.last_modified.strftime('%Y/%m/%d %H:%M')
        self.last_modified = as_utc(self.last_modified)

        if self.storage_id not in self.all_storage_ids:
            raise ValueError('Storage id %s not valid for %s, valid values: %s'%(self.storage_id,
                entry, self.all_storage_ids))

        # A parent_id of 0 means "directly under the storage root".
        if self.parent_id == 0:
            self.parent_id = self.storage_id

        self.is_hidden = entry.get('is_hidden', False)
        self.is_system = entry.get('is_system', False)
        self.can_delete = entry.get('can_delete', True)

        self.files = []
        self.folders = []
        # Register in the cache; hold only a weakref back to avoid a cycle.
        fs_cache.id_map[self.object_id] = self
        self.fs_cache = weakref.ref(fs_cache)
        self.deleted = False

        # Storage roots have object_id == storage_id.
        if self.storage_id == self.object_id:
            self.storage_prefix = 'mtp:::%s:::'%self.persistent_id

        self.is_ebook = (not self.is_folder and
                self.name.rpartition('.')[-1].lower() in bexts)

    def __repr__(self):
        name = 'Folder' if self.is_folder else 'File'
        try:
            path = unicode(self.full_path)
        except:
            path = ''
        datum = 'size=%s'%(self.size)
        if self.is_folder:
            datum = 'children=%s'%(len(self.files) + len(self.folders))
        return '%s(id=%s, storage_id=%s, %s, path=%s, modified=%s)'%(name, self.object_id,
                self.storage_id, datum, path, self.last_mod_string)

    __str__ = __repr__

    __unicode__ = __repr__

    @property
    def empty(self):
        # True when this node has no children at all.
        return not self.files and not self.folders

    @property
    def id_map(self):
        return self.fs_cache().id_map

    @property
    def parent(self):
        # May raise KeyError when parent_id is stale (handled by the cache).
        return None if self.parent_id is None else self.id_map[self.parent_id]

    @property
    def full_path(self):
        # Tuple of names from the storage root down to this node.
        parts = deque()
        parts.append(self.name)
        p = self.parent
        while p is not None:
            parts.appendleft(p.name)
            p = p.parent
        return tuple(parts)

    def __iter__(self):
        # Folders first, then files.
        for e in self.folders:
            yield e
        for e in self.files:
            yield e

    def add_child(self, entry):
        # Create a node from the raw entry dict and attach it under self.
        ans = FileOrFolder(entry, self.fs_cache())
        t = self.folders if ans.is_folder else self.files
        t.append(ans)
        return ans

    def remove_child(self, entry):
        # Detach from both child lists (whichever holds it) and the id map.
        for x in (self.files, self.folders):
            try:
                x.remove(entry)
            except ValueError:
                pass
        self.id_map.pop(entry.object_id, None)
        entry.deleted = True

    def dump(self, prefix='', out=sys.stdout):
        # Pretty-print this subtree, one node per line, children indented.
        c = '+' if self.is_folder else '-'
        data = ('%s children'%(sum(map(len, (self.files, self.folders))))
            if self.is_folder else human_readable(self.size))
        data += ' modified=%s'%self.last_mod_string
        line = '%s%s %s [id:%s %s]'%(prefix, c, self.name, self.object_id, data)
        prints(line, file=out)
        for c in (self.folders, self.files):
            for e in sorted(c, key=lambda x:sort_key(x.name)):
                e.dump(prefix=prefix+'  ', out=out)

    def folder_named(self, name):
        # Case-insensitive lookup among immediate sub-folders.
        name = lower(name)
        for e in self.folders:
            if e.name and lower(e.name) == name:
                return e
        return None

    def file_named(self, name):
        # Case-insensitive lookup among immediate files.
        name = lower(name)
        for e in self.files:
            if e.name and lower(e.name) == name:
                return e
        return None

    def find_path(self, path):
        '''
        Find a path in this folder, where path is a
        tuple of folder and file names like ('eBooks',
        'newest', 'calibre.epub'). Finding is case-insensitive.
        '''
        parent = self
        components = list(path)
        while components:
            child = components[0]
            components = components[1:]
            # Folders take precedence over files with the same name.
            c = parent.folder_named(child)
            if c is None:
                c = parent.file_named(child)
            if c is None:
                return None
            parent = c
        return parent

    @property
    def mtp_relpath(self):
        # Lower-cased path components, excluding the storage-root name.
        return tuple(x.lower() for x in self.full_path[1:])

    @property
    def mtp_id_path(self):
        # Stable textual identifier: 'mtp:::<object_id json>:::<path>'.
        return 'mtp:::' + json.dumps(self.object_id) + ':::' + '/'.join(self.full_path)


class FilesystemCache(object):
    """The whole device tree: storage roots plus all file/folder entries.

    Orphaned entries (whose parent id is unknown) are re-attached to a
    storage root during construction.
    """

    def __init__(self, all_storage, entries):
        self.entries = []
        self.id_map = {}
        self.all_storage_ids = tuple(x['id'] for x in all_storage)

        # Storage roots become top-level FileOrFolder nodes.
        for storage in all_storage:
            storage['storage_id'] = storage['id']
            e = FileOrFolder(storage, self)
            self.entries.append(e)

        self.entries.sort(key=attrgetter('object_id'))
        all_storage_ids = [x.storage_id for x in self.entries]
        self.all_storage_ids = tuple(all_storage_ids)

        # Creating the nodes populates self.id_map as a side effect.
        for entry in entries:
            FileOrFolder(entry, self)

        # Link every node into its parent's child lists, repairing
        # dangling parent ids by re-parenting onto a storage root.
        for item in self.id_map.itervalues():
            try:
                p = item.parent
            except KeyError:
                # Parent does not exist, set the parent to be the storage
                # object
                sid = item.storage_id
                if sid not in all_storage_ids:
                    sid = all_storage_ids[0]
                item.parent_id = sid
                p = item.parent

            if p is not None:
                t = p.folders if item.is_folder else p.files
                t.append(item)

    def dump(self, out=sys.stdout):
        # Debug helper: print every storage tree.
        for e in self.entries:
            e.dump(out=out)

    def storage(self, storage_id):
        # Return the storage root with the given id, or None implicitly.
        for e in self.entries:
            if e.storage_id == storage_id:
                return e

    def iterebooks(self, storage_id):
        # Yield all ebook files on a storage, skipping .txt in the root.
        for x in self.id_map.itervalues():
            if x.storage_id == storage_id and x.is_ebook:
                if x.parent_id == storage_id and x.name.lower().endswith('.txt'):
                    continue  # Ignore .txt files in the root
                yield x

    def __len__(self):
        return len(self.id_map)

    def resolve_mtp_id_path(self, path):
        # Inverse of FileOrFolder.mtp_id_path: parse and look up the node.
        if not path.startswith('mtp:::'):
            raise ValueError('%s is not a valid MTP path'%path)
        parts = path.split(':::')
        if len(parts) < 3:
            raise ValueError('%s is not a valid MTP path'%path)
        try:
            object_id = json.loads(parts[1])
        except:
            raise ValueError('%s is not a valid MTP path'%path)
        try:
            return self.id_map[object_id]
        except KeyError:
            raise ValueError('No object found with MTP path: %s'%path)
gpl-3.0
rowemoore/odoo
addons/base_report_designer/openerp_sxw2rml/openerp_sxw2rml.py
301
14179
# -*- coding: utf-8 -*-
##############################################################################
#
#  Copyright (c):
#
#  2005 pyopenoffice.py Martin Simon (http://www.bezirksreiter.de)
#  2005 Fabien Pinckaers, TINY SPRL. (http://tiny.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#!/usr/bin/python
"""
OpenERP SXW2RML - The OpenERP's report engine

OpenERP SXW2RML is part of the OpenERP Report Project.
OpenERP Report is a module that allows you to render high quality PDF
documents from an OpenOffice template (.sxw) and any relational database.
"""
__version__ = '0.9'

import re
import string
import os
import zipfile
import xml.dom.minidom
from reportlab.lib.units import toLength
import base64
import copy


class DomApiGeneral:
    """General DOM API utilities."""

    def __init__(self, content_string="", file=""):
        self.content_string = content_string
        # Matches a numeric value followed by a supported length unit.
        self.re_digits = re.compile(r"(.*?\d)(pt|cm|mm|inch|in)")

    def _unitTuple(self, string):
        """Split values and units to a tuple."""
        temp = self.re_digits.findall(string)
        if not temp:
            return (string,"")
        else:
            return (temp[0])

    def stringPercentToFloat(self, string):
        # "115%" -> 1.15
        temp = string.replace("""%""","")
        return float(temp)/100

    def findChildrenByName(self, parent, name, attr_dict=None):
        """Helper functions. Does not work recursively.
        Optional: also test for certain attribute/value pairs."""
        if attr_dict is None:
            attr_dict = {}
        children = []
        for c in parent.childNodes:
            if c.nodeType == c.ELEMENT_NODE and c.nodeName == name:
                children.append(c)
        if attr_dict == {}:
            return children
        else:
            return self._selectForAttributes(nodelist=children,attr_dict=attr_dict)

    def _selectForAttributes(self, nodelist, attr_dict):
        """Helper function: keep only nodes matching every attr/value pair."""
        selected_nodes = []
        for n in nodelist:
            check = 1
            for a in attr_dict.keys():
                if n.getAttribute(a) != attr_dict[a]:
                    # at least one incorrect attribute value?
                    check = 0
            if check:
                selected_nodes.append(n)
        return selected_nodes

    def _stringToTuple(self, s):
        """Helper function: parse "x,y" into an int pair, or None."""
        try:
            temp = string.split(s,",")
            return int(temp[0]),int(temp[1])
        except:
            return None

    def _tupleToString(self, t):
        # Inverse of _stringToTuple; returns None on any failure.
        try:
            return self.openOfficeStringUtf8("%s,%s" % (t[0],t[1]))
        except:
            return None

    def _lengthToFloat(self, value):
        # Convert a length like "2.5cm" to ReportLab points (rounded);
        # values without a recognized unit pass through unchanged.
        v = value
        if not self.re_digits.search(v):
            return v
        try:
            if v[-4:] == "inch":
                # OO files use "inch" instead of "in" in Reportlab units
                v = v[:-2]
        except:
            pass
        try:
            c = round(toLength(v))
            return c
        except:
            return v

    def openOfficeStringUtf8(self, string):
        # Re-encode a (possibly cp1252) byte string or unicode as UTF-8.
        if type(string) == unicode:
            return string.encode("utf-8")
        tempstring = unicode(string,"cp1252").encode("utf-8")
        return tempstring


class DomApi(DomApiGeneral):
    """This class provides a DOM-API for XML-Files from an SXW-Archive."""

    def __init__(self, xml_content, xml_styles):
        DomApiGeneral.__init__(self)
        self.content_dom = xml.dom.minidom.parseString(xml_content)
        self.styles_dom = xml.dom.minidom.parseString(xml_styles)
        body = self.content_dom.getElementsByTagName("office:body")
        self.body = body and body[0]

        # TODO:
        self.style_dict = {}
        self.style_properties_dict = {}

        # ******** always use the following order:
        self.buildStyleDict()
        self.buildStylePropertiesDict()
        # .sxw uses style:page-master; .odt uses style:page-layout.
        if self.styles_dom.getElementsByTagName("style:page-master").__len__()<>0:
            self.page_master = self.styles_dom.getElementsByTagName("style:page-master")[0]
        if self.styles_dom.getElementsByTagName("style:page-layout").__len__()<>0 :
            self.page_master = self.styles_dom.getElementsByTagName("style:page-layout")[0]
        self.document = self.content_dom.getElementsByTagName("office:document-content")[0]

    def buildStylePropertiesDict(self):
        # Resolve the (inherited) properties for every known style name.
        for s in self.style_dict.keys():
            self.style_properties_dict[s] = self.getStylePropertiesDict(s)

    def updateWithPercents(self, dict, updatedict):
        """Sometimes you find values like "115%" in the style hierarchy."""
        if not updatedict:
            # no style hierarchies for this style? => return
            return
        new_updatedict = copy.copy(updatedict)
        for u in new_updatedict.keys():
            try:
                if new_updatedict[u].find("""%""") != -1 and dict.has_key(u):
                    number = float(self.re_digits.search(dict[u]).group(1))
                    unit = self.re_digits.search(dict[u]).group(2)
                    new_number = self.stringPercentToFloat(new_updatedict[u]) * number
                    if unit == "pt":
                        # no floats allowed for "pt"
                        # OOo just takes the int, does not round (try it out!)
                        new_number = int(new_number)
                    new_updatedict[u] = "%s%s" % (new_number,unit)
                else:
                    dict[u] = new_updatedict[u]
            except:
                dict[u] = new_updatedict[u]
        dict.update(new_updatedict)

    def normalizeStyleProperties(self):
        """Transfer all style:style-properties attributes from the
        self.style_properties_hierarchical dict to the automatic-styles
        from content.xml. Use this function to preprocess content.xml for
        XSLT transformations etc. Do not try to implement this function
        with XSLT - believe me, it's a terrible task..."""
        styles_styles = self.styles_dom.getElementsByTagName("style:style")
        automatic_styles = self.content_dom.getElementsByTagName("office:automatic-styles")[0]
        for s in styles_styles:
            automatic_styles.appendChild(s.cloneNode(deep=1))
        content_styles = self.content_dom.getElementsByTagName("style:style")
        # these are the content_styles with styles_styles added!!!
        for s in content_styles:
            c = self.findChildrenByName(s,"style:properties")
            if c == []:
                # some derived automatic styles do not have "style:properties":
                temp = self.content_dom.createElement("style:properties")
                s.appendChild(temp)
                c = self.findChildrenByName(s,"style:properties")
            c = c[0]
            dict = self.style_properties_dict[(s.getAttribute("style:name")).encode("utf-8")] or {}
            for attribute in dict.keys():
                c.setAttribute(self.openOfficeStringUtf8(attribute),self.openOfficeStringUtf8(dict[attribute]))

    def transferStylesXml(self):
        """Transfer certain sub-trees from styles.xml to the normalized
        content.xml (see above). It is not necessary to do this - for
        example - with paragraph styles. the "normalized" style properties
        contain all information needed for further processing."""
        # TODO: What about table styles etc.?
        outline_styles = self.styles_dom.getElementsByTagName("text:outline-style")
        t = self.content_dom.createElement("transferredfromstylesxml")
        self.document.insertBefore(t,self.body)
        t_new = self.body.previousSibling
        try:
            page_master = self.page_master
            t_new.appendChild(page_master.cloneNode(deep=1))
            t_new.appendChild(outline_styles[0].cloneNode(deep=1))
        except:
            pass

    def normalizeLength(self):
        """Normalize all lengths to floats (i.e: 1 inch = 72).
        Always use this after "normalizeContent" and "transferStyles"!"""
        # TODO: The complex attributes of table cell styles are not transferred yet.
        #all_styles = self.content_dom.getElementsByTagName("style:properties")
        #all_styles += self.content_dom.getElementsByTagName("draw:image")
        all_styles = self.content_dom.getElementsByTagName("*")
        for s in all_styles:
            for x in s._attrs.keys():
                v = s.getAttribute(x)
                s.setAttribute(x,"%s" % self._lengthToFloat(v))  # convert float to string first!

    def normalizeTableColumns(self):
        """Handle this strange table:number-columns-repeated attribute."""
        columns = self.content_dom.getElementsByTagName("table:table-column")
        for c in columns:
            if c.hasAttribute("table:number-columns-repeated"):
                number = int(c.getAttribute("table:number-columns-repeated"))
                c.removeAttribute("table:number-columns-repeated")
                # Expand "repeat N" into N-1 additional explicit siblings.
                for i in range(number-1):
                    (c.parentNode).insertBefore(c.cloneNode(deep=1),c)

    def buildStyleDict(self):
        """Store all style:style-nodes from content.xml and styles.xml in
        self.style_dict. Caution: in this dict the nodes from two dom apis
        are merged!"""
        for st in (self.styles_dom,self.content_dom):
            for s in st.getElementsByTagName("style:style"):
                name = s.getAttribute("style:name").encode("utf-8")
                self.style_dict[name] = s
        return True

    def toxml(self):
        # Serialize the (normalized) content DOM back to UTF-8 XML.
        return self.content_dom.toxml(encoding="utf-8")

    def getStylePropertiesDict(self, style_name):
        # Recursively merge properties down the parent-style chain, with
        # the child's own *-properties attributes winning.
        res = {}
        if self.style_dict[style_name].hasAttribute("style:parent-style-name"):
            parent = self.style_dict[style_name].getAttribute("style:parent-style-name").encode("utf-8")
            res = self.getStylePropertiesDict(parent)
        children = self.style_dict[style_name].childNodes
        for c in children:
            if c.nodeType == c.ELEMENT_NODE and c.nodeName.find("properties")>0 :
                for attr in c._attrs.keys():
                    res[attr] = c.getAttribute(attr).encode("utf-8")
        return res


class PyOpenOffice(object):
    """This is the main class which provides all functionality."""

    def __init__(self, path='.', save_pict=False):
        self.path = path
        self.save_pict = save_pict
        # Image name -> raw bytes, filled by oo_read().
        self.images = {}

    def oo_read(self, fname):
        # Extract content.xml, styles.xml and any embedded pictures from
        # the zip archive; optionally write pictures to self.path.
        z = zipfile.ZipFile(fname,"r")
        content = z.read('content.xml')
        style = z.read('styles.xml')
        all = z.namelist()
        for a in all:
            if a[:9]=='Pictures/' and len(a)>10:
                pic_content = z.read(a)
                self.images[a[9:]] = pic_content
                if self.save_pict:
                    f=open(os.path.join(self.path, os.path.basename(a)),"wb")
                    f.write(pic_content)
                    f.close()
        z.close()
        return content,style

    def oo_replace(self, content):
        # Pre-XSLT textual cleanup of the raw content.xml string.
        regex = [
            (r"<para[^>]*/>", ""),
            (r"<para(.*)>(.*?)<text:line-break[^>]*/>", "<para$1>$2</para><para$1>"),
        ]
        for key,val in regex:
            content = re.sub(key, val, content)
        return content

    def unpackNormalize(self, sourcefile):
        # Full pipeline: unzip, clean, then normalize styles/lengths/columns.
        c,s = self.oo_read(sourcefile)
        c = self.oo_replace(c)
        dom = DomApi(c,s)
        dom.normalizeStyleProperties()
        dom.transferStylesXml()
        dom.normalizeLength()
        dom.normalizeTableColumns()
        new_c = dom.toxml()
        return new_c


def sxw2rml(sxw_file, xsl, output='.', save_pict=False):
    # Transform an .sxw/.odt file to RML using the given XSL stylesheet.
    # Embedded images are appended base64-encoded under /document/stylesheet.
    from lxml import etree
    from StringIO import StringIO
    tool = PyOpenOffice(output, save_pict = save_pict)
    res = tool.unpackNormalize(sxw_file)

    f = StringIO(xsl)
    styledoc = etree.parse(f)
    style = etree.XSLT(styledoc)
    f = StringIO(res)
    doc = etree.parse(f)
    result = style(doc)

    root = etree.XPathEvaluator(result)("/document/stylesheet")
    if root:
        root=root[0]
        images = etree.Element("images")
        for img in tool.images:
            node = etree.Element('image', name=img)
            node.text = base64.encodestring(tool.images[img])
            images.append(node)
        root.append(images)

    try:
        xml = str(result)
        return xml
    except:
        return result

if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser(
        version="Odoo Report v%s" % __version__,
        usage = 'openerp_sxw2rml.py [options] file.sxw')
    parser.add_option("-v", "--verbose", default=False, dest="verbose",
        help="enable basic debugging")
    parser.add_option("-o", "--output", dest="output", default='.',
        help="directory of image output")
    (opt, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments.")

    import sys
    fname = sys.argv[1]
    f = fname
    # Pick the stylesheet matching the archive's declared mimetype.
    xsl_file = 'normalized_oo2rml.xsl'
    z = zipfile.ZipFile(fname,"r")
    mimetype = z.read('mimetype')
    if mimetype.split('/')[-1] == 'vnd.oasis.opendocument.text' :
        xsl_file = 'normalized_odt2rml.xsl'
    xsl = file(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), xsl_file)).read()

    result = sxw2rml(f, xsl, output=opt.output, save_pict=False)
    print result

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Foxboron/stem
test/integ/socket/control_message.py
4
7048
""" Integration tests for the stem.response.ControlMessage class. """ import re import unittest import stem.socket import stem.version import test.runner class TestControlMessage(unittest.TestCase): def test_unestablished_socket(self): """ Checks message parsing when we have a valid but unauthenticated socket. """ if test.runner.require_control(self): return # If an unauthenticated connection gets a message besides AUTHENTICATE or # PROTOCOLINFO then tor will give an 'Authentication required.' message and # hang up. control_socket = test.runner.get_runner().get_tor_socket(False) control_socket.send('GETINFO version') auth_required_response = control_socket.recv() self.assertEqual('Authentication required.', str(auth_required_response)) self.assertEqual(['Authentication required.'], list(auth_required_response)) self.assertEqual('514 Authentication required.\r\n', auth_required_response.raw_content()) self.assertEqual([('514', ' ', 'Authentication required.')], auth_required_response.content()) # The socket's broken but doesn't realize it yet. These use cases are # checked in more depth by the ControlSocket integ tests. self.assertTrue(control_socket.is_alive()) self.assertRaises(stem.SocketClosed, control_socket.recv) self.assertFalse(control_socket.is_alive()) # Additional socket usage should fail, and pulling more responses will fail # with more closed exceptions. self.assertRaises(stem.SocketError, control_socket.send, 'GETINFO version') self.assertRaises(stem.SocketClosed, control_socket.recv) self.assertRaises(stem.SocketClosed, control_socket.recv) self.assertRaises(stem.SocketClosed, control_socket.recv) # The socket connection is already broken so calling close shouldn't have # an impact. control_socket.close() self.assertRaises(stem.SocketClosed, control_socket.send, 'GETINFO version') self.assertRaises(stem.SocketClosed, control_socket.recv) def test_invalid_command(self): """ Parses the response for a command which doesn't exist. 
""" if test.runner.require_control(self): return with test.runner.get_runner().get_tor_socket() as control_socket: control_socket.send('blarg') unrecognized_command_response = control_socket.recv() self.assertEqual('Unrecognized command "blarg"', str(unrecognized_command_response)) self.assertEqual(['Unrecognized command "blarg"'], list(unrecognized_command_response)) self.assertEqual('510 Unrecognized command "blarg"\r\n', unrecognized_command_response.raw_content()) self.assertEqual([('510', ' ', 'Unrecognized command "blarg"')], unrecognized_command_response.content()) def test_invalid_getinfo(self): """ Parses the response for a GETINFO query which doesn't exist. """ if test.runner.require_control(self): return with test.runner.get_runner().get_tor_socket() as control_socket: control_socket.send('GETINFO blarg') unrecognized_key_response = control_socket.recv() self.assertEqual('Unrecognized key "blarg"', str(unrecognized_key_response)) self.assertEqual(['Unrecognized key "blarg"'], list(unrecognized_key_response)) self.assertEqual('552 Unrecognized key "blarg"\r\n', unrecognized_key_response.raw_content()) self.assertEqual([('552', ' ', 'Unrecognized key "blarg"')], unrecognized_key_response.content()) def test_getinfo_config_file(self): """ Parses the 'GETINFO config-file' response. 
""" if test.runner.require_control(self): return runner = test.runner.get_runner() torrc_dst = runner.get_torrc_path() with runner.get_tor_socket() as control_socket: control_socket.send('GETINFO config-file') config_file_response = control_socket.recv() self.assertEqual('config-file=%s\nOK' % torrc_dst, str(config_file_response)) self.assertEqual(['config-file=%s' % torrc_dst, 'OK'], list(config_file_response)) self.assertEqual('250-config-file=%s\r\n250 OK\r\n' % torrc_dst, config_file_response.raw_content()) self.assertEqual([('250', '-', 'config-file=%s' % torrc_dst), ('250', ' ', 'OK')], config_file_response.content()) def test_getinfo_config_text(self): """ Parses the 'GETINFO config-text' response. """ if test.runner.require_control(self): return elif test.runner.require_version(self, stem.version.Requirement.GETINFO_CONFIG_TEXT): return runner = test.runner.get_runner() # We can't be certain of the order, and there may be extra config-text # entries as per... # https://trac.torproject.org/projects/tor/ticket/2362 # # so we'll just check that the response is a superset of our config torrc_contents = [] for line in runner.get_torrc_contents().splitlines(): line = line.strip() if line and not line.startswith('#'): torrc_contents.append(line) with runner.get_tor_socket() as control_socket: control_socket.send('GETINFO config-text') config_text_response = control_socket.recv() # the response should contain two entries, the first being a data response self.assertEqual(2, len(list(config_text_response))) self.assertEqual('OK', list(config_text_response)[1]) self.assertEqual(('250', ' ', 'OK'), config_text_response.content()[1]) self.assertTrue(config_text_response.raw_content().startswith('250+config-text=\r\n')) self.assertTrue(config_text_response.raw_content().endswith('\r\n.\r\n250 OK\r\n')) self.assertTrue(str(config_text_response).startswith('config-text=\n')) self.assertTrue(str(config_text_response).endswith('\nOK')) for torrc_entry in torrc_contents: 
self.assertTrue('\n%s\n' % torrc_entry in str(config_text_response)) self.assertTrue(torrc_entry in list(config_text_response)[0]) self.assertTrue('%s\r\n' % torrc_entry in config_text_response.raw_content()) self.assertTrue('%s' % torrc_entry in config_text_response.content()[0][2]) def test_bw_event(self): """ Issues 'SETEVENTS BW' and parses a couple events. """ if test.runner.require_control(self): return with test.runner.get_runner().get_tor_socket() as control_socket: control_socket.send('SETEVENTS BW') setevents_response = control_socket.recv() self.assertEqual('OK', str(setevents_response)) self.assertEqual(['OK'], list(setevents_response)) self.assertEqual('250 OK\r\n', setevents_response.raw_content()) self.assertEqual([('250', ' ', 'OK')], setevents_response.content()) # Tor will emit a BW event once per second. Parsing two of them. for _ in range(2): bw_event = control_socket.recv() self.assertTrue(re.match('BW [0-9]+ [0-9]+', str(bw_event))) self.assertTrue(re.match('650 BW [0-9]+ [0-9]+\r\n', bw_event.raw_content())) self.assertEqual(('650', ' '), bw_event.content()[0][:2])
lgpl-3.0
UK992/servo
tests/wpt/web-platform-tests/tools/third_party/pywebsocket3/example/echo_wsh.py
21
2214
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import six _GOODBYE_MESSAGE = u'Goodbye' def web_socket_do_extra_handshake(request): # This example handler accepts any request. See origin_check_wsh.py for how # to reject access from untrusted scripts based on origin value. pass # Always accept. 
def web_socket_transfer_data(request):
    """Echo every received message back to the client.

    Text frames are echoed as text, binary frames as binary.  The handler
    returns (closing the connection) when the stream ends (``None``) or
    right after echoing the goodbye sentinel back as a text frame.
    """
    stream = request.ws_stream
    message = stream.receive_message()
    while message is not None:
        is_text = isinstance(message, six.text_type)
        stream.send_message(message, binary=not is_text)
        if is_text and message == _GOODBYE_MESSAGE:
            return
        message = stream.receive_message()


# vi:sts=4 sw=4 et
mpl-2.0
eXcomm/p2pool
p2pool/bitcoin/height_tracker.py
227
4678
from twisted.internet import defer from twisted.python import log import p2pool from p2pool.bitcoin import data as bitcoin_data from p2pool.util import deferral, forest, jsonrpc, variable class HeaderWrapper(object): __slots__ = 'hash previous_hash'.split(' ') @classmethod def from_header(cls, header): return cls(bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header)), header['previous_block']) def __init__(self, hash, previous_hash): self.hash, self.previous_hash = hash, previous_hash class HeightTracker(object): '''Point this at a factory and let it take care of getting block heights''' def __init__(self, best_block_func, factory, backlog_needed): self._best_block_func = best_block_func self._factory = factory self._backlog_needed = backlog_needed self._tracker = forest.Tracker() self._watch1 = self._factory.new_headers.watch(self._heard_headers) self._watch2 = self._factory.new_block.watch(self._request) self._requested = set() self._clear_task = deferral.RobustLoopingCall(self._requested.clear) self._clear_task.start(60) self._last_notified_size = 0 self.updated = variable.Event() self._think_task = deferral.RobustLoopingCall(self._think) self._think_task.start(15) self._think2_task = deferral.RobustLoopingCall(self._think2) self._think2_task.start(15) def _think(self): try: highest_head = max(self._tracker.heads, key=lambda h: self._tracker.get_height_and_last(h)[0]) if self._tracker.heads else None if highest_head is None: return # wait for think2 height, last = self._tracker.get_height_and_last(highest_head) if height < self._backlog_needed: self._request(last) except: log.err(None, 'Error in HeightTracker._think:') def _think2(self): self._request(self._best_block_func()) def _heard_headers(self, headers): changed = False for header in headers: hw = HeaderWrapper.from_header(header) if hw.hash in self._tracker.items: continue changed = True self._tracker.add(hw) if changed: self.updated.happened() self._think() if len(self._tracker.items) >= 
self._last_notified_size + 100: print 'Have %i/%i block headers' % (len(self._tracker.items), self._backlog_needed) self._last_notified_size = len(self._tracker.items) @defer.inlineCallbacks def _request(self, last): if last in self._tracker.items: return if last in self._requested: return self._requested.add(last) (yield self._factory.getProtocol()).send_getheaders(version=1, have=[], last=last) def get_height_rel_highest(self, block_hash): # callers: highest height can change during yields! best_height, best_last = self._tracker.get_height_and_last(self._best_block_func()) height, last = self._tracker.get_height_and_last(block_hash) if last != best_last: return -1000000000 # XXX hack return height - best_height @defer.inlineCallbacks def get_height_rel_highest_func(bitcoind, factory, best_block_func, net): if '\ngetblock ' in (yield deferral.retry()(bitcoind.rpc_help)()): @deferral.DeferredCacher @defer.inlineCallbacks def height_cacher(block_hash): try: x = yield bitcoind.rpc_getblock('%x' % (block_hash,)) except jsonrpc.Error_for_code(-5): # Block not found if not p2pool.DEBUG: raise deferral.RetrySilentlyException() else: raise defer.returnValue(x['blockcount'] if 'blockcount' in x else x['height']) best_height_cached = variable.Variable((yield deferral.retry()(height_cacher)(best_block_func()))) def get_height_rel_highest(block_hash): this_height = height_cacher.call_now(block_hash, 0) best_height = height_cacher.call_now(best_block_func(), 0) best_height_cached.set(max(best_height_cached.value, this_height, best_height)) return this_height - best_height_cached.value else: get_height_rel_highest = HeightTracker(best_block_func, factory, 5*net.SHARE_PERIOD*net.CHAIN_LENGTH/net.PARENT.BLOCK_PERIOD).get_height_rel_highest defer.returnValue(get_height_rel_highest)
gpl-3.0
satyarth934/root
interpreter/llvm/src/tools/clang/docs/analyzer/conf.py
20
8070
# -*- coding: utf-8 -*- # # Clang Static Analyzer documentation build configuration file, created by # sphinx-quickstart on Wed Jan 2 15:54:28 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os from datetime import date # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Clang Static Analyzer' copyright = u'2013-%d, Analyzer Team' % date.today().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '3.4' # The full version, including alpha/beta/rc tags. release = '3.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ClangStaticAnalyzerdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation', u'Analyzer Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation', [u'Analyzer Team'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation', u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
lgpl-2.1
rvalyi/geraldo
site/newsite/site-geraldo/appengine_django/management/commands/testserver.py
15
2414
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths

from django.core.management.base import BaseCommand


class Command(BaseCommand):
  """Overrides the default Django testserver command.

  Instead of starting the default Django development server this command
  fires up a copy of the full fledged appengine dev_appserver.

  The appserver is always initialised with a blank datastore with the
  specified fixtures loaded into it.
  """
  help = 'Runs the development server with data from the given fixtures.'

  def run_from_argv(self, argv):
    """Flush the test datastore, load fixtures, then exec dev_appserver.

    argv is the raw command line: argv[0] is the program name, argv[1]
    the subcommand, and everything from argv[2] on is treated as fixture
    names to load before the server starts.
    """
    fixtures = argv[2:]

    # Ensure an on-disk test datastore is used.
    from django.db import connection
    connection.use_test_datastore = True
    connection.test_datastore_inmemory = False

    # Flush any existing test datastore.
    connection.flush()

    # Load the fixtures.
    from django.core.management import call_command
    call_command('loaddata', 'initial_data')
    if fixtures:
      call_command('loaddata', *fixtures)

    # Build new arguments for dev_appserver: reuse argv[0] as the program
    # name, point it at the on-disk test datastore, and serve the current
    # working directory as the application root.
    datastore_path, history_path = get_test_datastore_paths(False)
    new_args = argv[0:1]
    new_args.extend(['--datastore_path', datastore_path])
    new_args.extend(['--history_path', history_path])
    # NOTE(review): os.getcwdu() is Python 2 only, consistent with the
    # python2.4 shebang above.
    new_args.extend([os.getcwdu()])

    # Add email settings so dev_appserver sends mail the same way the
    # Django settings module is configured to.
    from django.conf import settings
    new_args.extend(['--smtp_host', settings.EMAIL_HOST,
                     '--smtp_port', str(settings.EMAIL_PORT),
                     '--smtp_user', settings.EMAIL_HOST_USER,
                     '--smtp_password', settings.EMAIL_HOST_PASSWORD])

    # Start the test dev_appserver.
    from google.appengine.tools import dev_appserver_main
    dev_appserver_main.main(new_args)
lgpl-3.0
annarev/tensorflow
tensorflow/python/ops/linalg/inverse_registrations.py
18
9206
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.inverse.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_addition from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_block_diag from tensorflow.python.ops.linalg import linear_operator_block_lower_triangular from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_full_matrix from tensorflow.python.ops.linalg import linear_operator_householder from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_inversion from tensorflow.python.ops.linalg import linear_operator_kronecker # By default, return LinearOperatorInversion which switched the .matmul # and .solve methods. 
@linear_operator_algebra.RegisterInverse(linear_operator.LinearOperator) def _inverse_linear_operator(linop): return linear_operator_inversion.LinearOperatorInversion( linop, is_non_singular=linop.is_non_singular, is_self_adjoint=linop.is_self_adjoint, is_positive_definite=linop.is_positive_definite, is_square=linop.is_square) @linear_operator_algebra.RegisterInverse( linear_operator_inversion.LinearOperatorInversion) def _inverse_inverse_linear_operator(linop_inversion): return linop_inversion.operator @linear_operator_algebra.RegisterInverse( linear_operator_diag.LinearOperatorDiag) def _inverse_diag(diag_operator): return linear_operator_diag.LinearOperatorDiag( 1. / diag_operator.diag, is_non_singular=diag_operator.is_non_singular, is_self_adjoint=diag_operator.is_self_adjoint, is_positive_definite=diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_identity.LinearOperatorIdentity) def _inverse_identity(identity_operator): return identity_operator @linear_operator_algebra.RegisterInverse( linear_operator_identity.LinearOperatorScaledIdentity) def _inverse_scaled_identity(identity_operator): return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=identity_operator._num_rows, # pylint: disable=protected-access multiplier=1. / identity_operator.multiplier, is_non_singular=identity_operator.is_non_singular, is_self_adjoint=True, is_positive_definite=identity_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_block_diag.LinearOperatorBlockDiag) def _inverse_block_diag(block_diag_operator): # We take the inverse of each block on the diagonal. 
return linear_operator_block_diag.LinearOperatorBlockDiag( operators=[ operator.inverse() for operator in block_diag_operator.operators], is_non_singular=block_diag_operator.is_non_singular, is_self_adjoint=block_diag_operator.is_self_adjoint, is_positive_definite=block_diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular) def _inverse_block_lower_triangular(block_lower_triangular_operator): """Inverse of LinearOperatorBlockLowerTriangular. We recursively apply the identity: ```none |A 0|' = | A' 0| |B C| |-C'BA' C'| ``` where `A` is n-by-n, `B` is m-by-n, `C` is m-by-m, and `'` denotes inverse. This identity can be verified through multiplication: ```none |A 0|| A' 0| |B C||-C'BA' C'| = | AA' 0| |BA'-CC'BA' CC'| = |I 0| |0 I| ``` Args: block_lower_triangular_operator: Instance of `LinearOperatorBlockLowerTriangular`. Returns: block_lower_triangular_operator_inverse: Instance of `LinearOperatorBlockLowerTriangular`, the inverse of `block_lower_triangular_operator`. """ if len(block_lower_triangular_operator.operators) == 1: return (linear_operator_block_lower_triangular. LinearOperatorBlockLowerTriangular( [[block_lower_triangular_operator.operators[0][0].inverse()]], is_non_singular=block_lower_triangular_operator.is_non_singular, is_self_adjoint=block_lower_triangular_operator.is_self_adjoint, is_positive_definite=(block_lower_triangular_operator. is_positive_definite), is_square=True)) blockwise_dim = len(block_lower_triangular_operator.operators) # Calculate the inverse of the `LinearOperatorBlockLowerTriangular` # representing all but the last row of `block_lower_triangular_operator` with # a recursive call (the matrix `A'` in the docstring definition). 
upper_left_inverse = ( linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular( block_lower_triangular_operator.operators[:-1]).inverse()) bottom_row = block_lower_triangular_operator.operators[-1] bottom_right_inverse = bottom_row[-1].inverse() # Find the bottom row of the inverse (equal to `[-C'BA', C']` in the docstring # definition, where `C` is the bottom-right operator of # `block_lower_triangular_operator` and `B` is the set of operators in the # bottom row excluding `C`). To find `-C'BA'`, we first iterate over the # column partitions of `A'`. inverse_bottom_row = [] for i in range(blockwise_dim - 1): # Find the `i`-th block of `BA'`. blocks = [] for j in range(i, blockwise_dim - 1): result = bottom_row[j].matmul(upper_left_inverse.operators[j][i]) if not any(isinstance(result, op_type) for op_type in linear_operator_addition.SUPPORTED_OPERATORS): result = linear_operator_full_matrix.LinearOperatorFullMatrix( result.to_dense()) blocks.append(result) summed_blocks = linear_operator_addition.add_operators(blocks) assert len(summed_blocks) == 1 block = summed_blocks[0] # Find the `i`-th block of `-C'BA'`. block = bottom_right_inverse.matmul(block) block = linear_operator_identity.LinearOperatorScaledIdentity( num_rows=bottom_right_inverse.domain_dimension_tensor(), multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block) inverse_bottom_row.append(block) # `C'` is the last block of the inverted linear operator. inverse_bottom_row.append(bottom_right_inverse) return ( linear_operator_block_lower_triangular.LinearOperatorBlockLowerTriangular( upper_left_inverse.operators + [inverse_bottom_row], is_non_singular=block_lower_triangular_operator.is_non_singular, is_self_adjoint=block_lower_triangular_operator.is_self_adjoint, is_positive_definite=(block_lower_triangular_operator. 
is_positive_definite), is_square=True)) @linear_operator_algebra.RegisterInverse( linear_operator_kronecker.LinearOperatorKronecker) def _inverse_kronecker(kronecker_operator): # Inverse decomposition of a Kronecker product is the Kronecker product # of inverse decompositions. return linear_operator_kronecker.LinearOperatorKronecker( operators=[ operator.inverse() for operator in kronecker_operator.operators], is_non_singular=kronecker_operator.is_non_singular, is_self_adjoint=kronecker_operator.is_self_adjoint, is_positive_definite=kronecker_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_circulant.LinearOperatorCirculant) def _inverse_circulant(circulant_operator): # Inverting the spectrum is sufficient to get the inverse. return linear_operator_circulant.LinearOperatorCirculant( spectrum=1. / circulant_operator.spectrum, is_non_singular=circulant_operator.is_non_singular, is_self_adjoint=circulant_operator.is_self_adjoint, is_positive_definite=circulant_operator.is_positive_definite, is_square=True, input_output_dtype=circulant_operator.dtype) @linear_operator_algebra.RegisterInverse( linear_operator_householder.LinearOperatorHouseholder) def _inverse_householder(householder_operator): return householder_operator
apache-2.0
lucidbard/curveship
fiction/artmaking.py
3
2031
'Artmaking, a tiny demonstration game for Curveship.'

__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'

from item_model import Actor, Room, Thing
from action_model import Modify, Sense
import can
import when

# Narration settings: title-page metadata plus the initial "spin" --
# the artist is at once the commanded character, focalizer, and narratee.
discourse = {
    'metadata': {
        'title': 'Artmaking',
        'headline': 'A very simple example',
        'people': [('by', 'Nick Montfort')],
        'prologue': 'Settle for nothing less than an artistic breakthrough.'},
    'spin': {
        'commanded': '@artist', 'focalizer': '@artist',
        'narratee': '@artist'}}

# The game opens with the artist looking around the studio.
initial_actions = [Sense('ogle', '@artist', direct='@studio',
                         modality='sight')]


class Art(Thing):
    '@sculpture is the only instance.'

    def react(self, world, basis):
        'Win the game when smashed.'
        # Only respond to a kick or strike aimed directly at this item.
        actions = []
        if (basis.verb in ['kick', 'strike'] and
                basis.direct == str(self)):
            # Turn the sculpture's 'intact' feature off; the narration in
            # 'after' plays once the modification succeeds.
            damage = Modify('puncture', basis.agent, direct=str(self),
                            feature='intact', new=False)
            damage.after = """finally, a worthy contribution to the art world ... victory!"""
            # Marking the action final ends the game -- this is the win.
            damage.final = True
            actions = [damage]
        return actions


# The world: one room holding the artist, a box, and the sculpture inside.
items = [
    Actor('@artist in @studio', article='the', called='artist',
          gender='female',
          allowed=can.possess_any_item,
          refuses=[('LEAVE way=(north|out)', when.always,
                    '[@artist/s] [have/v] work to do')]),
    Room('@studio', article='the', called='studio',
         exits={},
         sight='a bare studio space with a single exit, to the north'),
    Thing('@box in @studio', article='a', called='box',
          open=False,
          allowed=can.contain_and_support_things,
          sight='the medium-sized parcel [is/1/v] [open/@box/a]'),
    Art('@sculpture in @box', article='a', called='sculpture',
        intact=True,
        sight='a sculpture of a mountain, made to order in China')]
isc
zhuwenping/python-for-android
python3-alpha/python3-src/Lib/encodings/utf_16_be.py
860
1037
""" Python 'utf-16-be' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs ### Codec APIs encode = codecs.utf_16_be_encode def decode(input, errors='strict'): return codecs.utf_16_be_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.utf_16_be_encode(input, self.errors)[0] class IncrementalDecoder(codecs.BufferedIncrementalDecoder): _buffer_decode = codecs.utf_16_be_decode class StreamWriter(codecs.StreamWriter): encode = codecs.utf_16_be_encode class StreamReader(codecs.StreamReader): decode = codecs.utf_16_be_decode ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-16-be', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
apache-2.0
timsavage/denim
tests/service/supervisor.py
1
1880
from denim.constants import RootUser from tests._utils import ApiTestCase from denim.service import supervisor class TestServiceSupervisor(ApiTestCase): def test_manager_start(self): supervisor.manager_start() self.assertSudo('/etc/init.d/supervisor start', user=RootUser) def test_manager_stop(self): supervisor.manager_stop() self.assertSudo('/etc/init.d/supervisor stop', user=RootUser) def test_manager_restart(self): supervisor.manager_restart() self.assertSudo('/etc/init.d/supervisor restart', user=RootUser) def test_manager_reload(self): supervisor.manager_reload() self.assertSudo('supervisorctl reload', user=RootUser) def test_start_default_service(self): supervisor.start() self.assertSudo('supervisorctl start test-project', user=RootUser) def test_start_custom_service(self): supervisor.start('bar') self.assertSudo('supervisorctl start bar', user=RootUser) def test_stop_default_service(self): supervisor.stop() self.assertSudo('supervisorctl stop test-project', user=RootUser) def test_stop_custom_service(self): supervisor.stop('bar') self.assertSudo('supervisorctl stop bar', user=RootUser) def test_restart_default_service(self): supervisor.restart() self.assertSudo('supervisorctl restart test-project', user=RootUser) def test_restart_custom_service(self): supervisor.restart('bar') self.assertSudo('supervisorctl restart bar', user=RootUser) def test_status_default_service(self): supervisor.status() self.assertSudo('supervisorctl status test-project', user=RootUser) def test_status_custom_service(self): supervisor.status('bar') self.assertSudo('supervisorctl status bar', user=RootUser)
bsd-2-clause
jrossyra/adaptivemd
adaptivemd/engine/openmm/openmm.py
1
10478
############################################################################## # adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD) # Simulations on HPC Resources # Copyright 2017 FU Berlin and the Authors # # Authors: Jan-Hendrik Prinz # Contributors: # # `adaptiveMD` is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## from __future__ import absolute_import import os import ujson # from adaptivemd.task import PythonTask from adaptivemd.file import Location, File from adaptivemd.engine import (Engine, Frame, Trajectory, TrajectoryGenerationTask, TrajectoryExtensionTask) exec_file = File('file://' + os.path.join(os.path.dirname(__file__), 'openmmrun.py')).load() class OpenMMEngine(Engine): """ OpenMM Engine to be used by Adaptive MD Attributes ---------- system_file : `File` the system.xml file for OpenMM integrator_file : `File` the integrator.xml file for OpenMM pdb_file : `File` the .pdb file for the topology args : str a list of arguments passed to the `openmmrun.py` script """ def __init__(self, system_file, integrator_file, pdb_file, args=None): super(OpenMMEngine, self).__init__() self._items = dict() self['pdb_file'] = pdb_file self['system_file'] = system_file self['integrator_file'] = integrator_file self['_executable_file'] = exec_file for name, f in self.files.items(): stage = 
f.transfer(Location('staging:///')) self[name + '_stage'] = stage.target self.initial_staging.append(stage) if args is None: args = '-p CPU' self.args = args @classmethod def from_dict(cls, dct): obj = super(OpenMMEngine, cls).from_dict(dct) obj.args = dct['args'] return obj def to_dict(self): dct = super(OpenMMEngine, self).to_dict() dct.update({ 'args': self.args}) return dct @staticmethod def then_func_import(project, task, data, inputs): for f in data: # check if file with same location exists if f not in project.files: project.files.update(f) def _create_output_str(self): d = dict() for name, opt in self.types.items(): d[name] = opt.to_dict() return '--types="%s"' % ujson.dumps(d).replace('"', "'") def run(self, target, resource_name=None, export_path=None, cpu_threads=1, gpu_contexts=0, mpi_rank=0): t = TrajectoryGenerationTask(self, target, cpu_threads=cpu_threads, gpu_contexts=gpu_contexts, mpi_rank=mpi_rank) if resource_name is None: resource_name = list() elif isinstance(resource_name, str): resource_name = [resource_name] assert isinstance(resource_name, list) t.resource_name = resource_name if export_path: t.append(export_path) initial_pdb = t.pre_link(self['pdb_file_stage'], Location('initial.pdb')) t.pre_link(self['system_file_stage']) t.pre_link(self['integrator_file_stage']) t.pre_link(self['_executable_file_stage']) if target.frame in [self['pdb_file'], self['pdb_file_stage']]: input_pdb = initial_pdb elif isinstance(target.frame, File): loc = Location('coordinates.%s' % target.frame.extension) input_pdb = t.get(target.frame, loc) elif isinstance(target.frame, Frame): input_traj = t.pre_link(target.frame.trajectory, 'source/') input_pdb = File('input.pdb') # frame index is in canonical stride = 1 # we need to figure out which frame in the traj this actually is # also, we need a traj with full coordinates / selection = None ty, idx = target.frame.index_in_outputs if ty is None: # cannot use a trajectory where we do not have full coordinates return 
t.pre.append('mdconvert -o {target} -i {index} -t {pdb} {source}'.format( target=input_pdb, # input.pdb is used as starting structure index=idx, # the index from the source trajectory pdb=initial_pdb, # use the main pdb source=input_traj.outputs(ty))) # we pick output ty else: # for now we assume that if the initial frame is None or # not specific use the engines internal. That should be changed # todo: Raise exception here return # this represents our output trajectory output = Trajectory('traj/', target.frame, length=target.length, engine=self) # create the directory t.touch(output) # TODO option for retry # TODO use filenames from engine retry = '\nj=0\ntries=10\nsleep=1\n' retry += '\ntrajfile=traj/protein.dcd\n\n' retry += 'while [ $j -le $tries ]; do if ! [ -s $trajfile ]; then {0}; fi; sleep 1; j=$((j+1)); done' cmd = 'python openmmrun.py {args} {types} -s {system} -i {integrator} -t {pdb} --length {length} {output}'.format( pdb=input_pdb, types=self._create_output_str(), length=target.length, system=self['system_file'].basename, integrator=self['integrator_file'].basename, output=output, args=self.args, ) cmd = retry.format(cmd) t.append(cmd) t.put(output, target) return t def extend(self, source, length, resource_name=None, export_path=None, cpu_threads=1, gpu_contexts=0, mpi_rank=0): if length < 0: return [] # create a new file, but with the same name, etc, just new length target = source.clone() target.length = len(source) + length t = TrajectoryExtensionTask(self, target, source, cpu_threads=cpu_threads, gpu_contexts=gpu_contexts, mpi_rank=mpi_rank, )#resource_name=resource_name, export_path=export_path) if resource_name is None: resource_name = list() elif isinstance(resource_name, str): resource_name = [resource_name] assert isinstance(resource_name, list) t.resource_name = resource_name if export_path: t.append(export_path) initial_pdb = t.link(self['pdb_file_stage'], Location('initial.pdb')) t.link(self['system_file_stage']) 
t.link(self['integrator_file_stage']) t.link(self['_executable_file_stage']) # this represents our output trajectory source_link = t.link(source, 'source/') extension = Trajectory( 'extension/', target.frame, length=target.length, engine=self) t.touch(extension) # TODO option for retry # TODO use filenames from engine retry = '\nj=0\ntries=10\nsleep=1\n' retry += '\ntrajfile=extension/protein.dcd\n\n' retry += 'while [ $j -le $tries ]; do if ! [ -s $trajfile ]; then {0}; fi; sleep 1; j=$((j+1)); done' cmd = ('python openmmrun.py {args} {types} -s {system} -i {integrator} --restart {restart} -t {pdb} ' '--length {length} {output}').format( pdb=initial_pdb, restart=source.file('restart.npz'), # todo: this is engine specific! length=target.length - source.length, system=self['system_file'].basename, integrator=self['integrator_file'].basename, output=extension, args=self.args, types=self._create_output_str() ) cmd = retry.format(cmd) t.append(cmd) # join both trajectories for all outputs for ty, desc in self.types.items(): # stride = desc['stride'] outname = ty + '.temp.dcd' t.post.append('mdconvert -o {output} {source} {extension}'.format( output=extension.file(outname), source=source_link.outputs(ty), extension=extension.outputs(ty) )) # rename joined extended.dcd into output.dcd t.post.append(extension.file(outname).move(extension.outputs(ty))) # now extension/ should contain all files as expected # move extended trajectory to target place (replace old) files # this will also register the new trajectory folder as existent t.post_put(extension, target) return t # def task_import_trajectory_folder(self, source): # t = PythonTask(self) # # t.link(self['pdb_file_stage'], Location('initial.pdb')) # t.call(scan_trajectories, source=source) # # # call `then_func_import` after success # t.then('then_func_import') # # return t # # # def scan_trajectories(source): # import glob # import mdtraj as md # # files = glob.glob(source) # # here = os.getcwd() # # reference_list = [] 
# for f in files: # # rel = os.path.relpath(f, here) # # if rel.startswith('../../../../'): # p = 'worker://' + os.path.abspath(f) # elif rel.startswith('../../../'): # p = 'shared://' + rel[8:] # elif rel.startswith('../../'): # p = 'sandbox://' + rel[5:] # else: # p = 'worker://' + os.path.abspath(f) # # # print f, rel, p # # traj = md.load(f, top='initial.pdb') # reference = Trajectory(p, None, len(traj)) # reference_list.append(reference) # # return reference_list
lgpl-2.1
robotframework/mabot
lib/robot/result/executionresult.py
3
2450
# Copyright 2008-2012 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement from robot.model import Statistics from robot.reporting.outputwriter import OutputWriter from .executionerrors import ExecutionErrors from .configurer import SuiteConfigurer from .testsuite import TestSuite class Result(object): """Contains results of test execution. :ivar source: Path to the xml file where results are read from. :ivar suite: Hierarchical :class:`~.testsuite.TestSuite` results. :ivar errors: Execution :class:`~.executionerrors.ExecutionErrors`. 
""" def __init__(self, source=None, root_suite=None, errors=None): self.source = source self.suite = root_suite or TestSuite() self.errors = errors or ExecutionErrors() self.generator = None self._status_rc = True self._stat_config = {} @property def statistics(self): """Test execution :class:`~robot.model.statistics.Statistics`.""" return Statistics(self.suite, **self._stat_config) @property def return_code(self): """Return code (integer) of test execution.""" if self._status_rc: return min(self.suite.statistics.critical.failed, 250) return 0 def configure(self, status_rc=True, suite_config={}, stat_config={}): SuiteConfigurer(**suite_config).configure(self.suite) self._status_rc = status_rc self._stat_config = stat_config def visit(self, visitor): visitor.visit_result(self) def save(self, path=None): self.visit(OutputWriter(path or self.source)) class CombinedResult(Result): def __init__(self, others): Result.__init__(self) for other in others: self.add_result(other) def add_result(self, other): self.suite.suites.append(other.suite) self.errors.add(other.errors)
apache-2.0
Fedik/gramps
gramps/gen/filters/rules/person/_nevermarried.py
5
1744
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ....const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from .. import Rule #------------------------------------------------------------------------- # "People with no marriage records" #------------------------------------------------------------------------- class NeverMarried(Rule): """People with no marriage records""" name = _('People with no marriage records') description = _("Matches people who have no spouse") category = _('Family filters') def apply(self,db,person): return len(person.get_family_handle_list()) == 0
gpl-2.0
elit3ge/SickRage
lib/requests/packages/chardet/codingstatemachine.py
2931
2318
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .constants import eStart from .compat import wrap_ord class CodingStateMachine: def __init__(self, sm): self._mModel = sm self._mCurrentBytePos = 0 self._mCurrentCharLen = 0 self.reset() def reset(self): self._mCurrentState = eStart def next_state(self, c): # for each byte we get its class # if it is first byte, we also get byte length # PY3K: aBuf is a byte stream, so c is an int, not a byte byteCls = self._mModel['classTable'][wrap_ord(c)] if self._mCurrentState == eStart: self._mCurrentBytePos = 0 self._mCurrentCharLen = self._mModel['charLenTable'][byteCls] # from byte's class and stateTable, we get its next state curr_state = (self._mCurrentState * self._mModel['classFactor'] + byteCls) self._mCurrentState = self._mModel['stateTable'][curr_state] self._mCurrentBytePos += 1 return self._mCurrentState def 
get_current_charlen(self): return self._mCurrentCharLen def get_coding_state_machine(self): return self._mModel['name']
gpl-3.0
jetskijoe/headphones
lib/beetsplug/embyupdate.py
1
4090
# -*- coding: utf-8 -*- """Updates the Emby Library whenever the beets library is changed. emby: host: localhost port: 8096 username: user password: password """ from __future__ import division, absolute_import, print_function from beets import config from beets.plugins import BeetsPlugin from six.moves.urllib.parse import urlencode from six.moves.urllib.parse import urljoin, parse_qs, urlsplit, urlunsplit import hashlib import requests def api_url(host, port, endpoint): """Returns a joined url. """ joined = urljoin('http://{0}:{1}'.format(host, port), endpoint) scheme, netloc, path, query_string, fragment = urlsplit(joined) query_params = parse_qs(query_string) query_params['format'] = ['json'] new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment)) def password_data(username, password): """Returns a dict with username and its encoded password. """ return { 'username': username, 'password': hashlib.sha1(password.encode('utf-8')).hexdigest(), 'passwordMd5': hashlib.md5(password.encode('utf-8')).hexdigest() } def create_headers(user_id, token=None): """Return header dict that is needed to talk to the Emby API. """ headers = { 'Authorization': 'MediaBrowser', 'UserId': user_id, 'Client': 'other', 'Device': 'empy', 'DeviceId': 'beets', 'Version': '0.0.0' } if token: headers['X-MediaBrowser-Token'] = token return headers def get_token(host, port, headers, auth_data): """Return token for a user. """ url = api_url(host, port, '/Users/AuthenticateByName') r = requests.post(url, headers=headers, data=auth_data) return r.json().get('AccessToken') def get_user(host, port, username): """Return user dict from server or None if there is no user. """ url = api_url(host, port, '/Users/Public') r = requests.get(url) user = [i for i in r.json() if i['Name'] == username] return user class EmbyUpdate(BeetsPlugin): def __init__(self): super(EmbyUpdate, self).__init__() # Adding defaults. 
config['emby'].add({ u'host': u'localhost', u'port': 8096 }) self.register_listener('database_change', self.listen_for_db_change) def listen_for_db_change(self, lib, model): """Listens for beets db change and register the update for the end. """ self.register_listener('cli_exit', self.update) def update(self, lib): """When the client exists try to send refresh request to Emby. """ self._log.info(u'Updating Emby library...') host = config['emby']['host'].get() port = config['emby']['port'].get() username = config['emby']['username'].get() password = config['emby']['password'].get() token = config['emby']['apikey'].get() # Get user information from the Emby API. user = get_user(host, port, username) if not user: self._log.warning(u'User {0} could not be found.'.format(username)) return if not token: # Create Authentication data and headers. auth_data = password_data(username, password) headers = create_headers(user[0]['Id']) # Get authentication token. token = get_token(host, port, headers, auth_data) if not token: self._log.warning( u'Could not get token for user {0}', username ) return # Recreate headers with a token. headers = create_headers(user[0]['Id'], token=token) # Trigger the Update. url = api_url(host, port, '/Library/Refresh') r = requests.post(url, headers=headers) if r.status_code != 204: self._log.warning(u'Update could not be triggered') else: self._log.info(u'Update triggered.')
gpl-3.0
eddyb/servo
tests/wpt/mozilla/tests/webgl/conformance-2.0.0/deqp/functional/gles3/uniformbuffers/uniformbuffers_test_generator.py
51
3333
#!/usr/bin/env python # Copyright (c) 2016 The Khronos Group Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and/or associated documentation files (the # "Materials"), to deal in the Materials without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Materials, and to # permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. """ Generator for uniformbuffers* tests. This file needs to be run in its folder. """ import sys _DO_NOT_EDIT_WARNING = """<!-- This file is auto-generated from uniformbuffers_test_generator.py DO NOT EDIT! 
--> """ _HTML_TEMPLATE = """<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>WebGL Uniform Block Conformance Tests</title> <link rel="stylesheet" href="../../../../resources/js-test-style.css"/> <script src="../../../../js/js-test-pre.js"></script> <script src="../../../../js/webgl-test-utils.js"></script> <script src="../../../../closure-library/closure/goog/base.js"></script> <script src="../../../deqp-deps.js"></script> <script>goog.require('functional.gles3.es3fUniformBlockTests');</script> </head> <body> <div id="description"></div> <div id="console"></div> <canvas id="canvas" width="200" height="100"> </canvas> <script> var wtu = WebGLTestUtils; var gl = wtu.create3DContext('canvas', null, 2); functional.gles3.es3fUniformBlockTests.run([%(start)s, %(end)s]); </script> </body> </html> """ _GROUPS = [ 'single_basic_type', 'single_basic_array', 'single_struct', 'single_struct_array', 'single_nested_struct', 'single_nested_struct_array', 'instance_array_basic_type', 'multi_basic_types', 'multi_nested_struct', 'random', ] def GenerateFilename(group): """Generate test filename.""" filename = group filename += ".html" return filename def WriteTest(filename, start, end): """Write one test.""" file = open(filename, "wb") file.write(_DO_NOT_EDIT_WARNING) file.write(_HTML_TEMPLATE % { 'start': start, 'end': end }) file.close def GenerateTests(): """Generate all tests.""" filelist = [] for ii in range(len(_GROUPS)): filename = GenerateFilename(_GROUPS[ii]) filelist.append(filename) WriteTest(filename, ii, ii + 1) return filelist def GenerateTestList(filelist): file = open("00_test_list.txt", "wb") file.write('\n'.join(filelist)) file.close def main(argv): """This is the main function.""" filelist = GenerateTests() GenerateTestList(filelist) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mpl-2.0
emop/task-worker
src/python/Sailing/src/sailing/webrobot/captcha/http_client.py
4
4971
# -*- coding: utf-8 -*- import urllib2, httplib, cookielib import StringIO import gzip from urlparse import urlparse import logging import socket import os import urllib import time import re # timeout in seconds timeout = 10 socket.setdefaulttimeout(timeout) class HTTPClient(object): def __init__(self, www_root=None): self.proxy = None self.last_url = None self.logger = logging.getLogger("HttpClient") #self.cookies = cookielib.MozillaCookieJar("http_cookie.txt") #self.cookie_handler = urllib2.HTTPCookieProcessor(self.cookies) def set_proxy(self, proxy, www_root=None): if proxy: self.proxy = urllib2.ProxyHandler(proxy) def _http_handlers(self): h = [] if self.proxy: h.append(self.proxy) return h def relative_path(self, url): if url.startswith("http:"): self.last_url = urlparse(url) elif url.startswith('/'): pass elif url.startswith('?'): pass else: pass return self.last_url.geturl() def download(self, url, save_as): self.logger.info("download:%s --> %s" % (url, save_as)) self.post(url, save_as) return '200' def post(self, url, save_as, data=None): if data and isinstance(data, dict): data = urllib.urlencode(data) elif data and os.path.isfile(data): fd = open(data, 'r') data = fd.read() fd.close() data = self._http_request(url, data) fd = open(save_as, "wb") fd.write(data) fd.close() return '200' def get(self, url): return self._http_request(url, None) def post_data(self, url, data, headers={}): if data and isinstance(data, dict): data = urllib.urlencode(data) elif data and os.path.isfile(data): fd = open(data, 'r') data = fd.read() fd.close() return self._http_request(url, data, headers) def _http_request(self, url, req_data, headers={}): #url = self.relative_path(url) data = None try: #httplib.HTTPConnection.debuglevel = 1 if req_data: request = urllib2.Request(url, req_data) else: request = urllib2.Request(url) request.add_header('Accept-encoding', 'gzip') for k, v in headers.iteritems(): request.add_header(k, v) opener = 
urllib2.build_opener(*self._http_handlers()) for i in range(3): try: f = opener.open(request, timeout=180) #print f encoding = f.headers.get('Content-Encoding') trans_encoding = f.headers.get('Transfer-Encoding') #self.logger.info("header:%s" % f.headers) if encoding and 'gzip' in encoding: #compresseddata = f.read() compresseddata = self._read_data(f, trans_encoding=='chunked') compressedstream = StringIO.StringIO(compresseddata) gzipper = gzip.GzipFile(fileobj=compressedstream) data = gzipper.read() gzipper.close() else: data = self._read_data(f, trans_encoding=='chunked') # f.read() f.close() break except Exception, e: if re.search(r"(timed out|reset by peer)", str(e)): self.logger.info("Time out, sleep 5 seconds then retry, url:%s" % url) time.sleep(5) else: raise #content_size = f.headers.get("Content-Length", 0) #if 'gzip' not in encoding and int(content_size) > 0 and int(content_size) != len(data): # raise Exception("Content size error:%s != %s" %(int(content_size), len(data))) #else: # #self.logger.info("Data read right:%s == %s" %(int(content_size), len(data))) # pass except urllib2.HTTPError, e: raise return data def _read_data(self, fd, chunked=False): data = fd.read() while chunked: ch = fd.read(1024) if not ch:break data += ch return data def close(self): self.logger.info("save cookies to 'http_cookie.txt'....") self.cookies.save("http_cookie.txt", True, True)
bsd-2-clause
shanthanpalle/myRIOTEx
tests/nativenet/tests/01-tests.py
28
1196
#! /usr/bin/env python import sys from pexpect import spawn if __name__ == "__main__": try: if len(sys.argv) > 1: expected_received = int(sys.argv[1]) else: expected_received = 5 except TypeError: sys.stderr.write("Usage: "+str(sys.argv[0])+" [<expected 'received'>]\n") sys.exit(1) receiver = spawn("bin/native/nativenet.elf tap0") sender = spawn("bin/native/nativenet_sender.elf tap1") receiver.expect(r"main\(\): initializing transceiver") receiver.expect(r"main\(\): starting transceiver") receiver.expect(r"main\(\): starting radio thread") receiver.expect("Start receiving") receiver.expect(r"\[nativenet\] trying to set address \d+") sender.expect(r"main\(\): initializing transceiver") sender.expect(r"main\(\): starting transceiver") sender.expect(r"\[nativenet\] trying to set address \d+") sender.expect("Start sending packets") while expected_received > 0: receiver.expect("received") expected_received -= 1 if not sender.terminate(): sender.terminate(force=True) if not receiver.terminate(): receiver.terminate(force=True)
lgpl-2.1
colpain/NeuralDecoding
config/global_config.py
1
1910
import os, json cur_dir = os.path.dirname(__file__) project_root = os.path.join(cur_dir, '..') CONFIG_PATH = os.path.join(project_root, 'config') PIPELINE_CONFIG_PATH = os.path.join(CONFIG_PATH, 'experiment_config.json') class _pipeline_config(object): def __init__(self): self._config = json.load(open(PIPELINE_CONFIG_PATH, 'r')) self.__model_name = self.__get_required('model_name') self.__hyper_parameter = self.__get_required('hyper_parameter') self.__mode = self.__get_not_required('mode', '1') self.__output_path = os.path.join(project_root, 'results') @property def MODEL_NAME(self): return self.__model_name @MODEL_NAME.setter def MODEL_NAME(self, *arg, **kwargs): raise Exception("Set property outside the class scope is prohibited") @property def HYPER_PARAMETER(self): return self.__hyper_parameter @HYPER_PARAMETER.setter def HYPER_PARAMETER(self, *arg, **kwargs): raise Exception("Set property outside the class scope is prohibited") @property def MODE(self): return self.__mode @MODE.setter def MODE(self, *arg, **kwargs): raise Exception("Set property outside the class scope is prohibited") @property def OUTPUT_FOLDER_PATH(self): return self.__output_path @OUTPUT_FOLDER_PATH.setter def OUTPUT_FOLDER_PATH(self, *arg, **kwargs): raise Exception("Set property outside the class scope is prohibited") def __get_not_required(self, key, default): return self._config.get(key, default) def __get_required(self, tag): ret = self._config.get(tag, None) if not ret: raise Exception(self.__warn_msg(tag)) return ret def __warn_msg(self, tag): return "{0} is not set, please set {0} in config file".format(tag) PIPELINE_CONFIG = _pipeline_config()
apache-2.0
OLAPLINE/TM1py
Tests/Hierarchy.py
1
15829
import configparser from pathlib import Path import unittest from TM1py import Element from TM1py.Objects import Dimension, Hierarchy, Subset from TM1py.Services import TM1Service config = configparser.ConfigParser() config.read(Path(__file__).parent.joinpath('config.ini')) DIMENSION_PREFIX = 'TM1py_Tests_Hierarchy_' DIMENSION_NAME = DIMENSION_PREFIX + "Some_Name" SUBSET_NAME = DIMENSION_PREFIX + "Some_Subset" class TestHierarchyMethods(unittest.TestCase): tm1 = None @classmethod def setup_class(cls): cls.tm1 = TM1Service(**config['tm1srv01']) @classmethod def teardown_class(cls): cls.tm1.logout() @classmethod def setUp(cls): cls.create_dimension() cls.create_subset() @classmethod def tearDown(cls): cls.delete_dimension() @classmethod def create_dimension(cls): dimension = Dimension(DIMENSION_NAME) hierarchy = Hierarchy(name=DIMENSION_NAME, dimension_name=DIMENSION_NAME) hierarchy.add_element('Total Years', 'Consolidated') hierarchy.add_element('No Year', 'Numeric') hierarchy.add_element('1989', 'Numeric') hierarchy.add_element("My Element", "Numeric") hierarchy.add_element_attribute('Previous Year', 'String') hierarchy.add_element_attribute('Next Year', 'String') hierarchy.add_edge('Total Years', '1989', 2) dimension.add_hierarchy(hierarchy) cls.tm1.dimensions.create(dimension) @classmethod def delete_dimension(cls): cls.tm1.dimensions.delete(DIMENSION_NAME) @classmethod def create_subset(cls): s = Subset(SUBSET_NAME, DIMENSION_NAME, DIMENSION_NAME, expression="{{[{}].Members}}".format(DIMENSION_NAME)) cls.tm1.dimensions.subsets.create(s, False) def add_other_hierarchy(self): dimension = self.tm1.dimensions.get(DIMENSION_NAME) # other hierarchy hierarchy = Hierarchy(name="Other Hierarchy", dimension_name=DIMENSION_NAME) hierarchy.add_element('Other Total Years', 'Consolidated') hierarchy.add_element('No Year', 'Numeric') hierarchy.add_element('1989', 'Numeric') hierarchy.add_element("Element With ' in the name", "Numeric") hierarchy.add_element_attribute('Previous 
Year', 'String') hierarchy.add_element_attribute('Next Year', 'String') hierarchy.add_edge('Other Total Years', '1989', 2) dimension.add_hierarchy(hierarchy) self.tm1.dimensions.update(dimension) def add_balanced_hierarchy(self, hierarchy_name): dimension = self.tm1.dimensions.get(DIMENSION_NAME) # other hierarchy hierarchy = Hierarchy(name=hierarchy_name, dimension_name=DIMENSION_NAME) hierarchy.add_element("Total Years Balanced", "Consolidated") hierarchy.add_element('1989', 'Numeric') hierarchy.add_element('1990', 'Numeric') hierarchy.add_element('1991', 'Numeric') hierarchy.add_edge("Total Years Balanced", "1989", 1) hierarchy.add_edge("Total Years Balanced", "1990", 1) hierarchy.add_edge("Total Years Balanced", "1991", 1) dimension.add_hierarchy(hierarchy) self.tm1.dimensions.update(dimension) def update_hierarchy(self): d = self.tm1.dimensions.get(dimension_name=DIMENSION_NAME) h = d.default_hierarchy # Edit Elements and Edges for year in range(2010, 2021, 1): parent = str(year) h.add_element(parent, 'Consolidated') h.add_edge('Total Years', parent, 1) for month in ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'): component = '{}-{}'.format(year, month) h.add_element(component, 'Numeric') h.add_edge(parent, component, 1) # Edit Element Attributes h.add_element_attribute('Name Long', 'Alias') h.add_element_attribute('Name Short', 'Alias') h.add_element_attribute('Days', 'Numeric') # Remove attribute h.remove_element_attribute('Next Year') # Remove Edge h.remove_edge('Total Years', '1989') # Update Edge h.update_edge('Total Years', '2011', 2) # Update_element h.update_element('No Year', 'String') self.tm1.dimensions.update(d) def test_get_hierarchy(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) self.assertIn('Total Years', h.elements.keys()) self.assertIn('No Year', h.elements.keys()) self.assertIn('1989', h.elements.keys()) self.assertIn('Next Year', [ea.name for ea in h.element_attributes]) 
self.assertIn('Previous Year', [ea.name for ea in h.element_attributes]) self.assertIn(SUBSET_NAME, h.subsets) def test_hierarchy___get__(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) element = h["Total Years"] self.assertIsInstance(element, Element) self.assertEqual(element.name, "Total Years") self.assertEqual(element.element_type, Element.Types.CONSOLIDATED) element = h["Total Years".replace(" ", "").lower()] self.assertIsInstance(element, Element) self.assertEqual(element.name, "Total Years") self.assertEqual(element.element_type, Element.Types.CONSOLIDATED) element = h["1989"] self.assertIsInstance(element, Element) self.assertEqual(element.name, "1989") self.assertEqual(element.element_type, Element.Types.NUMERIC) self.assertNotEqual(element.element_type, Element.Types.STRING) def test_hierarchy___get__exception(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) try: _ = h["Im not a valid year"] raise Exception("did not throw Exception when expected to do so") except ValueError: pass def test_hierarchy___contains__(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) self.assertIn("1989", h) self.assertIn("Total Years", h) self.assertIn("Total Years".replace(" ", "").lower(), h) self.assertIn("1 9 8 9 ", h) self.assertNotIn("3001", h) def test_hierarchy___iter__(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) elements_cloned_through_iter = [element for element in h] self.assertEqual(len(h._elements), len(elements_cloned_through_iter)) for element in elements_cloned_through_iter: self.assertIn(element.name, h.elements) def test_hierarchy___len__(self): h = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) self.assertGreater(len(h), 0) self.assertEqual(len(h), len(h._elements)) def test_update_hierarchy(self): self.update_hierarchy() # Check if update works d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy 
self.assertIn('2010-Jan', h.elements.keys()) self.assertIn('2020-Dec', h.elements.keys()) self.assertNotIn('Next Year', [ea.name for ea in h.element_attributes]) self.assertIn('Previous Year', [ea.name for ea in h.element_attributes]) self.assertIn('Days', [ea.name for ea in h.element_attributes]) self.assertIn('Name Long', [ea.name for ea in h.element_attributes]) self.assertEqual(h.edges[('Total Years', '2011')], 2) self.assertEqual(h.elements['No Year'].element_type, Element.Types.STRING) summary = self.tm1.dimensions.hierarchies.get_hierarchy_summary(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(summary["Elements"], 147) self.assertEqual(summary["Edges"], 143) self.assertEqual(summary["Members"], 147) self.assertEqual(summary["ElementAttributes"], 4) self.assertEqual(summary["Levels"], 3) def test_update_hierarchy_remove_c_element(self): self.update_hierarchy() d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('2011', h.elements) self.assertIn(('2011', '2011-Jan'), h.edges) h.remove_element('2011') self.tm1.dimensions.hierarchies.update(h) d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertNotIn('2011', h.elements) self.assertNotIn(('2011', '2011-Jan'), h.edges) def test_update_hierarchy_remove_n_element(self): self.update_hierarchy() d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('2011-Jan', h.elements) self.assertIn(('2011', '2011-Jan'), h.edges) h.remove_element('2011-Jan') self.tm1.dimensions.hierarchies.update(h) d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertNotIn('2011-Jan', h.elements) self.assertNotIn(('2011', '2011-Jan'), h.edges) def test_update_hierarchy_remove_s_element(self): self.update_hierarchy() d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('No Year', h.elements) h.remove_element('No Year') self.tm1.dimensions.hierarchies.update(h) d = self.tm1.dimensions.get(DIMENSION_NAME) h = 
d.default_hierarchy self.assertNotIn('No Year', h.elements) def test_update_hierarchy_remove_edges_related_to_element(self): self.update_hierarchy() d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('2012', h.elements) h.remove_edges_related_to_element(element_name='2012 ') self.tm1.dimensions.hierarchies.update(h) d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('2012', h.elements) self.assertNotIn(('2012', '2012- Jan'), h.edges) self.assertNotIn(('2012', '2012-DEC'), h.edges) self.assertNotIn(('TotalYears', '2012'), h.edges) self.assertIn(('Total YEARS', '2011'), h.edges) self.assertIn(('Total Years', '2013'), h.edges) def test_update_hierarchy_remove_edges(self): self.update_hierarchy() d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertIn('2012', h.elements) self.assertIn(('2012', '2012-Jan'), h.edges) self.assertIn(('2012', '2012-Feb'), h.edges) self.assertIn(('2012', '2012-Mar'), h.edges) self.assertIn(('2012', '2012-Apr'), h.edges) edges = [('2012', '2012- Jan'), ('2012', '2012-Feb'), ('2012', '2012-MAR'), ('2012', '2012-Apr')] h.remove_edges(edges=edges) self.tm1.dimensions.hierarchies.update(h) d = self.tm1.dimensions.get(DIMENSION_NAME) h = d.default_hierarchy self.assertNotIn(('2012', '2012-Jan'), h.edges) self.assertNotIn(('2012', '2012-Feb'), h.edges) self.assertNotIn(('2012', '2012-Mar'), h.edges) self.assertNotIn(('2012', '2012-Apr'), h.edges) self.assertNotIn(('2012', '2012 - JAN'), h.edges) self.assertIn(('2012', '2012-May'), h.edges) self.assertIn('2012', h.elements) self.assertIn('2012-Feb', h.elements) def test_hierarchy_summary(self): summary = self.tm1.dimensions.hierarchies.get_hierarchy_summary(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(summary["Elements"], 4) self.assertEqual(summary["Edges"], 1) self.assertEqual(summary["Members"], 4) self.assertEqual(summary["ElementAttributes"], 2) self.assertEqual(summary["Levels"], 2) def 
test_get_default_member(self): default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "Total Years") def test_get_default_member_for_leaves_hierarchy(self): self.add_other_hierarchy() default_member = self.tm1.dimensions.hierarchies.get_default_member( dimension_name=DIMENSION_NAME, hierarchy_name="Leaves") self.assertEqual(default_member, "No Year") def test_update_default_member(self): default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "Total Years") self.tm1.dimensions.hierarchies.update_default_member(DIMENSION_NAME, DIMENSION_NAME, member_name="1989") default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "1989") def test_update_default_member_skip_hierarchy_name_argument(self): default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME) self.assertEqual(default_member, "Total Years") self.tm1.dimensions.hierarchies.update_default_member(dimension_name=DIMENSION_NAME, member_name="1989") default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME) self.assertEqual(default_member, "1989") def test_update_default_member_for_alternate_hierarchy(self): self.add_other_hierarchy() default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, "Other Hierarchy") self.assertEqual(default_member, "Other Total Years") self.tm1.dimensions.hierarchies.update_default_member(DIMENSION_NAME, DIMENSION_NAME, member_name="1989") default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "1989") def test_update_default_member_for_leaves_hierarchy(self): self.add_other_hierarchy() default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, "Leaves") self.assertEqual(default_member, 
"No Year") self.tm1.dimensions.hierarchies.update_default_member(DIMENSION_NAME, DIMENSION_NAME, member_name="1989") default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "1989") def test_update_default_member_with_invalid_value(self): default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "Total Years") self.tm1.dimensions.hierarchies.update_default_member( DIMENSION_NAME, DIMENSION_NAME, member_name="I am not a valid Member") default_member = self.tm1.dimensions.hierarchies.get_default_member(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(default_member, "Total Years") def test_remove_all_edges(self): hierarchy = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) self.assertGreater(len(hierarchy.edges), 0) self.tm1.dimensions.hierarchies.remove_all_edges(DIMENSION_NAME, DIMENSION_NAME) hierarchy = self.tm1.dimensions.hierarchies.get(DIMENSION_NAME, DIMENSION_NAME) self.assertEqual(len(hierarchy.edges), 0) def test_is_balanced_false(self): is_balanced = self.tm1.dimensions.hierarchies.is_balanced(DIMENSION_NAME, DIMENSION_NAME) self.assertFalse(is_balanced) def test_is_balanced_true(self): balanced_hierarchy_name = "Balanced Hierarchy" self.add_balanced_hierarchy(balanced_hierarchy_name) is_balanced = self.tm1.dimensions.hierarchies.is_balanced(DIMENSION_NAME, balanced_hierarchy_name) self.assertTrue(is_balanced) if __name__ == '__main__': unittest.main()
mit
opoplawski/fedmsg_meta_fedora_infrastructure
fedmsg_meta_fedora_infrastructure/tests/mm2.py
5
8036
# This file is part of fedmsg. # Copyright (C) 2012 Red Hat, Inc. # # fedmsg is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # fedmsg is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with fedmsg; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Authors: Ralph Bean <rbean@redhat.com> # """ Tests for mirrormanager2 messages """ import unittest from fedmsg_meta_fedora_infrastructure.tests.base import Base from .common import add_doc class TestCrawlerStart(Base): """ `Mirrormanager <https://mirrors.fedoraproject.org>`_ is the system that (surprise) manages the mirror network for Fedora (and others). To accomplish this task, it does lots of things. One of those things is to regularly crawl the list of mirrors and determine who has up-to-date content and who has stale content. *This* message is one that gets published by its backend crawler when it **starts** its task. 
""" expected_title = "mirrormanager.crawler.start" expected_subti = "mirrormanager's crawler started a crawl of 1 mirrors" expected_link = "https://mirrors.fedoraproject.org" expected_icon = "https://apps.fedoraproject.org/img/icons/downloads.png" expected_secondary_icon = expected_icon expected_packages = set([]) expected_usernames = set([]) expected_objects = set(['mirrors/mirror.fpt-telecom.fpt.net']) msg = { "username": "apache", "i": 3, "timestamp": 1412327834, "msg_id": "2014-1f52337a-8dbe-48f1-baad-f2c770c60640", "crypto": "x509", "topic": "org.fedoraproject.prod.mirrormanager.crawler.start", "msg": { "hosts": [{ "comment": "Mirror FPT Telecom", "name": "mirror.fpt-telecom.fpt.net", "internet2": True, "asn_clients": True, "country": "VN", "admin_active": True, "bandwidth_int": 1000, "site": { "id": 1043, "name": "mirror.fpt-telecom.fpt.net" }, "private": False, "last_crawled": 1374957440.0, "internet2_clients": False, "id": 1432, "user_active": False, "last_checked_in": None, "last_crawl_duration": 9, "asn": None, "max_connections": 1 }] } } class TestCrawlerComplete(Base): """ `Mirrormanager <https://mirrors.fedoraproject.org>`_ is the system that (surprise) manages the mirror network for Fedora (and others). To accomplish this task, it does lots of things. One of those things is to regularly crawl the list of mirrors and determine who has up-to-date content and who has stale content. *This* message is one that gets published by its backend crawler when it **completes** its task. 
""" expected_title = "mirrormanager.crawler.complete" expected_subti = "mirrormanager's crawler finished a crawl of " + \ "1 mirrors (1 succeeded, 0 failed)" expected_link = "https://mirrors.fedoraproject.org" expected_icon = "https://apps.fedoraproject.org/img/icons/downloads.png" expected_secondary_icon = expected_icon expected_packages = set([]) expected_usernames = set([]) expected_objects = set(['mirrors/mirror.fpt-telecom.fpt.net']) msg = { "username": "apache", "i": 3, "timestamp": 1412327834, "msg_id": "2014-1f52337a-8dbe-48f1-baad-f2c770c60640", "crypto": "x509", "topic": "org.fedoraproject.prod.mirrormanager.crawler.complete", "msg": { "results": [{ "rc": 0, "host": { "comment": "Mirror FPT Telecom", "name": "mirror.fpt-telecom.fpt.net", "internet2": True, "asn_clients": True, "country": "VN", "admin_active": True, "bandwidth_int": 1000, "site": { "id": 1043, "name": "mirror.fpt-telecom.fpt.net" }, "private": False, "last_crawled": 1374957440.0, "internet2_clients": False, "id": 1432, "user_active": False, "last_checked_in": None, "last_crawl_duration": 9, "asn": None, "max_connections": 1 }, }] } } class TestNetblocksSuccess(Base): """ `Mirrormanager <https://mirrors.fedoraproject.org>`_ is the system that (surprise) manages the mirror network for Fedora (and others). To accomplish this task, it does lots of things. One of those things is to pull in mappings of ASN numbers from publicly accessible BGP tables. It uses this information as part of a larger process to try and route clients to mirrors that are close to them. *This* message is one that gets published by a backend cronjob when it **successfully rebuilds one of its netblocks tables**. 
""" expected_title = "mirrormanager.netblocks.get" expected_subti = "mirrormanager's backend successfully updated its " + \ "global netblocks file" expected_link = "https://mirrors.fedoraproject.org" expected_icon = "https://apps.fedoraproject.org/img/icons/downloads.png" expected_secondary_icon = expected_icon expected_packages = set([]) expected_usernames = set([]) expected_objects = set(['netblocks/global']) msg = { "username": "apache", "i": 3, "timestamp": 1412327834, "msg_id": "2014-1f52337a-8dbe-48f1-baad-f2c770c60640", "crypto": "x509", "topic": "org.fedoraproject.prod.mirrormanager.netblocks.get", "msg": { "type": "global", "success": True, } } class TestNetblocksFailure(Base): """ `Mirrormanager <https://mirrors.fedoraproject.org>`_ is the system that (surprise) manages the mirror network for Fedora (and others). To accomplish this task, it does lots of things. One of those things is to pull in mappings of ASN numbers from publicly accessible BGP tables. It uses this information as part of a larger process to try and route clients to mirrors that are close to them. *This* message is one that gets published by a backend cronjob when it **fails to rebuild one of its netblocks tables**. """ expected_title = "mirrormanager.netblocks.get" expected_subti = "mirrormanager's backend failed to update its " + \ "internet2 netblocks file" expected_link = "https://mirrors.fedoraproject.org" expected_icon = "https://apps.fedoraproject.org/img/icons/downloads.png" expected_secondary_icon = expected_icon expected_packages = set([]) expected_usernames = set([]) expected_objects = set(['netblocks/internet2']) msg = { "username": "apache", "i": 3, "timestamp": 1412327834, "msg_id": "2014-1f52337a-8dbe-48f1-baad-f2c770c60640", "crypto": "x509", "topic": "org.fedoraproject.prod.mirrormanager.netblocks.get", "msg": { "type": "internet2", "success": False, } } add_doc(locals()) if __name__ == '__main__': unittest.main()
lgpl-2.1
openmotics/gateway
src/gateway/migrations/orm/013_add_config.py
1
1498
# Copyright (C) 2020 OpenMotics BV # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from peewee import ( Model, Database, SqliteDatabase, AutoField, CharField, IntegerField ) from peewee_migrate import Migrator import constants if False: # MYPY from typing import Dict, Any def migrate(migrator, database, fake=False, **kwargs): # type: (Migrator, Database, bool, Dict[Any, Any]) -> None class BaseModel(Model): class Meta: database = SqliteDatabase(constants.get_gateway_database_file(), pragmas={'foreign_keys': 1}) class Config(BaseModel): id = AutoField() setting = CharField(unique=True) data = CharField() migrator.create_model(Config) def rollback(migrator, database, fake=False, **kwargs): # type: (Migrator, Database, bool, Dict[Any, Any]) -> None pass
agpl-3.0
harshita-gupta/Harvard-FRSEM-Catalog-2016-17
flask/lib/python2.7/site-packages/Crypto/Random/OSRNG/fallback.py
138
1520
# # Random/OSRNG/fallback.py : Fallback entropy source for systems with os.urandom # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== __revision__ = "$Id$" __all__ = ['PythonOSURandomRNG'] import os from rng_base import BaseRNG class PythonOSURandomRNG(BaseRNG): name = "<os.urandom>" def __init__(self): self._read = os.urandom BaseRNG.__init__(self) def _close(self): self._read = None def new(*args, **kwargs): return PythonOSURandomRNG(*args, **kwargs) # vim:set ts=4 sw=4 sts=4 expandtab:
mit
nishantjr/pjproject
tests/pjsua/scripts-recvfrom/231_reg_bad_fail_stale_false_nonce_changed.py
42
1562
# $Id$ import inc_sip as sip import inc_sdp as sdp # In this test we simulate broken server, where: # - it wants to signal that NONCE has change # - but it sets stale=false # For this case pjsip will retry authentication until # PJSIP_MAX_STALE_COUNT is exceeded. # pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \ "--realm=python --user=username --password=password" req1 = sip.RecvfromTransaction("Initial request", 401, include=["REGISTER sip"], exclude=["Authorization"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""] ) req2 = sip.RecvfromTransaction("First retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"1\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"] ) req3 = sip.RecvfromTransaction("Second retry retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"2\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"] ) req4 = sip.RecvfromTransaction("Third retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"3\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"], expect="PJSIP_EAUTHSTALECOUNT" ) recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ", pjsua, [req1, req2, req3, req4])
gpl-2.0
pecan/pecan
pecan/__init__.py
4
5028
from .core import ( abort, override_template, Pecan, Request, Response, load_app, redirect, render, request, response ) from .decorators import expose from .hooks import RequestViewerHook from .middleware.debug import DebugMiddleware from .middleware.errordocument import ErrorDocumentMiddleware from .middleware.recursive import RecursiveMiddleware from .middleware.static import StaticFileMiddleware from .routing import route from .configuration import set_config, Config from .configuration import _runtime_conf as conf from . import middleware try: from logging.config import dictConfig as load_logging_config except ImportError: from logutils.dictconfig import dictConfig as load_logging_config # noqa import warnings __all__ = [ 'make_app', 'load_app', 'Pecan', 'Request', 'Response', 'request', 'response', 'override_template', 'expose', 'conf', 'set_config', 'render', 'abort', 'redirect', 'route' ] def make_app(root, **kw): ''' Utility for creating the Pecan application object. This function should generally be called from the ``setup_app`` function in your project's ``app.py`` file. :param root: A string representing a root controller object (e.g., "myapp.controller.root.RootController") :param static_root: The relative path to a directory containing static files. Serving static files is only enabled when debug mode is set. :param debug: A flag to enable debug mode. This enables the debug middleware and serving static files. :param wrap_app: A function or middleware class to wrap the Pecan app. This must either be a wsgi middleware class or a function that returns a wsgi application. This wrapper is applied first before wrapping the application in other middlewares such as Pecan's debug middleware. This should be used if you want to use middleware to perform authentication or intercept all requests before they are routed to the root controller. :param logging: A dictionary used to configure logging. This uses ``logging.config.dictConfig``. 
All other keyword arguments are passed in to the Pecan app constructor. :returns: a ``Pecan`` object. ''' # Pass logging configuration (if it exists) on to the Python logging module logging = kw.get('logging', {}) debug = kw.get('debug', False) if logging: if debug: try: # # By default, Python 2.7+ silences DeprecationWarnings. # However, if conf.app.debug is True, we should probably ensure # that users see these types of warnings. # from logging import captureWarnings captureWarnings(True) warnings.simplefilter("default", DeprecationWarning) except ImportError: # No captureWarnings on Python 2.6, DeprecationWarnings are on pass if isinstance(logging, Config): logging = logging.to_dict() if 'version' not in logging: logging['version'] = 1 load_logging_config(logging) # Instantiate the WSGI app by passing **kw onward app = Pecan(root, **kw) # Optionally wrap the app in another WSGI app wrap_app = kw.get('wrap_app', None) if wrap_app: app = wrap_app(app) # Configuration for serving custom error messages errors = kw.get('errors', getattr(conf.app, 'errors', {})) if errors: app = middleware.errordocument.ErrorDocumentMiddleware(app, errors) # Included for internal redirect support app = middleware.recursive.RecursiveMiddleware(app) # When in debug mode, load exception debugging middleware static_root = kw.get('static_root', None) if debug: debug_kwargs = getattr(conf, 'debug', {}) debug_kwargs.setdefault('context_injectors', []).append( lambda environ: { 'request': environ.get('pecan.locals', {}).get('request') } ) app = DebugMiddleware( app, **debug_kwargs ) # Support for serving static files (for development convenience) if static_root: app = middleware.static.StaticFileMiddleware(app, static_root) elif static_root: warnings.warn( "`static_root` is only used when `debug` is True, ignoring", RuntimeWarning ) if hasattr(conf, 'requestviewer'): warnings.warn(''.join([ "`pecan.conf.requestviewer` is deprecated. 
To apply the ", "`RequestViewerHook` to your application, add it to ", "`pecan.conf.app.hooks` or manually in your project's `app.py` ", "file."]), DeprecationWarning ) return app
bsd-3-clause
printedheart/h2o-3
py2/testdir_single_jvm/test_parse_nfs.py
20
2678
import unittest, sys sys.path.extend(['.','..','../..','py']) import os import h2o2 as h2o import h2o_cmd, h2o_import as h2i, h2o_browse as h2b from h2o_test import find_file, dump_json, verboseprint expectedZeros = [0 for i in range(11)] CAUSE_FAIL = False def assertEqualMsg(a, b): assert a == b, "%s %s" % (a, b) def parseKeyIndexedCheck(frames_result, multiplyExpected): # get the name of the frame? print "" frame = frames_result['frames'][0] rows = frame['rows'] columns = frame['columns'] for i,c in enumerate(columns): label = c['label'] stype = c['type'] missing = c['missing_count'] zeros = c['zero_count'] domain = c['domain'] print "column: %s label: %s type: %s missing: %s zeros: %s domain: %s" %\ (i,label,stype,missing,zeros,domain) # files are concats of covtype. so multiply expected # assertEqualMsg(zeros, expectedZeros[i] * multiplyExpected) # assertEqualMsg(label,"C%s" % (i+1)) # assertEqualMsg(stype,"int") assertEqualMsg(missing, 0) # assertEqualMsg(domain, None) class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_parse_nfs(self): print "run as user 0xcustomer on machine with nfs /mnt/0xcustomer-datasets/c1" tryList = [ ('iris2.csv', 'iris2.hex', 1, 30), ] for (csvFilename, hex_key, multiplyExpected, timeoutSecs) in tryList: importFolderPath = "/mnt/0xcustomer-datasets/c1" csvPathname = importFolderPath + "/" + csvFilename parseResult = h2i.import_parse(path=csvPathname, schema='local', timeoutSecs=timeoutSecs, hex_key=hex_key, chunk_size=4194304/2, doSummary=False) pA = h2o_cmd.ParseObj(parseResult) iA = h2o_cmd.InspectObj(pA.parse_key, expectedNumRows=150*multiplyExpected, expectedNumCols=5, expectedMissinglist=[]) print iA.missingList, iA.labelList, iA.numRows, iA.numCols for i in range(0): print "Summary on column", i co = h2o_cmd.runSummary(key=hex_key, column=i) k = 
parseResult['frames'][0]['frame_id']['name'] frames_result = h2o.nodes[0].frames(key=k, row_count=5) # print "frames_result from the first parseResult key", dump_json(frames_result) parseKeyIndexedCheck(frames_result, multiplyExpected) if __name__ == '__main__': h2o.unit_main()
apache-2.0
ojake/django
django/core/files/locks.py
725
3516
""" Portable file locking utilities. Based partially on an example by Jonathan Feignberg in the Python Cookbook [1] (licensed under the Python Software License) and a ctypes port by Anatoly Techtonik for Roundup [2] (license [3]). [1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203 [2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py [3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt Example Usage:: >>> from django.core.files import locks >>> with open('./file', 'wb') as f: ... locks.lock(f, locks.LOCK_EX) ... f.write('Django') """ import os __all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock') def _fd(f): """Get a filedescriptor from something which could be a file or an fd.""" return f.fileno() if hasattr(f, 'fileno') else f if os.name == 'nt': import msvcrt from ctypes import (sizeof, c_ulong, c_void_p, c_int64, Structure, Union, POINTER, windll, byref) from ctypes.wintypes import BOOL, DWORD, HANDLE LOCK_SH = 0 # the default LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK # --- Adapted from the pyserial project --- # detect size of ULONG_PTR if sizeof(c_ulong) != sizeof(c_void_p): ULONG_PTR = c_int64 else: ULONG_PTR = c_ulong PVOID = c_void_p # --- Union inside Structure by stackoverflow:3480240 --- class _OFFSET(Structure): _fields_ = [ ('Offset', DWORD), ('OffsetHigh', DWORD)] class _OFFSET_UNION(Union): _anonymous_ = ['_offset'] _fields_ = [ ('_offset', _OFFSET), ('Pointer', PVOID)] class OVERLAPPED(Structure): _anonymous_ = ['_offset_union'] _fields_ = [ ('Internal', ULONG_PTR), ('InternalHigh', ULONG_PTR), ('_offset_union', _OFFSET_UNION), ('hEvent', HANDLE)] LPOVERLAPPED = POINTER(OVERLAPPED) # --- Define function prototypes for extra safety --- LockFileEx = windll.kernel32.LockFileEx LockFileEx.restype = BOOL LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED] UnlockFileEx = windll.kernel32.UnlockFileEx UnlockFileEx.restype 
= BOOL UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED] def lock(f, flags): hfile = msvcrt.get_osfhandle(_fd(f)) overlapped = OVERLAPPED() ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped)) return bool(ret) def unlock(f): hfile = msvcrt.get_osfhandle(_fd(f)) overlapped = OVERLAPPED() ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped)) return bool(ret) else: try: import fcntl LOCK_SH = fcntl.LOCK_SH # shared lock LOCK_NB = fcntl.LOCK_NB # non-blocking LOCK_EX = fcntl.LOCK_EX except (ImportError, AttributeError): # File locking is not supported. LOCK_EX = LOCK_SH = LOCK_NB = 0 # Dummy functions that don't do anything. def lock(f, flags): # File is not locked return False def unlock(f): # File is unlocked return True else: def lock(f, flags): ret = fcntl.flock(_fd(f), flags) return (ret == 0) def unlock(f): ret = fcntl.flock(_fd(f), fcntl.LOCK_UN) return (ret == 0)
bsd-3-clause
hwu25/AppPkg
Applications/Python/Python-2.7.2/Lib/distutils/command/install_headers.py
59
1397
"""distutils.command.install_headers Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" __revision__ = "$Id$" from distutils.core import Command # XXX force is never used class install_headers(Command): description = "install C/C++ header files" user_options = [('install-dir=', 'd', "directory to install header files to"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def run(self): headers = self.distribution.headers if not headers: return self.mkpath(self.install_dir) for header in headers: (out, _) = self.copy_file(header, self.install_dir) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles # class install_headers
bsd-2-clause
google/skywater-pdk-libs-sky130_fd_io
docs/conf.py
1
13864
# -*- coding: utf-8 -*- # Copyright 2020 SkyWater PDK Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import docutils import os import re # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'SkyWater SKY130 PDK' copyright = '2020, SkyWater PDK Authors' author = 'SkyWater PDK Authors' # The short X.Y version version = '' # The full version, including alpha/beta/rc tags release = '' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel', 'sphinx.ext.githubpages', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinxcontrib_hdl_diagrams', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # Enable github links when not on readthedocs on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: html_context = { "display_github": True, # Integrate GitHub "github_user": "google", # Username "github_repo": "skywater-pdk", # Repo name "github_version": "master", # Version "conf_py_path": "/doc/", } else: docs_dir = os.path.abspath(os.path.dirname(__file__)) print("Docs dir is:", docs_dir) import subprocess subprocess.call('git fetch origin --unshallow', cwd=docs_dir, shell=True) subprocess.check_call('git fetch origin --tags', cwd=docs_dir, shell=True) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = re.sub('^v', '', os.popen('git describe ').read().strip()) # The short X.Y version. version = release # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
exclude_patterns = [ '_build', 'env', 'Thumbs.db', '.DS_Store', # Files included in other rst files. 'code-of-conduct.rst', 'rules/periphery-rules.rst', 'rules/device-details/*/index.rst', ] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # Prefix each section label with the name of the document it is in, followed by # a colon. For example, index:Introduction for a section called Introduction # that appears in document index.rst. Useful for avoiding ambiguity when the # same section heading appears in different documents. #autosectionlabel_prefix_document = True # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_symbiflow_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { # Specify a list of menu in Header. # Tuples forms: # ('Name', 'external url or path of pages in the document', boolean, 'icon name') # # Third argument: # True indicates an external link. # False indicates path of pages in the document. # # Fourth argument: # Specify the icon name. # For details see link. # https://material.io/icons/ 'header_links': [ ('Home', 'index', False, 'home'), ("GitHub", "https://github.com/google/skywater-pdk", True, 'code'), ("SkyWater", "https://www.skywatertechnology.com/", True, 'link'), ], # Customize css colors. # For details see link. # https://getmdl.io/customize/index.html # # Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue, # light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo) 'primary_color': 'light_green', # Values: Same as primary_color. 
(Default: pink) 'accent_color': 'teal', # Customize layout. # For details see link. # https://getmdl.io/components/index.html#layout-section 'fixed_drawer': True, 'fixed_header': True, 'header_waterfall': True, 'header_scroll': False, # Render title in header. # Values: True, False (Default: False) 'show_header_title': False, # Render title in drawer. # Values: True, False (Default: True) 'show_drawer_title': True, # Render footer. # Values: True, False (Default: True) 'show_footer': True, # Hide the symbiflow links 'hide_symbiflow_links': True, 'license_url' : 'https://www.apache.org/licenses/LICENSE-2.0', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'skywater-pdk-doc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ( # source start file master_doc, # target name 'skywater-pdk.tex', # title 'SkyWater SKY130 PDK Documentation', # author author, # document class 'manual', ), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'skywater-pdk', 'SkyWater SKY130 PDK Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( # source start file master_doc, # target name 'skywater-pdk', # title 'SkyWater SKY130 PDK Documentation', # author author, # dir menu entry 'SkyWater SKY130 PDK', # description 'Documentation for open source PDK targetting SkyWater SKY130 process node.', # category 'Miscellaneous', ), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # Enable the figures and numbers numfig = True # -- Extension configuration ------------------------------------------------- # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = True import re from docutils.parsers.rst import directives, roles, nodes LIB_REGEX = re.compile('sky130_(?P<lib_src>[^_\s]*)_(?P<lib_type>[^_\s]*)(_(?P<lib_name>[^_\s]*))?') CELL_REGEX = re.compile('sky130_(?P<lib_src>[^_\s]*)_(?P<lib_type>[^_\s]*)(_(?P<lib_name>[^_\s]*))?__(?P<cell_name>[^\s]*)') def lib_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Library name which gets colorized.""" m = LIB_REGEX.match(text) if not m: msg = inliner.reporter.error("Malformed library name of "+repr(text), line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] app = inliner.document.settings.env.app #lib_process_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) #lib_src_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) #lib_type_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) #lib_name_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) lib_process = 'sky130' lib_src = m.group('lib_src') lib_type = m.group('lib_type') lib_name = m.group('lib_name') r = [ nodes.inline(lib_process, lib_process, classes=['lib-process']), nodes.inline('_', '_', options=options), nodes.inline(lib_src, lib_src, classes=['lib-src']), nodes.inline('_', '_', options=options), nodes.inline(lib_type, lib_type, classes=['lib-type']), ] if lib_name: r.append(nodes.inline('_', '_', options=options)) r.append(nodes.inline(lib_name, lib_name, classes=['lib-name'])) return r, [] def cell_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Cell name which gets colorized.""" m = CELL_REGEX.match(text) if not m: msg = inliner.reporter.error("Malformed cell name of "+repr(text), line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] app = inliner.document.settings.env.app #lib_process_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) #lib_src_role = roles.role('lib_src', 
inliner.language, lineno, inliner.reporter) #lib_type_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) #lib_name_role = roles.role('lib_src', inliner.language, lineno, inliner.reporter) lib_process = 'sky130' lib_src = m.group('lib_src') lib_type = m.group('lib_type') lib_name = m.group('lib_name') cell_name = m.group('cell_name') r = [ nodes.inline(lib_process, lib_process, classes=['lib-process']), nodes.inline('_', '_', options=options), nodes.inline(lib_src, lib_src, classes=['lib-src']), nodes.inline('_', '_', options=options), nodes.inline(lib_type, lib_type, classes=['lib-type']), ] if lib_name: r.append(nodes.inline('_', '_', options=options)) r.append(nodes.inline(lib_name, lib_name, classes=['lib-name'])) r.append(nodes.inline('__', '__', options=options)) r.append(nodes.inline(cell_name, cell_name, classes=['cell-name'])) return r, [] def add_role(app, new_role_name): options = { 'class': directives.class_option(new_role_name), } role = roles.CustomRole(new_role_name, roles.generic_custom_role, options, "") app.add_role(new_role_name, role) def setup(app): app.add_css_file('extra.css') add_role(app, 'cell_name') add_role(app, 'lib_process') add_role(app, 'lib_src') add_role(app, 'lib_type') add_role(app, 'lib_name') add_role(app, 'drc_rule') add_role(app, 'drc_tag') add_role(app, 'drc_flag') add_role(app, 'layer') app.add_role('lib', lib_role) app.add_role('cell', cell_role) app.add_role('model', cell_role)
apache-2.0
jphnoel/udata
udata/search/fields.py
1
10369
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging import re from datetime import date from bson.objectid import ObjectId from elasticsearch_dsl import Q, A from elasticsearch_dsl.faceted_search import ( Facet as DSLFacet, TermsFacet as DSLTermsFacet, RangeFacet as DSLRangeFacet, DateHistogramFacet as DSLDateHistogramFacet, ) from flask_restplus import inputs from udata.i18n import lazy_gettext as _, format_date from udata.models import db from udata.utils import to_bool log = logging.getLogger(__name__) __all__ = ( 'BoolFacet', 'TermsFacet', 'ModelTermsFacet', 'RangeFacet', 'TemporalCoverageFacet', 'BoolBooster', 'FunctionBooster', 'GaussDecay', 'ExpDecay', 'LinearDecay', ) ES_NUM_FAILURES = '-Infinity', 'Infinity', 'NaN', None RE_TIME_COVERAGE = re.compile(r'\d{4}-\d{2}-\d{2}-\d{4}-\d{2}-\d{2}') OR_SEPARATOR = '|' OR_LABEL = _('OR') class Facet(object): def __init__(self, **kwargs): super(Facet, self).__init__(**kwargs) self.labelizer = self._params.pop('labelizer', None) def labelize(self, value): labelize = self.labelizer or self.default_labelizer if isinstance(value, basestring): return ' {0} '.format(OR_LABEL).join( str(labelize(v)) for v in value.split(OR_SEPARATOR) ) return labelize(value) def default_labelizer(self, value): return str(value) def as_request_parser_kwargs(self): return {'type': str} def validate_parameter(self, value): return value def get_value_filter(self, value): self.validate_parameter(value) # Might trigger a double validation return super(Facet, self).get_value_filter(value) class TermsFacet(Facet, DSLTermsFacet): def add_filter(self, filter_values): """Improve the original one to deal with OR cases.""" field = self._params['field'] # Build a `AND` query on values wihtout the OR operator. # and a `OR` query for each value containing the OR operator. 
filters = [ Q('bool', should=[ Q('term', **{field: v}) for v in value.split(OR_SEPARATOR) ]) if OR_SEPARATOR in value else Q('term', **{field: value}) for value in filter_values ] return Q('bool', must=filters) if len(filters) > 1 else filters[0] class BoolFacet(Facet, DSLFacet): agg_type = 'terms' def get_values(self, data, filter_values): return [ (to_bool(key), doc_count, selected) for (key, doc_count, selected) in super(BoolFacet, self).get_values(data, filter_values) ] def get_value_filter(self, filter_value): boolean = to_bool(filter_value) q = Q('term', **{self._params['field']: True}) return q if boolean else ~q def default_labelizer(self, value): return str(_('yes') if to_bool(value) else _('no')) def as_request_parser_kwargs(self): return {'type': inputs.boolean} class ModelTermsFacet(TermsFacet): def __init__(self, field, model, labelizer=None, field_name='id'): super(ModelTermsFacet, self).__init__(field=field, labelizer=labelizer) self.model = model self.field_name = field_name @property def model_field(self): return getattr(self.model, self.field_name) def get_values(self, data, filter_values): """ Turn the raw bucket data into a list of tuples containing the object, number of documents and a flag indicating whether this value has been selected or not. 
""" values = super(ModelTermsFacet, self).get_values(data, filter_values) ids = [key for (key, doc_count, selected) in values] # Perform a model resolution: models are feched from DB # We use model field to cast IDs ids = map(self.model_field.to_mongo, ids) objects = self.model.objects.in_bulk(ids) return [ (objects.get(self.model_field.to_mongo(key)), doc_count, selected) for (key, doc_count, selected) in values ] def default_labelizer(self, value): if not isinstance(value, self.model): self.validate_parameter(value) id = self.model_field.to_mongo(value) value = self.model.objects.get(id=id) return str(value) def validate_parameter(self, value): if isinstance(value, ObjectId): return value try: return [ self.model_field.to_mongo(v) for v in value.split(OR_SEPARATOR) ] except Exception: raise ValueError('"{0}" is not valid identifier'.format(value)) class RangeFacet(Facet, DSLRangeFacet): ''' A Range facet with splited keys and labels. This separation allows: - readable keys (without spaces and special chars) in URLs (front and API) - lazily localizable labels (without changing API by language) ''' def __init__(self, **kwargs): super(RangeFacet, self).__init__(**kwargs) self.labels = self._params.pop('labels', {}) if len(self.labels) != len(self._ranges): raise ValueError('Missing some labels') for key in self.labels.keys(): if key not in self._ranges: raise ValueError('Unknown label key {0}'.format(key)) def get_value_filter(self, filter_value): ''' Fix here until upstream PR is merged https://github.com/elastic/elasticsearch-dsl-py/pull/473 ''' self.validate_parameter(filter_value) f, t = self._ranges[filter_value] limits = {} # lt and gte to ensure non-overlapping ranges if f is not None: limits['gte'] = f if t is not None: limits['lt'] = t return Q('range', **{ self._params['field']: limits }) def get_values(self, data, filter_values): return [ (key, count, selected) for key, count, selected in super(RangeFacet, self).get_values(data, filter_values) if count ] 
def default_labelizer(self, value): self.validate_parameter(value) return self.labels.get(value, value) def as_request_parser_kwargs(self): return {'type': self.validate_parameter, 'choices': self.labels.keys()} def validate_parameter(self, value): if value not in self.labels: raise ValueError('Unknown range key: {0}'.format(value)) return value def get_value(data, name): wrapper = getattr(data, name, {}) return getattr(wrapper, 'value') class TemporalCoverageFacet(Facet, DSLFacet): agg_type = 'nested' def parse_value(self, value): parts = value.split('-') start = date(*map(int, parts[0:3])) end = date(*map(int, parts[3:6])) return start, end def default_labelizer(self, value): self.validate_parameter(value) start, end = self.parse_value(value) return ' - '.join((format_date(start, 'short'), format_date(end, 'short'))) def get_aggregation(self): field = self._params['field'] a = A('nested', path=field) a.metric('min_start', 'min', field='{0}.start'.format(field)) a.metric('max_end', 'max', field='{0}.end'.format(field)) return a def get_value_filter(self, value): self.validate_parameter(value) field = self._params['field'] start, end = self.parse_value(value) range_start = Q({'range': {'{0}.start'.format(field): { 'lte': max(start, end).toordinal(), }}}) range_end = Q({'range': {'{0}.end'.format(field): { 'gte': min(start, end).toordinal(), }}}) return Q('nested', path=field, query=range_start & range_end) def get_values(self, data, filter_values): field = self._params['field'] min_value = get_value(data, 'min_start'.format(field)) max_value = get_value(data, 'max_end'.format(field)) if not (min_value and max_value): return None return { 'min': date.fromordinal(int(min_value)), 'max': date.fromordinal(int(max_value)), 'days': max_value - min_value, } def validate_parameter(self, value): if not RE_TIME_COVERAGE.match(value): msg = '"{0}" does not match YYYY-MM-DD-YYYY-MM-DD'.format(value) raise ValueError(msg) return True def as_request_parser_kwargs(self): return { 
'type': self.validate_parameter, 'help': _('A date range expressed as start-end ' 'where both dates are in iso format ' '(ie. YYYY-MM-DD-YYYY-MM-DD)') } class BoolBooster(object): def __init__(self, field, factor): self.field = field self.factor = factor def to_query(self): return { 'filter': {'term': {self.field: True}}, 'boost_factor': self.factor, } class FunctionBooster(object): def __init__(self, function): self.function = function def to_query(self): return { 'script_score': { 'script': self.function, }, } def _v(value): '''Call value if necessary''' return value() if callable(value) else value class DecayFunction(object): function = None def __init__(self, field, origin, scale=None, offset=None, decay=None): self.field = field self.origin = origin self.scale = scale or origin self.offset = offset self.decay = decay def to_query(self): params = { 'origin': _v(self.origin), 'scale': _v(self.scale), } if self.offset: params['offset'] = _v(self.offset) if self.decay: params['decay'] = _v(self.decay) return { self.function: { self.field: params }, } class GaussDecay(DecayFunction): function = 'gauss' class ExpDecay(DecayFunction): function = 'exp' class LinearDecay(DecayFunction): function = 'linear'
agpl-3.0
mdeemer/XlsxWriter
xlsxwriter/test/comparison/test_outline06.py
8
2936
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'outline06.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels'] self.ignore_elements = {} def test_create_file(self): """ Test the creation of a outlines in a XlsxWriter file. These tests are based on the outline programs in the examples directory. """ workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet('Outlined Rows') worksheet1.outline_settings(0, 0, 0, 1) bold = workbook.add_format({'bold': 1}) worksheet1.set_row(1, None, None, {'level': 2}) worksheet1.set_row(2, None, None, {'level': 2}) worksheet1.set_row(3, None, None, {'level': 2}) worksheet1.set_row(4, None, None, {'level': 2}) worksheet1.set_row(5, None, None, {'level': 1}) worksheet1.set_row(6, None, None, {'level': 2}) worksheet1.set_row(7, None, None, {'level': 2}) worksheet1.set_row(8, None, None, {'level': 2}) worksheet1.set_row(9, None, None, {'level': 2}) worksheet1.set_row(10, None, None, {'level': 1}) worksheet1.set_column('A:A', 20) worksheet1.write('A1', 'Region', bold) worksheet1.write('A2', 'North') worksheet1.write('A3', 'North') worksheet1.write('A4', 'North') worksheet1.write('A5', 'North') worksheet1.write('A6', 'North Total', bold) worksheet1.write('B1', 'Sales', bold) worksheet1.write('B2', 1000) worksheet1.write('B3', 1200) worksheet1.write('B4', 900) worksheet1.write('B5', 1200) worksheet1.write('B6', '=SUBTOTAL(9,B2:B5)', bold, 4300) 
worksheet1.write('A7', 'South') worksheet1.write('A8', 'South') worksheet1.write('A9', 'South') worksheet1.write('A10', 'South') worksheet1.write('A11', 'South Total', bold) worksheet1.write('B7', 400) worksheet1.write('B8', 600) worksheet1.write('B9', 500) worksheet1.write('B10', 600) worksheet1.write('B11', '=SUBTOTAL(9,B7:B10)', bold, 2100) worksheet1.write('A12', 'Grand Total', bold) worksheet1.write('B12', '=SUBTOTAL(9,B2:B10)', bold, 6400) workbook.close() self.assertExcelEqual()
bsd-2-clause
rheise/python-suds-tz
suds/xsd/doctor.py
204
6308
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ The I{doctor} module provides classes for fixing broken (sick) schema(s). """ from logging import getLogger from suds.sax import splitPrefix, Namespace from suds.sax.element import Element from suds.plugin import DocumentPlugin, DocumentContext log = getLogger(__name__) class Doctor: """ Schema Doctor. """ def examine(self, root): """ Examine and repair the schema (if necessary). @param root: A schema root element. @type root: L{Element} """ pass class Practice(Doctor): """ A collection of doctors. @ivar doctors: A list of doctors. @type doctors: list """ def __init__(self): self.doctors = [] def add(self, doctor): """ Add a doctor to the practice @param doctor: A doctor to add. @type doctor: L{Doctor} """ self.doctors.append(doctor) def examine(self, root): for d in self.doctors: d.examine(root) return root class TnsFilter: """ Target Namespace filter. @ivar tns: A list of target namespaces. @type tns: [str,...] """ def __init__(self, *tns): """ @param tns: A list of target namespaces. @type tns: [str,...] """ self.tns = [] self.add(*tns) def add(self, *tns): """ Add I{targetNamesapces} to be added. @param tns: A list of target namespaces. 
@type tns: [str,...] """ self.tns += tns def match(self, root, ns): """ Match by I{targetNamespace} excluding those that are equal to the specified namespace to prevent adding an import to itself. @param root: A schema root. @type root: L{Element} """ tns = root.get('targetNamespace') if len(self.tns): matched = ( tns in self.tns ) else: matched = 1 itself = ( ns == tns ) return ( matched and not itself ) class Import: """ An <xs:import/> to be applied. @cvar xsdns: The XSD namespace. @type xsdns: (p,u) @ivar ns: An import namespace. @type ns: str @ivar location: An optional I{schemaLocation}. @type location: str @ivar filter: A filter used to restrict application to a particular schema. @type filter: L{TnsFilter} """ xsdns = Namespace.xsdns def __init__(self, ns, location=None): """ @param ns: An import namespace. @type ns: str @param location: An optional I{schemaLocation}. @type location: str """ self.ns = ns self.location = location self.filter = TnsFilter() def setfilter(self, filter): """ Set the filter. @param filter: A filter to set. @type filter: L{TnsFilter} """ self.filter = filter def apply(self, root): """ Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element} """ if not self.filter.match(root, self.ns): return if self.exists(root): return node = Element('import', ns=self.xsdns) node.set('namespace', self.ns) if self.location is not None: node.set('schemaLocation', self.location) log.debug('inserting: %s', node) root.insert(node) def add(self, root): """ Add an <xs:import/> to the specified schema root. @param root: A schema root. 
@type root: L{Element} """ node = Element('import', ns=self.xsdns) node.set('namespace', self.ns) if self.location is not None: node.set('schemaLocation', self.location) log.debug('%s inserted', node) root.insert(node) def exists(self, root): """ Check to see if the <xs:import/> already exists in the specified schema root by matching I{namesapce}. @param root: A schema root. @type root: L{Element} """ for node in root.children: if node.name != 'import': continue ns = node.get('namespace') if self.ns == ns: return 1 return 0 class ImportDoctor(Doctor, DocumentPlugin): """ Doctor used to fix missing imports. @ivar imports: A list of imports to apply. @type imports: [L{Import},...] """ def __init__(self, *imports): """ """ self.imports = [] self.add(*imports) def add(self, *imports): """ Add a namesapce to be checked. @param imports: A list of L{Import} objects. @type imports: [L{Import},..] """ self.imports += imports def examine(self, node): for imp in self.imports: imp.apply(node) def parsed(self, context): node = context.document # xsd root if node.name == 'schema' and Namespace.xsd(node.namespace()): self.examine(node) return # look deeper context = DocumentContext() for child in node: context.document = child self.parsed(context)
lgpl-3.0
waterponey/scikit-learn
sklearn/datasets/species_distributions.py
13
7866
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References: * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes: * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset """ # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause from io import BytesIO from os import makedirs from os.path import exists try: # Python 2 from urllib2 import urlopen PY2 = True except ImportError: # Python 3 from urllib.request import urlopen PY2 = False import numpy as np from sklearn.datasets.base import get_data_home, Bunch from sklearn.datasets.base import _pkl_filepath from sklearn.externals import joblib DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/" SAMPLES_URL = DIRECTORY_URL + "samples.zip" COVERAGES_URL = DIRECTORY_URL + "coverages.zip" DATA_ARCHIVE_NAME = "species_coverage.pkz" def _load_coverage(F, header_length=6, dtype=np.int16): """Load a coverage file from an open file object. 
This will return a numpy array of the given dtype """ header = [F.readline() for i in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = int(header[b'NODATA_value']) if nodata != -9999: M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data """ if PY2: # Numpy recarray wants Python 2 str but not unicode names = F.readline().strip().split(',') else: # Numpy recarray wants Python 3 str but not bytes... names = F.readline().decode('ascii').strip().split(',') rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4') rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def fetch_species_distributions(data_home=None, download_if_missing=True): """Loader for species distribution dataset from Phillips et. al. (2006) Read more in the :ref:`User Guide <datasets>`. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. 
download_if_missing : optional, True by default If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns -------- The data is returned as a Bunch object with the following attributes: coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1623,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (619,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ------ This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. 
Notes ----- * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset with scikit-learn """ data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05) dtype = np.int16 archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME) if not exists(archive_path): print('Downloading species data from %s to %s' % (SAMPLES_URL, data_home)) X = np.load(BytesIO(urlopen(SAMPLES_URL).read())) for f in X.files: fhandle = BytesIO(X[f]) if 'train' in f: train = _load_csv(fhandle) if 'test' in f: test = _load_csv(fhandle) print('Downloading coverage data from %s to %s' % (COVERAGES_URL, data_home)) X = np.load(BytesIO(urlopen(COVERAGES_URL).read())) coverages = [] for f in X.files: fhandle = BytesIO(X[f]) print(' - converting', f) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, archive_path, compress=9) else: bunch = joblib.load(archive_path) return bunch
bsd-3-clause
prefetchnta/questlab
bin/python/Lib/doctest.py
5
104492
# Module doctest. # Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). # Major enhancements and refactoring by: # Jim Fulton # Edward Loper # Provided as-is; use at your own risk; no warranty; no promises; enjoy! r"""Module doctest -- a framework for running examples in docstrings. In simplest use, end each module M to be tested with: def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test() Then running the module as a script will cause the examples in the docstrings to get executed and verified: python M.py This won't display anything unless an example fails, in which case the failing example(s) and the cause(s) of the failure(s) are printed to stdout (why not stderr? because stderr is a lame hack <0.2 wink>), and the final line of output is "Test failed.". Run it with the -v switch instead: python M.py -v and a detailed report of all examples tried is printed to stdout, along with assorted summaries at the end. You can force verbose mode by passing "verbose=True" to testmod, or prohibit it by passing "verbose=False". In either of those cases, sys.argv is not examined by testmod. There are a variety of other ways to run doctests, including integration with the unittest framework, and support for running non-Python text files containing doctests. There are also many ways to override parts of doctest's default behaviors. See the Library Reference Manual for details. """ __docformat__ = 'reStructuredText en' __all__ = [ # 0, Option Flags 'register_optionflag', 'DONT_ACCEPT_TRUE_FOR_1', 'DONT_ACCEPT_BLANKLINE', 'NORMALIZE_WHITESPACE', 'ELLIPSIS', 'SKIP', 'IGNORE_EXCEPTION_DETAIL', 'COMPARISON_FLAGS', 'REPORT_UDIFF', 'REPORT_CDIFF', 'REPORT_NDIFF', 'REPORT_ONLY_FIRST_FAILURE', 'REPORTING_FLAGS', 'FAIL_FAST', # 1. Utility Functions # 2. Example & DocTest 'Example', 'DocTest', # 3. Doctest Parser 'DocTestParser', # 4. Doctest Finder 'DocTestFinder', # 5. 
Doctest Runner 'DocTestRunner', 'OutputChecker', 'DocTestFailure', 'UnexpectedException', 'DebugRunner', # 6. Test Functions 'testmod', 'testfile', 'run_docstring_examples', # 7. Unittest Support 'DocTestSuite', 'DocFileSuite', 'set_unittest_reportflags', # 8. Debugging Support 'script_from_examples', 'testsource', 'debug_src', 'debug', ] import __future__ import argparse import difflib import inspect import linecache import os import pdb import re import sys import traceback import unittest from io import StringIO from collections import namedtuple TestResults = namedtuple('TestResults', 'failed attempted') # There are 4 basic classes: # - Example: a <source, want> pair, plus an intra-docstring line number. # - DocTest: a collection of examples, parsed from a docstring, plus # info about where the docstring came from (name, filename, lineno). # - DocTestFinder: extracts DocTests from a given object's docstring and # its contained objects' docstrings. # - DocTestRunner: runs DocTest cases, and accumulates statistics. # # So the basic picture is: # # list of: # +------+ +---------+ +-------+ # |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| # +------+ +---------+ +-------+ # | Example | # | ... | # | Example | # +---------+ # Option constants. OPTIONFLAGS_BY_NAME = {} def register_optionflag(name): # Create a new flag unless `name` is already known. 
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') ELLIPSIS = register_optionflag('ELLIPSIS') SKIP = register_optionflag('SKIP') IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | DONT_ACCEPT_BLANKLINE | NORMALIZE_WHITESPACE | ELLIPSIS | SKIP | IGNORE_EXCEPTION_DETAIL) REPORT_UDIFF = register_optionflag('REPORT_UDIFF') REPORT_CDIFF = register_optionflag('REPORT_CDIFF') REPORT_NDIFF = register_optionflag('REPORT_NDIFF') REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') FAIL_FAST = register_optionflag('FAIL_FAST') REPORTING_FLAGS = (REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF | REPORT_ONLY_FIRST_FAILURE | FAIL_FAST) # Special string markers for use in `want` strings: BLANKLINE_MARKER = '<BLANKLINE>' ELLIPSIS_MARKER = '...' ###################################################################### ## Table of Contents ###################################################################### # 1. Utility Functions # 2. Example & DocTest -- store test cases # 3. DocTest Parser -- extracts examples from strings # 4. DocTest Finder -- extracts test cases from objects # 5. DocTest Runner -- runs test cases # 6. Test Functions -- convenient wrappers for testing # 7. Unittest Support # 8. Debugging Support # 9. Example Usage ###################################################################### ## 1. Utility Functions ###################################################################### def _extract_future_flags(globs): """ Return the compiler-flags associated with the future features that have been imported into the given namespace (globs). 
""" flags = 0 for fname in __future__.all_feature_names: feature = globs.get(fname, None) if feature is getattr(__future__, fname): flags |= feature.compiler_flag return flags def _normalize_module(module, depth=2): """ Return the module specified by `module`. In particular: - If `module` is a module, then return module. - If `module` is a string, then import and return the module with that name. - If `module` is None, then return the calling module. The calling module is assumed to be the module of the stack frame at the given depth in the call stack. """ if inspect.ismodule(module): return module elif isinstance(module, str): return __import__(module, globals(), locals(), ["*"]) elif module is None: return sys.modules[sys._getframe(depth).f_globals['__name__']] else: raise TypeError("Expected a module, string, or None") def _load_testfile(filename, package, module_relative, encoding): if module_relative: package = _normalize_module(package, 3) filename = _module_relative_path(package, filename) if getattr(package, '__loader__', None) is not None: if hasattr(package.__loader__, 'get_data'): file_contents = package.__loader__.get_data(filename) file_contents = file_contents.decode(encoding) # get_data() opens files as 'rb', so one must do the equivalent # conversion as universal newlines would do. return file_contents.replace(os.linesep, '\n'), filename with open(filename, encoding=encoding) as f: return f.read(), filename def _indent(s, indent=4): """ Add the given number of space characters to the beginning of every non-blank line in `s`, and return the result. """ # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) def _exception_traceback(exc_info): """ Return a string containing a traceback message for the given exc_info tuple (as returned by sys.exc_info()). """ # Get a traceback message. 
excout = StringIO() exc_type, exc_val, exc_tb = exc_info traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) return excout.getvalue() # Override some StringIO methods. class _SpoofOut(StringIO): def getvalue(self): result = StringIO.getvalue(self) # If anything at all was written, make sure there's a trailing # newline. There's no way for the expected output to indicate # that a trailing newline is missing. if result and not result.endswith("\n"): result += "\n" return result def truncate(self, size=None): self.seek(size) StringIO.truncate(self) # Worst-case linear-time ellipsis matching. def _ellipsis_match(want, got): """ Essentially the only subtle case: >>> _ellipsis_match('aa...aa', 'aaa') False """ if ELLIPSIS_MARKER not in want: return want == got # Find "the real" strings. ws = want.split(ELLIPSIS_MARKER) assert len(ws) >= 2 # Deal with exact matches possibly needed at one or both ends. startpos, endpos = 0, len(got) w = ws[0] if w: # starts with exact match if got.startswith(w): startpos = len(w) del ws[0] else: return False w = ws[-1] if w: # ends with exact match if got.endswith(w): endpos -= len(w) del ws[-1] else: return False if startpos > endpos: # Exact end matches required more characters than we have, as in # _ellipsis_match('aa...aa', 'aaa') return False # For the rest, we only need to find the leftmost non-overlapping # match for each piece. If there's no overall match that way alone, # there's no overall match period. for w in ws: # w may be '' at times, if there are consecutive ellipses, or # due to an ellipsis at the start or end of `want`. That's OK. # Search for an empty string succeeds, and doesn't change startpos. startpos = got.find(w, startpos, endpos) if startpos < 0: return False startpos += len(w) return True def _comment_line(line): "Return a commented form of the given line" line = line.rstrip() if line: return '# '+line else: return '#' def _strip_exception_details(msg): # Support for IGNORE_EXCEPTION_DETAIL. 
# Get rid of everything except the exception name; in particular, drop # the possibly dotted module path (if any) and the exception message (if # any). We assume that a colon is never part of a dotted name, or of an # exception name. # E.g., given # "foo.bar.MyError: la di da" # return "MyError" # Or for "abc.def" or "abc.def:\n" return "def". start, end = 0, len(msg) # The exception name must appear on the first line. i = msg.find("\n") if i >= 0: end = i # retain up to the first colon (if any) i = msg.find(':', 0, end) if i >= 0: end = i # retain just the exception name i = msg.rfind('.', 0, end) if i >= 0: start = i+1 return msg[start: end] class _OutputRedirectingPdb(pdb.Pdb): """ A specialized version of the python debugger that redirects stdout to a given stream when interacting with the user. Stdout is *not* redirected when traced code is executed. """ def __init__(self, out): self.__out = out self.__debugger_used = False # do not play signal games in the pdb pdb.Pdb.__init__(self, stdout=out, nosigint=True) # still use input() to get user input self.use_rawinput = 1 def set_trace(self, frame=None): self.__debugger_used = True if frame is None: frame = sys._getframe().f_back pdb.Pdb.set_trace(self, frame) def set_continue(self): # Calling set_continue unconditionally would break unit test # coverage reporting, as Bdb.set_continue calls sys.settrace(None). if self.__debugger_used: pdb.Pdb.set_continue(self) def trace_dispatch(self, *args): # Redirect stdout to the given stream. save_stdout = sys.stdout sys.stdout = self.__out # Call Pdb's trace dispatch method. try: return pdb.Pdb.trace_dispatch(self, *args) finally: sys.stdout = save_stdout # [XX] Normalize with respect to os.path.pardir? def _module_relative_path(module, path): if not inspect.ismodule(module): raise TypeError('Expected a module: %r' % module) if path.startswith('/'): raise ValueError('Module-relative files may not have absolute paths') # Find the base directory for the path. 
if hasattr(module, '__file__'): # A normal module/package basedir = os.path.split(module.__file__)[0] elif module.__name__ == '__main__': # An interactive session. if len(sys.argv)>0 and sys.argv[0] != '': basedir = os.path.split(sys.argv[0])[0] else: basedir = os.curdir else: # A module w/o __file__ (this includes builtins) raise ValueError("Can't resolve paths relative to the module " + module + " (it has no __file__)") # Combine the base directory and the path. return os.path.join(basedir, *(path.split('/'))) ###################################################################### ## 2. Example & DocTest ###################################################################### ## - An "example" is a <source, want> pair, where "source" is a ## fragment of source code, and "want" is the expected output for ## "source." The Example class also includes information about ## where the example was extracted from. ## ## - A "doctest" is a collection of examples, typically extracted from ## a string (such as an object's docstring). The DocTest class also ## includes information about where the string was extracted from. class Example: """ A single doctest example, consisting of source code and expected output. `Example` defines the following attributes: - source: A single Python statement, always ending with a newline. The constructor adds a newline if needed. - want: The expected output from running the source code (either from stdout, or a traceback in case of exception). `want` ends with a newline unless it's empty, in which case it's an empty string. The constructor adds a newline if needed. - exc_msg: The exception message generated by the example, if the example is expected to generate an exception; or `None` if it is not expected to generate an exception. This exception message is compared against the return value of `traceback.format_exception_only()`. `exc_msg` ends with a newline unless it's `None`. The constructor adds a newline if needed. 
- lineno: The line number within the DocTest string containing this Example where the Example begins. This line number is zero-based, with respect to the beginning of the DocTest. - indent: The example's indentation in the DocTest string. I.e., the number of space characters that precede the example's first prompt. - options: A dictionary mapping from option flags to True or False, which is used to override default options for this example. Any option flags not contained in this dictionary are left at their default value (as specified by the DocTestRunner's optionflags). By default, no options are set. """ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, options=None): # Normalize inputs. if not source.endswith('\n'): source += '\n' if want and not want.endswith('\n'): want += '\n' if exc_msg is not None and not exc_msg.endswith('\n'): exc_msg += '\n' # Store properties. self.source = source self.want = want self.lineno = lineno self.indent = indent if options is None: options = {} self.options = options self.exc_msg = exc_msg def __eq__(self, other): if type(self) is not type(other): return NotImplemented return self.source == other.source and \ self.want == other.want and \ self.lineno == other.lineno and \ self.indent == other.indent and \ self.options == other.options and \ self.exc_msg == other.exc_msg def __hash__(self): return hash((self.source, self.want, self.lineno, self.indent, self.exc_msg)) class DocTest: """ A collection of doctest examples that should be run in a single namespace. Each `DocTest` defines the following attributes: - examples: the list of examples. - globs: The namespace (aka globals) that the examples should be run in. - name: A name identifying the DocTest (typically, the name of the object whose docstring this DocTest was extracted from). - filename: The name of the file that this DocTest was extracted from, or `None` if the filename is unknown. 
- lineno: The line number within filename where this DocTest begins, or `None` if the line number is unavailable. This line number is zero-based, with respect to the beginning of the file. - docstring: The string that the examples were extracted from, or `None` if the string is unavailable. """ def __init__(self, examples, globs, name, filename, lineno, docstring): """ Create a new DocTest containing the given examples. The DocTest's globals are initialized with a copy of `globs`. """ assert not isinstance(examples, str), \ "DocTest no longer accepts str; use DocTestParser instead" self.examples = examples self.docstring = docstring self.globs = globs.copy() self.name = name self.filename = filename self.lineno = lineno def __repr__(self): if len(self.examples) == 0: examples = 'no examples' elif len(self.examples) == 1: examples = '1 example' else: examples = '%d examples' % len(self.examples) return ('<DocTest %s from %s:%s (%s)>' % (self.name, self.filename, self.lineno, examples)) def __eq__(self, other): if type(self) is not type(other): return NotImplemented return self.examples == other.examples and \ self.docstring == other.docstring and \ self.globs == other.globs and \ self.name == other.name and \ self.filename == other.filename and \ self.lineno == other.lineno def __hash__(self): return hash((self.docstring, self.name, self.filename, self.lineno)) # This lets us sort tests by name: def __lt__(self, other): if not isinstance(other, DocTest): return NotImplemented return ((self.name, self.filename, self.lineno, id(self)) < (other.name, other.filename, other.lineno, id(other))) ###################################################################### ## 3. DocTestParser ###################################################################### class DocTestParser: """ A class used to parse strings containing doctest examples. """ # This regular expression is used to find doctest examples in a # string. 
It defines three groups: `source` is the source code # (including leading indentation and prompts); `indent` is the # indentation of the first (PS1) line of the source code; and # `want` is the expected output (including leading indentation). _EXAMPLE_RE = re.compile(r''' # Source consists of a PS1 line followed by zero or more PS2 lines. (?P<source> (?:^(?P<indent> [ ]*) >>> .*) # PS1 line (?:\n [ ]* \.\.\. .*)*) # PS2 lines \n? # Want consists of any non-blank lines that do not start with PS1. (?P<want> (?:(?![ ]*$) # Not a blank line (?![ ]*>>>) # Not a line starting with PS1 .+$\n? # But any other line )*) ''', re.MULTILINE | re.VERBOSE) # A regular expression for handling `want` strings that contain # expected exceptions. It divides `want` into three pieces: # - the traceback header line (`hdr`) # - the traceback stack (`stack`) # - the exception message (`msg`), as generated by # traceback.format_exception_only() # `msg` may have multiple lines. We assume/require that the # exception message is the first non-indented line starting with a word # character following the traceback header line. _EXCEPTION_RE = re.compile(r""" # Grab the traceback header. Different versions of Python have # said different things on the first traceback line. ^(?P<hdr> Traceback\ \( (?: most\ recent\ call\ last | innermost\ last ) \) : ) \s* $ # toss trailing whitespace on the header. (?P<stack> .*?) # don't blink: absorb stuff until... ^ (?P<msg> \w+ .*) # a line *starts* with alphanum. """, re.VERBOSE | re.MULTILINE | re.DOTALL) # A callable returning a true value iff its argument is a blank line # or contains a single comment. _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match def parse(self, string, name='<string>'): """ Divide the given string into examples and intervening text, and return them as a list of alternating Examples and strings. Line numbers for the Examples are 0-based. 
The optional argument `name` is a name identifying this string, and is only used for error messages. """ string = string.expandtabs() # If all lines begin with the same indentation, then strip it. min_indent = self._min_indent(string) if min_indent > 0: string = '\n'.join([l[min_indent:] for l in string.split('\n')]) output = [] charno, lineno = 0, 0 # Find all doctest examples in the string: for m in self._EXAMPLE_RE.finditer(string): # Add the pre-example text to `output`. output.append(string[charno:m.start()]) # Update lineno (lines before this example) lineno += string.count('\n', charno, m.start()) # Extract info from the regexp match. (source, options, want, exc_msg) = \ self._parse_example(m, name, lineno) # Create an Example, and add it to the list. if not self._IS_BLANK_OR_COMMENT(source): output.append( Example(source, want, exc_msg, lineno=lineno, indent=min_indent+len(m.group('indent')), options=options) ) # Update lineno (lines inside this example) lineno += string.count('\n', m.start(), m.end()) # Update charno. charno = m.end() # Add any remaining post-example text to `output`. output.append(string[charno:]) return output def get_doctest(self, string, globs, name, filename, lineno): """ Extract all doctest examples from the given string, and collect them into a `DocTest` object. `globs`, `name`, `filename`, and `lineno` are attributes for the new `DocTest` object. See the documentation for `DocTest` for more information. """ return DocTest(self.get_examples(string, name), globs, name, filename, lineno, string) def get_examples(self, string, name='<string>'): """ Extract all doctest examples from the given string, and return them as a list of `Example` objects. Line numbers are 0-based, because it's most common in doctests that nothing interesting appears on the same line as opening triple-quote, and so the first interesting line is called \"line 1\" then. 
The optional argument `name` is a name identifying this string, and is only used for error messages. """ return [x for x in self.parse(string, name) if isinstance(x, Example)] def _parse_example(self, m, name, lineno): """ Given a regular expression match from `_EXAMPLE_RE` (`m`), return a pair `(source, want)`, where `source` is the matched example's source code (with prompts and indentation stripped); and `want` is the example's expected output (with indentation stripped). `name` is the string's name, and `lineno` is the line number where the example starts; both are used for error messages. """ # Get the example's indentation level. indent = len(m.group('indent')) # Divide source into lines; check that they're properly # indented; and then strip their indentation & prompts. source_lines = m.group('source').split('\n') self._check_prompt_blank(source_lines, indent, name, lineno) self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) source = '\n'.join([sl[indent+4:] for sl in source_lines]) # Divide want into lines; check that it's properly indented; and # then strip the indentation. Spaces before the last newline should # be preserved, so plain rstrip() isn't good enough. want = m.group('want') want_lines = want.split('\n') if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): del want_lines[-1] # forget final newline & spaces after it self._check_prefix(want_lines, ' '*indent, name, lineno + len(source_lines)) want = '\n'.join([wl[indent:] for wl in want_lines]) # If `want` contains a traceback message, then extract it. m = self._EXCEPTION_RE.match(want) if m: exc_msg = m.group('msg') else: exc_msg = None # Extract options from the source. options = self._find_options(source, name, lineno) return source, options, want, exc_msg # This regular expression looks for option directives in the # source code of an example. Option directives are comments # starting with "doctest:". 
Warning: this may give false # positives for string-literals that contain the string # "#doctest:". Eliminating these false positives would require # actually parsing the string; but we limit them by ignoring any # line containing "#doctest:" that is *followed* by a quote mark. _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', re.MULTILINE) def _find_options(self, source, name, lineno): """ Return a dictionary containing option overrides extracted from option directives in the given source string. `name` is the string's name, and `lineno` is the line number where the example starts; both are used for error messages. """ options = {} # (note: with the current regexp, this will match at most once:) for m in self._OPTION_DIRECTIVE_RE.finditer(source): option_strings = m.group(1).replace(',', ' ').split() for option in option_strings: if (option[0] not in '+-' or option[1:] not in OPTIONFLAGS_BY_NAME): raise ValueError('line %r of the doctest for %s ' 'has an invalid option: %r' % (lineno+1, name, option)) flag = OPTIONFLAGS_BY_NAME[option[1:]] options[flag] = (option[0] == '+') if options and self._IS_BLANK_OR_COMMENT(source): raise ValueError('line %r of the doctest for %s has an option ' 'directive on a line with no example: %r' % (lineno, name, source)) return options # This regular expression finds the indentation of every non-blank # line in a string. _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) def _min_indent(self, s): "Return the minimum indentation of any non-blank line in `s`" indents = [len(indent) for indent in self._INDENT_RE.findall(s)] if len(indents) > 0: return min(indents) else: return 0 def _check_prompt_blank(self, lines, indent, name, lineno): """ Given the lines of a source string (including prompts and leading indentation), check to make sure that every prompt is followed by a space character. If any line is not followed by a space character, then raise ValueError. 
""" for i, line in enumerate(lines): if len(line) >= indent+4 and line[indent+3] != ' ': raise ValueError('line %r of the docstring for %s ' 'lacks blank after %s: %r' % (lineno+i+1, name, line[indent:indent+3], line)) def _check_prefix(self, lines, prefix, name, lineno): """ Check that every line in the given list starts with the given prefix; if any line does not, then raise a ValueError. """ for i, line in enumerate(lines): if line and not line.startswith(prefix): raise ValueError('line %r of the docstring for %s has ' 'inconsistent leading whitespace: %r' % (lineno+i+1, name, line)) ###################################################################### ## 4. DocTest Finder ###################################################################### class DocTestFinder: """ A class used to extract the DocTests that are relevant to a given object, from its docstring and the docstrings of its contained objects. Doctests can currently be extracted from the following object types: modules, functions, classes, methods, staticmethods, classmethods, and properties. """ def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, exclude_empty=True): """ Create a new doctest finder. The optional argument `parser` specifies a class or function that should be used to create new DocTest objects (or objects that implement the same interface as DocTest). The signature for this factory function should match the signature of the DocTest constructor. If the optional argument `recurse` is false, then `find` will only examine the given object, and not any contained objects. If the optional argument `exclude_empty` is false, then `find` will include tests for objects with empty docstrings. 
""" self._parser = parser self._verbose = verbose self._recurse = recurse self._exclude_empty = exclude_empty def find(self, obj, name=None, module=None, globs=None, extraglobs=None): """ Return a list of the DocTests that are defined by the given object's docstring, or by any of its contained objects' docstrings. The optional parameter `module` is the module that contains the given object. If the module is not specified or is None, then the test finder will attempt to automatically determine the correct module. The object's module is used: - As a default namespace, if `globs` is not specified. - To prevent the DocTestFinder from extracting DocTests from objects that are imported from other modules. - To find the name of the file containing the object. - To help find the line number of the object within its file. Contained objects whose module does not match `module` are ignored. If `module` is False, no attempt to find the module will be made. This is obscure, of use mostly in tests: if `module` is False, or is None but cannot be found automatically, then all objects are considered to belong to the (non-existent) module, so all contained objects will (recursively) be searched for doctests. The globals for each DocTest is formed by combining `globs` and `extraglobs` (bindings in `extraglobs` override bindings in `globs`). A new copy of the globals dictionary is created for each DocTest. If `globs` is not specified, then it defaults to the module's `__dict__`, if specified, or {} otherwise. If `extraglobs` is not specified, then it defaults to {}. """ # If name was not specified, then extract it from the object. if name is None: name = getattr(obj, '__name__', None) if name is None: raise ValueError("DocTestFinder.find: name must be given " "when obj.__name__ doesn't exist: %r" % (type(obj),)) # Find the module that contains the given object (if obj is # a module, then module=obj.). Note: this may fail, in which # case module will be None. 
if module is False: module = None elif module is None: module = inspect.getmodule(obj) # Read the module's source code. This is used by # DocTestFinder._find_lineno to find the line number for a # given object's docstring. try: file = inspect.getsourcefile(obj) except TypeError: source_lines = None else: if not file: # Check to see if it's one of our special internal "files" # (see __patched_linecache_getlines). file = inspect.getfile(obj) if not file[0]+file[-2:] == '<]>': file = None if file is None: source_lines = None else: if module is not None: # Supply the module globals in case the module was # originally loaded via a PEP 302 loader and # file is not a valid filesystem path source_lines = linecache.getlines(file, module.__dict__) else: # No access to a loader, so assume it's a normal # filesystem path source_lines = linecache.getlines(file) if not source_lines: source_lines = None # Initialize globals, and merge in extraglobs. if globs is None: if module is None: globs = {} else: globs = module.__dict__.copy() else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: globs['__name__'] = '__main__' # provide a default module name # Recursively explore `obj`, extracting DocTests. tests = [] self._find(tests, obj, name, module, source_lines, globs, {}) # Sort the tests by alpha order of names, for consistency in # verbose-mode output. This was a feature of doctest in Pythons # <= 2.3 that got lost by accident in 2.4. It was repaired in # 2.4.4 and 2.5. tests.sort() return tests def _from_module(self, module, object): """ Return true if the given object is defined in the given module. 
""" if module is None: return True elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.ismethoddescriptor(object): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): obj_mod = object.__module__ else: return True # [XX] no easy way to tell otherwise return module.__name__ == obj_mod elif inspect.isclass(object): return module.__name__ == object.__module__ elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function") def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) # Look for tests in a module's contained objects. if inspect.ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): valname = '%s.%s' % (name, valname) # Recurse to functions & classes. if ((inspect.isroutine(val) or inspect.isclass(val)) and self._from_module(module, val)): self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a module's __test__ dictionary. 
if inspect.ismodule(obj) and self._recurse: for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError("DocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isroutine(val) or inspect.isclass(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError("DocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj) and self._recurse: for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isroutine(val) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) def _get_test(self, obj, name, module, globs, source_lines): """ Return a DocTest for the given object, if it defines a docstring; otherwise, return None. """ # Extract the object's docstring. If it doesn't have one, # then return None (no test for this object). if isinstance(obj, str): docstring = obj else: try: if obj.__doc__ is None: docstring = '' else: docstring = obj.__doc__ if not isinstance(docstring, str): docstring = str(docstring) except (TypeError, AttributeError): docstring = '' # Find the docstring's location in the file. lineno = self._find_lineno(obj, source_lines) # Don't bother if the docstring is empty. if self._exclude_empty and not docstring: return None # Return a DocTest for this object. 
if module is None: filename = None else: filename = getattr(module, '__file__', module.__name__) if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] return self._parser.get_doctest(docstring, globs, name, filename, lineno) def _find_lineno(self, obj, source_lines): """ Return a line number of the given object's docstring. Note: this method assumes that the object has a docstring. """ lineno = None # Find the line number for modules. if inspect.ismodule(obj): lineno = 0 # Find the line number for classes. # Note: this could be fooled if a class is defined multiple # times in a single file. if inspect.isclass(obj): if source_lines is None: return None pat = re.compile(r'^\s*class\s*%s\b' % getattr(obj, '__name__', '-')) for i, line in enumerate(source_lines): if pat.match(line): lineno = i break # Find the line number for functions & methods. if inspect.ismethod(obj): obj = obj.__func__ if inspect.isfunction(obj): obj = obj.__code__ if inspect.istraceback(obj): obj = obj.tb_frame if inspect.isframe(obj): obj = obj.f_code if inspect.iscode(obj): lineno = getattr(obj, 'co_firstlineno', None)-1 # Find the line number where the docstring starts. Assume # that it's the first line that begins with a quote mark. # Note: this could be fooled by a multiline function # signature, where a continuation line begins with a quote # mark. if lineno is not None: if source_lines is None: return lineno+1 pat = re.compile('(^|.*:)\s*\w*("|\')') for lineno in range(lineno, len(source_lines)): if pat.match(source_lines[lineno]): return lineno # We couldn't find the line number. return None ###################################################################### ## 5. DocTest Runner ###################################################################### class DocTestRunner: """ A class used to run DocTest test cases, and accumulate statistics. The `run` method is used to process a single DocTest case. 
It returns a tuple `(f, t)`, where `t` is the number of test cases tried, and `f` is the number of test cases that failed. >>> tests = DocTestFinder().find(_TestClass) >>> runner = DocTestRunner(verbose=False) >>> tests.sort(key = lambda test: test.name) >>> for test in tests: ... print(test.name, '->', runner.run(test)) _TestClass -> TestResults(failed=0, attempted=2) _TestClass.__init__ -> TestResults(failed=0, attempted=2) _TestClass.get -> TestResults(failed=0, attempted=2) _TestClass.square -> TestResults(failed=0, attempted=1) The `summarize` method prints a summary of all the test cases that have been run by the runner, and returns an aggregated `(f, t)` tuple: >>> runner.summarize(verbose=1) 4 items passed all tests: 2 tests in _TestClass 2 tests in _TestClass.__init__ 2 tests in _TestClass.get 1 tests in _TestClass.square 7 tests in 4 items. 7 passed and 0 failed. Test passed. TestResults(failed=0, attempted=7) The aggregated number of tried examples and failed examples is also available via the `tries` and `failures` attributes: >>> runner.tries 7 >>> runner.failures 0 The comparison between expected outputs and actual outputs is done by an `OutputChecker`. This comparison may be customized with a number of option flags; see the documentation for `testmod` for more information. If the option flags are insufficient, then the comparison may also be customized by passing a subclass of `OutputChecker` to the constructor. The test runner's display output can be controlled in two ways. First, an output function (`out) can be passed to `TestRunner.run`; this function will be called with strings that should be displayed. It defaults to `sys.stdout.write`. If capturing the output is not sufficient, then the display output can be also customized by subclassing DocTestRunner, and overriding the methods `report_start`, `report_success`, `report_unexpected_exception`, and `report_failure`. 
""" # This divider string is used to separate failure messages, and to # separate sections of the summary. DIVIDER = "*" * 70 def __init__(self, checker=None, verbose=None, optionflags=0): """ Create a new test runner. Optional keyword arg `checker` is the `OutputChecker` that should be used to compare the expected outputs and actual outputs of doctest examples. Optional keyword arg 'verbose' prints lots of stuff if true, only failures if false; by default, it's true iff '-v' is in sys.argv. Optional argument `optionflags` can be used to control how the test runner compares expected output to actual output, and how it displays failures. See the documentation for `testmod` for more information. """ self._checker = checker or OutputChecker() if verbose is None: verbose = '-v' in sys.argv self._verbose = verbose self.optionflags = optionflags self.original_optionflags = optionflags # Keep track of the examples we've run. self.tries = 0 self.failures = 0 self._name2ft = {} # Create a fake output target for capturing doctest output. self._fakeout = _SpoofOut() #///////////////////////////////////////////////////////////////// # Reporting methods #///////////////////////////////////////////////////////////////// def report_start(self, out, test, example): """ Report that the test runner is about to process the given example. (Only displays a message if verbose=True) """ if self._verbose: if example.want: out('Trying:\n' + _indent(example.source) + 'Expecting:\n' + _indent(example.want)) else: out('Trying:\n' + _indent(example.source) + 'Expecting nothing\n') def report_success(self, out, test, example, got): """ Report that the given example ran successfully. (Only displays a message if verbose=True) """ if self._verbose: out("ok\n") def report_failure(self, out, test, example, got): """ Report that the given example failed. 
""" out(self._failure_header(test, example) + self._checker.output_difference(example, got, self.optionflags)) def report_unexpected_exception(self, out, test, example, exc_info): """ Report that the given example raised an unexpected exception. """ out(self._failure_header(test, example) + 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) def _failure_header(self, test, example): out = [self.DIVIDER] if test.filename: if test.lineno is not None and example.lineno is not None: lineno = test.lineno + example.lineno + 1 else: lineno = '?' out.append('File "%s", line %s, in %s' % (test.filename, lineno, test.name)) else: out.append('Line %s, in %s' % (example.lineno+1, test.name)) out.append('Failed example:') source = example.source out.append(_indent(source)) return '\n'.join(out) #///////////////////////////////////////////////////////////////// # DocTest Running #///////////////////////////////////////////////////////////////// def __run(self, test, compileflags, out): """ Run the examples in `test`. Write the outcome of each example with one of the `DocTestRunner.report_*` methods, using the writer function `out`. `compileflags` is the set of compiler flags that should be used to execute examples. Return a tuple `(f, t)`, where `t` is the number of examples tried, and `f` is the number of examples that failed. The examples are run in the namespace `test.globs`. """ # Keep track of the number of failures and tries. failures = tries = 0 # Save the option flags (since option directives can be used # to modify them). original_optionflags = self.optionflags SUCCESS, FAILURE, BOOM = range(3) # `outcome` state check = self._checker.check_output # Process each example. for examplenum, example in enumerate(test.examples): # If REPORT_ONLY_FIRST_FAILURE is set, then suppress # reporting after the first failure. quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and failures > 0) # Merge in the example's options. 
self.optionflags = original_optionflags if example.options: for (optionflag, val) in example.options.items(): if val: self.optionflags |= optionflag else: self.optionflags &= ~optionflag # If 'SKIP' is set, then skip this example. if self.optionflags & SKIP: continue # Record that we started this example. tries += 1 if not quiet: self.report_start(out, test, example) # Use a special filename for compile(), so we can retrieve # the source code during interactive debugging (see # __patched_linecache_getlines). filename = '<doctest %s[%d]>' % (test.name, examplenum) # Run the example in the given context (globs), and record # any exception that gets raised. (But don't intercept # keyboard interrupts.) try: # Don't blink! This is where the user's code gets run. exec(compile(example.source, filename, "single", compileflags, 1), test.globs) self.debugger.set_continue() # ==== Example Finished ==== exception = None except KeyboardInterrupt: raise except: exception = sys.exc_info() self.debugger.set_continue() # ==== Example Finished ==== got = self._fakeout.getvalue() # the actual output self._fakeout.truncate(0) outcome = FAILURE # guilty until proved innocent or insane # If the example executed without raising any exceptions, # verify its output. if exception is None: if check(example.want, got, self.optionflags): outcome = SUCCESS # The example raised an exception: check if it was expected. else: exc_msg = traceback.format_exception_only(*exception[:2])[-1] if not quiet: got += _exception_traceback(exception) # If `example.exc_msg` is None, then we weren't expecting # an exception. if example.exc_msg is None: outcome = BOOM # We expected an exception: see whether it matches. elif check(example.exc_msg, exc_msg, self.optionflags): outcome = SUCCESS # Another chance if they didn't care about the detail. 
elif self.optionflags & IGNORE_EXCEPTION_DETAIL: if check(_strip_exception_details(example.exc_msg), _strip_exception_details(exc_msg), self.optionflags): outcome = SUCCESS # Report the outcome. if outcome is SUCCESS: if not quiet: self.report_success(out, test, example, got) elif outcome is FAILURE: if not quiet: self.report_failure(out, test, example, got) failures += 1 elif outcome is BOOM: if not quiet: self.report_unexpected_exception(out, test, example, exception) failures += 1 else: assert False, ("unknown outcome", outcome) if failures and self.optionflags & FAIL_FAST: break # Restore the option flags (in case they were modified) self.optionflags = original_optionflags # Record and return the number of failures and tries. self.__record_outcome(test, failures, tries) return TestResults(failures, tries) def __record_outcome(self, test, f, t): """ Record the fact that the given DocTest (`test`) generated `f` failures out of `t` tried examples. """ f2, t2 = self._name2ft.get(test.name, (0,0)) self._name2ft[test.name] = (f+f2, t+t2) self.failures += f self.tries += t __LINECACHE_FILENAME_RE = re.compile(r'<doctest ' r'(?P<name>.+)' r'\[(?P<examplenum>\d+)\]>$') def __patched_linecache_getlines(self, filename, module_globals=None): m = self.__LINECACHE_FILENAME_RE.match(filename) if m and m.group('name') == self.test.name: example = self.test.examples[int(m.group('examplenum'))] return example.source.splitlines(keepends=True) else: return self.save_linecache_getlines(filename, module_globals) def run(self, test, compileflags=None, out=None, clear_globs=True): """ Run the examples in `test`, and display the results using the writer function `out`. The examples are run in the namespace `test.globs`. If `clear_globs` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use `clear_globs=False`. 
`compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. The output of each example is checked using `DocTestRunner.check_output`, and the results are formatted by the `DocTestRunner.report_*` methods. """ self.test = test if compileflags is None: compileflags = _extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: encoding = save_stdout.encoding if encoding is None or encoding.lower() == 'utf-8': out = save_stdout.write else: # Use backslashreplace error handling on write def out(s): s = str(s.encode(encoding, 'backslashreplace'), encoding) save_stdout.write(s) sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_trace = sys.gettrace() save_set_trace = pdb.set_trace self.debugger = _OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. 
self.save_linecache_getlines = linecache.getlines linecache.getlines = self.__patched_linecache_getlines # Make sure sys.displayhook just prints the value to stdout save_displayhook = sys.displayhook sys.displayhook = sys.__displayhook__ try: return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace sys.settrace(save_trace) linecache.getlines = self.save_linecache_getlines sys.displayhook = save_displayhook if clear_globs: test.globs.clear() import builtins builtins._ = None #///////////////////////////////////////////////////////////////// # Summarization #///////////////////////////////////////////////////////////////// def summarize(self, verbose=None): """ Print a summary of all the test cases that have been run by this DocTestRunner, and return a tuple `(f, t)`, where `f` is the total number of failed examples, and `t` is the total number of tried examples. The optional `verbose` argument controls how detailed the summary is. If the verbosity is not specified, then the DocTestRunner's verbosity is used. 
""" if verbose is None: verbose = self._verbose notests = [] passed = [] failed = [] totalt = totalf = 0 for x in self._name2ft.items(): name, (f, t) = x assert f <= t totalt += t totalf += f if t == 0: notests.append(name) elif f == 0: passed.append( (name, t) ) else: failed.append(x) if verbose: if notests: print(len(notests), "items had no tests:") notests.sort() for thing in notests: print(" ", thing) if passed: print(len(passed), "items passed all tests:") passed.sort() for thing, count in passed: print(" %3d tests in %s" % (count, thing)) if failed: print(self.DIVIDER) print(len(failed), "items had failures:") failed.sort() for thing, (f, t) in failed: print(" %3d of %3d in %s" % (f, t, thing)) if verbose: print(totalt, "tests in", len(self._name2ft), "items.") print(totalt - totalf, "passed and", totalf, "failed.") if totalf: print("***Test Failed***", totalf, "failures.") elif verbose: print("Test passed.") return TestResults(totalf, totalt) #///////////////////////////////////////////////////////////////// # Backward compatibility cruft to maintain doctest.master. #///////////////////////////////////////////////////////////////// def merge(self, other): d = self._name2ft for name, (f, t) in other._name2ft.items(): if name in d: # Don't print here by default, since doing # so breaks some of the buildbots #print("*** DocTestRunner.merge: '" + name + "' in both" \ # " testers; summing outcomes.") f2, t2 = d[name] f = f + f2 t = t + t2 d[name] = f, t class OutputChecker: """ A class used to check the whether the actual output from a doctest example matches the expected output. `OutputChecker` defines two methods: `check_output`, which compares a given pair of outputs, and returns true if they match; and `output_difference`, which returns a string describing the differences between two outputs. """ def _toAscii(self, s): """ Convert string to hex-escaped ASCII string. 
""" return str(s.encode('ASCII', 'backslashreplace'), "ASCII") def check_output(self, want, got, optionflags): """ Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. """ # If `want` contains hex-escaped character such as "\u1234", # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). # On the other hand, `got` could be another sequence of # characters such as [\u1234], so `want` and `got` should # be folded to hex-escaped ASCII string to compare. got = self._toAscii(got) want = self._toAscii(want) # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # The values True and False replaced 1 and 0 as the return # value for boolean comparisons in Python 2.3. if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): if (got,want) == ("True\n", "1\n"): return True if (got,want) == ("False\n", "0\n"): return True # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub('(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. 
if optionflags & ELLIPSIS: if _ellipsis_match(want, got): return True # We didn't find any match; return false. return False # Should we do a fancy diff? def _do_a_fancy_diff(self, want, got, optionflags): # Not unless they asked for a fancy diff. if not optionflags & (REPORT_UDIFF | REPORT_CDIFF | REPORT_NDIFF): return False # If expected output uses ellipsis, a meaningful fancy diff is # too hard ... or maybe not. In two real-life failures Tim saw, # a diff was a major help anyway, so this is commented out. # [todo] _ellipsis_match() knows which pieces do and don't match, # and could be the basis for a kick-ass diff in this case. ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: ## return False # ndiff does intraline difference marking, so can be useful even # for 1-line differences. if optionflags & REPORT_NDIFF: return True # The other diff types need at least a few lines to be helpful. return want.count('\n') > 2 and got.count('\n') > 2 def output_difference(self, example, got, optionflags): """ Return a string describing the differences between the expected output for a given example (`example`) and the actual output (`got`). `optionflags` is the set of option flags used to compare `want` and `got`. """ want = example.want # If <BLANKLINE>s are being used, then replace blank lines # with <BLANKLINE> in the actual output string. if not (optionflags & DONT_ACCEPT_BLANKLINE): got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) # Check if we should use diff. if self._do_a_fancy_diff(want, got, optionflags): # Split want & got into lines. want_lines = want.splitlines(keepends=True) got_lines = got.splitlines(keepends=True) # Use difflib to find their differences. 
if optionflags & REPORT_UDIFF: diff = difflib.unified_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'unified diff with -expected +actual' elif optionflags & REPORT_CDIFF: diff = difflib.context_diff(want_lines, got_lines, n=2) diff = list(diff)[2:] # strip the diff header kind = 'context diff with expected followed by actual' elif optionflags & REPORT_NDIFF: engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) diff = list(engine.compare(want_lines, got_lines)) kind = 'ndiff with -expected +actual' else: assert 0, 'Bad diff option' # Remove trailing whitespace on diff output. diff = [line.rstrip() + '\n' for line in diff] return 'Differences (%s):\n' % kind + _indent(''.join(diff)) # If we're not using diff, then simply list the expected # output followed by the actual output. if want and got: return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) elif want: return 'Expected:\n%sGot nothing\n' % _indent(want) elif got: return 'Expected nothing\nGot:\n%s' % _indent(got) else: return 'Expected nothing\nGot nothing\n' class DocTestFailure(Exception): """A DocTest example has failed in debugging mode. The exception instance has variables: - test: the DocTest object being run - example: the Example object that failed - got: the actual output """ def __init__(self, test, example, got): self.test = test self.example = example self.got = got def __str__(self): return str(self.test) class UnexpectedException(Exception): """A DocTest example has encountered an unexpected exception The exception instance has variables: - test: the DocTest object being run - example: the Example object that failed - exc_info: the exception info """ def __init__(self, test, example, exc_info): self.test = test self.example = example self.exc_info = exc_info def __str__(self): return str(self.test) class DebugRunner(DocTestRunner): r"""Run doc tests but raise an exception as soon as there is a failure. 
If an unexpected exception occurs, an UnexpectedException is raised. It contains the test, the example, and the original exception: >>> runner = DebugRunner(verbose=False) >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', ... {}, 'foo', 'foo.py', 0) >>> try: ... runner.run(test) ... except UnexpectedException as f: ... failure = f >>> failure.test is test True >>> failure.example.want '42\n' >>> exc_info = failure.exc_info >>> raise exc_info[1] # Already has the traceback Traceback (most recent call last): ... KeyError We wrap the original exception to give the calling application access to the test and example information. If the output doesn't match, then a DocTestFailure is raised: >>> test = DocTestParser().get_doctest(''' ... >>> x = 1 ... >>> x ... 2 ... ''', {}, 'foo', 'foo.py', 0) >>> try: ... runner.run(test) ... except DocTestFailure as f: ... failure = f DocTestFailure objects provide access to the test: >>> failure.test is test True As well as to the example: >>> failure.example.want '2\n' and the actual output: >>> failure.got '1\n' If a failure or error occurs, the globals are left intact: >>> del test.globs['__builtins__'] >>> test.globs {'x': 1} >>> test = DocTestParser().get_doctest(''' ... >>> x = 2 ... >>> raise KeyError ... ''', {}, 'foo', 'foo.py', 0) >>> runner.run(test) Traceback (most recent call last): ... doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)> >>> del test.globs['__builtins__'] >>> test.globs {'x': 2} But the globals are cleared if there is no error: >>> test = DocTestParser().get_doctest(''' ... >>> x = 2 ... 
''', {}, 'foo', 'foo.py', 0) >>> runner.run(test) TestResults(failed=0, attempted=1) >>> test.globs {} """ def run(self, test, compileflags=None, out=None, clear_globs=True): r = DocTestRunner.run(self, test, compileflags, out, False) if clear_globs: test.globs.clear() return r def report_unexpected_exception(self, out, test, example, exc_info): raise UnexpectedException(test, example, exc_info) def report_failure(self, out, test, example, got): raise DocTestFailure(test, example, got) ###################################################################### ## 6. Test Functions ###################################################################### # These should be backwards compatible. # For backward compatibility, a global instance of a DocTestRunner # class, updated by testmod. master = None def testmod(m=None, name=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, exclude_empty=False): """m=None, name=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, exclude_empty=False Test examples in docstrings in functions and classes reachable from module m (or the current module if m is not supplied), starting with m.__doc__. Also test examples reachable from dict m.__test__ if it exists and is not None. m.__test__ maps names to functions, classes and strings; function and class docstrings are tested even if the name is private; strings are tested directly, as if they were docstrings. Return (#failures, #tests). See help(doctest) for an overview. Optional keyword arg "name" gives the name of the module; by default use m.__name__. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use m.__dict__. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. 
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """Run the doctest examples found in module `m`.

    Tests are collected from m.__doc__, from docstrings of functions and
    classes reachable from `m`, and from the optional m.__test__ mapping.
    When `m` is omitted, the __main__ module is used.

    Keyword arguments:
      name          -- name for the module (default: m.__name__)
      globs         -- globals dict for the examples (default: m.__dict__;
                       each docstring runs against its own copy)
      extraglobs    -- extra names merged into the globals
      verbose       -- print everything, not just failures
                       (default: "-v" in sys.argv)
      report        -- print a summary at the end
      optionflags   -- or'ed doctest option flags (ELLIPSIS, SKIP, ...)
      raise_on_error-- raise on the first failure/unexpected exception,
                       allowing post-mortem debugging
      exclude_empty -- skip objects with empty docstrings

    Returns a TestResults(failed, attempted) pair.  Results are also
    merged into the module-global `master` runner for later summarizing.
    """
    global master

    # Default to __main__ so `python -m mymodule` style runs work.
    if m is None:
        m = sys.modules.get('__main__')

    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    if name is None:
        name = m.__name__

    finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_cls = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_cls(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Accumulate results into the global master runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """Run the doctest examples found in the text file `filename`.

    Keyword arguments:
      module_relative -- interpret `filename` as a "/"-separated path
                         relative to the calling module (or to `package`
                         when given); when False, it is an os-specific
                         path, absolute or cwd-relative
      name            -- test name (default: the file's basename)
      package         -- base package for module-relative paths; an error
                         when module_relative is False
      globs           -- globals dict for the examples (default: {}; a
                         copy is used)
      extraglobs      -- extra names merged into the globals
      verbose         -- print everything, not just failures
      report          -- print a summary at the end
      optionflags     -- or'ed doctest option flags
      raise_on_error  -- raise on the first failure/unexpected exception
      parser          -- DocTestParser (or subclass) used to extract tests
      encoding        -- encoding used to decode the file (default utf-8)

    Returns a TestResults(failed, attempted) pair and merges results into
    the module-global `master` runner.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the file (possibly package-relative).
    text, filename = _load_testfile(filename, package, module_relative,
                                    encoding or "utf-8")

    if name is None:
        name = os.path.basename(filename)

    # Run against a private copy of the supplied globals.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'

    runner_cls = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_cls(verbose=verbose, optionflags=optionflags)

    # The whole file becomes a single DocTest.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def set_unittest_reportflags(flags):
    """Set the doctest reporting flags used by the unittest integration.

    Only reporting flags (the REPORT_* constants) may be given; any
    other bit raises ValueError.  Returns the previous flag value so a
    caller can restore it later.
    """
    global _unittest_reportflags

    # Reject anything outside the reporting-flag mask.
    if flags & ~REPORTING_FLAGS:
        raise ValueError("Only reporting flags allowed", flags)

    old, _unittest_reportflags = _unittest_reportflags, flags
    return old
self.failureException(self.format_failure(new.getvalue())) def format_failure(self, err): test = self._dt_test if test.lineno is None: lineno = 'unknown line number' else: lineno = '%s' % test.lineno lname = '.'.join(test.name.split('.')[-1:]) return ('Failed doctest test for %s\n' ' File "%s", line %s, in %s\n\n%s' % (test.name, test.filename, lineno, lname, err) ) def debug(self): r"""Run the test case without results and without catching exceptions The unit test framework includes a debug method on test cases and test suites to support post-mortem debugging. The test code is run in such a way that errors are not caught. This way a caller can catch the errors and initiate post-mortem debugging. The DocTestCase provides a debug method that raises UnexpectedException errors if there is an unexpected exception: >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', ... {}, 'foo', 'foo.py', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except UnexpectedException as f: ... failure = f The UnexpectedException contains the test, the example, and the original exception: >>> failure.test is test True >>> failure.example.want '42\n' >>> exc_info = failure.exc_info >>> raise exc_info[1] # Already has the traceback Traceback (most recent call last): ... KeyError If the output doesn't match, then a DocTestFailure is raised: >>> test = DocTestParser().get_doctest(''' ... >>> x = 1 ... >>> x ... 2 ... ''', {}, 'foo', 'foo.py', 0) >>> case = DocTestCase(test) >>> try: ... case.debug() ... except DocTestFailure as f: ... 
failure = f DocTestFailure objects provide access to the test: >>> failure.test is test True As well as to the example: >>> failure.example.want '2\n' and the actual output: >>> failure.got '1\n' """ self.setUp() runner = DebugRunner(optionflags=self._dt_optionflags, checker=self._dt_checker, verbose=False) runner.run(self._dt_test, clear_globs=False) self.tearDown() def id(self): return self._dt_test.name def __eq__(self, other): if type(self) is not type(other): return NotImplemented return self._dt_test == other._dt_test and \ self._dt_optionflags == other._dt_optionflags and \ self._dt_setUp == other._dt_setUp and \ self._dt_tearDown == other._dt_tearDown and \ self._dt_checker == other._dt_checker def __hash__(self): return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown, self._dt_checker)) def __repr__(self): name = self._dt_test.name.split('.') return "%s (%s)" % (name[-1], '.'.join(name[:-1])) __str__ = __repr__ def shortDescription(self): return "Doctest: " + self._dt_test.name class SkipDocTestCase(DocTestCase): def __init__(self, module): self.module = module DocTestCase.__init__(self, None) def setUp(self): self.skipTest("DocTestSuite will not work with -O2 and above") def test_skip(self): pass def shortDescription(self): return "Skipping tests from %s" % self.module.__name__ __str__ = shortDescription class _DocTestSuite(unittest.TestSuite): def _removeTestAtIndex(self, index): pass def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, **options): """ Convert doctest tests for a module to a unittest test suite. This converts each documentation string in a module that contains doctest tests to a unittest test case. If any of the tests in a doc string fail, then the test case fails. An exception is raised showing the name of the file containing the test and a (sometimes approximate) line number. The `module` argument provides the module to be tested. The argument can be either a module or a module name. 
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """Build a unittest.TestSuite from the doctests found in `module`.

    `module` may be a module object or a dotted module name; when
    omitted, the calling module is used.  Remaining keyword `options`
    (setUp, tearDown, optionflags, checker, ...) are forwarded to each
    DocTestCase.

    Raises ValueError when the module defines no docstrings at all;
    under -O2 (where docstrings are stripped) a skip-only suite is
    returned instead.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests:
        if sys.flags.optimize >= 2:
            # Docstrings are stripped under -O2, so nothing can be
            # found; record a skip rather than an error.
            suite = _DocTestSuite()
            suite.addTest(SkipDocTestCase(module))
            return suite
        # A module with no docstrings at all usually indicates a
        # packaging bug; raising here reveals it instead of silently
        # producing an empty suite.  (See also issue #14649.)
        raise ValueError(module, "has no docstrings")

    tests.sort()
    suite = _DocTestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Point failures at the module's source, not its bytecode.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
def DocFileSuite(*paths, **kw):
    """A unittest suite running the doctests in one or more text files.

    Each positional argument is a file path; how it is interpreted is
    controlled by the keyword arguments, which are forwarded to
    DocFileTest:

      module_relative -- treat paths as "/"-separated, module-relative
                         (default True); otherwise os-specific paths
      package         -- base package for module-relative paths; an
                         error when module_relative is False
      setUp/tearDown  -- called before/after each file's tests with the
                         DocTest object (its globals are on test.globs)
      globs           -- initial globals dict for the tests
      optionflags     -- or'ed doctest option flags
      parser          -- DocTestParser (or subclass) extracting the tests
      encoding        -- encoding used to decode the files
    """
    suite = _DocTestSuite()

    # Resolve the package here rather than in DocFileTest so that
    # _normalize_module inspects *our* caller's frame and guesses the
    # package correctly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
def script_from_examples(s):
    r"""Convert doctest-style text into an executable Python script.

    Example source lines become plain code; expected output becomes
    '## '-prefixed comments and all surrounding prose becomes '#'
    comments.
    """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source code, minus its trailing newline.
            output.append(piece.source[:-1])
            want = piece.want
            if want:
                output.append('# Expected:')
                output.extend('## ' + l for l in want.split('\n')[:-1])
        else:
            # Narrative text becomes comment lines.
            output.extend(_comment_line(l) for l in piece.split('\n')[:-1])

    # Strip bare '#' comment lines from both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)

    # The trailing newline keeps exec() happy (see bug #1172785).
    return '\n'.join(output) + '\n'
""" module = _normalize_module(module) tests = DocTestFinder().find(module) test = [t for t in tests if t.name == name] if not test: raise ValueError(name, "not found in tests") test = test[0] testsrc = script_from_examples(test.docstring) return testsrc def debug_src(src, pm=False, globs=None): """Debug a single doctest docstring, in argument `src`'""" testsrc = script_from_examples(src) debug_script(testsrc, pm, globs) def debug_script(src, pm=False, globs=None): "Debug a test script. `src` is the script, as a string." import pdb if globs: globs = globs.copy() else: globs = {} if pm: try: exec(src, globs, globs) except: print(sys.exc_info()[1]) p = pdb.Pdb(nosigint=True) p.reset() p.interaction(None, sys.exc_info()[2]) else: pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) def debug(module, name, pm=False): """Debug a single doctest docstring. Provide the module (or dotted name of the module) containing the test to be debugged and the name (within the module) of the object with the docstring with tests to be debugged. """ module = _normalize_module(module) testsrc = testsource(module, name) debug_script(testsrc, pm, module.__dict__) ###################################################################### ## 9. Example Usage ###################################################################### class _TestClass: """ A pointless class, for sanity-checking of docstring testing. Methods: square() get() >>> _TestClass(13).get() + _TestClass(-12).get() 1 >>> hex(_TestClass(13).square().get()) '0xa9' """ def __init__(self, val): """val -> _TestClass object with associated value val. >>> t = _TestClass(123) >>> print(t.get()) 123 """ self.val = val def square(self): """square() -> square TestClass's associated value >>> _TestClass(13).square().get() 169 """ self.val = self.val ** 2 return self def get(self): """get() -> return TestClass's associated value. 
def _test():
    """Command-line entry point: run the doctests in the files named on
    the command line.  Returns 0 on success, 1 on the first failing file.
    """
    parser = argparse.ArgumentParser(description="doctest runner")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print very verbose output for all tests')
    parser.add_argument('-o', '--option', action='append',
                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
                        help=('specify a doctest option flag to apply'
                              ' to the test run; may be specified more'
                              ' than once to apply multiple options'))
    parser.add_argument('-f', '--fail-fast', action='store_true',
                        help=('stop running tests after first failure (this'
                              ' is a shorthand for -o FAIL_FAST, and is'
                              ' in addition to any other -o options)'))
    parser.add_argument('file', nargs='+',
                        help='file containing the tests to run')
    args = parser.parse_args()

    # Fold the named option flags (plus -f shorthand) into one bitmask.
    options = 0
    for optname in args.option:
        options |= OPTIONFLAGS_BY_NAME[optname]
    if args.fail_fast:
        options |= FAIL_FAST

    for filename in args.file:
        if filename.endswith(".py"):
            # Import the file as a module so testmod() can find its
            # tests.  Package-internal imports may still fail since only
            # the containing directory is put on sys.path.
            dirname, modfile = os.path.split(filename)
            sys.path.insert(0, dirname)
            m = __import__(modfile[:-3])
            del sys.path[0]
            failures, _ = testmod(m, verbose=args.verbose,
                                  optionflags=options)
        else:
            failures, _ = testfile(filename, module_relative=False,
                                   verbose=args.verbose,
                                   optionflags=options)
        if failures:
            return 1
    return 0
lgpl-2.1
40123151ChengYu/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/os.py
635
35582
r"""OS routines for Mac, NT, or Posix depending on what system we're on. This exports: - all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc. - os.path is either posixpath or ntpath - os.name is either 'posix', 'nt', 'os2' or 'ce'. - os.curdir is a string representing the current directory ('.' or ':') - os.pardir is a string representing the parent directory ('..' or '::') - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - os.extsep is the extension separator (always '.') - os.altsep is the alternate pathname separator (None or '/') - os.pathsep is the component separator used in $PATH etc - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') - os.defpath is the default search path for executables - os.devnull is the file path of the null device ('/dev/null', etc.) Programs that import and use 'os' stand a better chance of being portable between different platforms. Of course, they must then only use functions that are defined by all platforms (e.g., unlink and opendir), and leave all pathname manipulation to os.path (e.g., split and join). """ import sys, errno import stat as st _names = sys.builtin_module_names # Note: more names are added to __all__ later. __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", "popen", "extsep"] def _exists(name): return name in globals() def _get_exports_list(module): try: return list(module.__all__) except AttributeError: return [n for n in dir(module) if n[0] != '_'] # Any new dependencies of the os module and/or changes in path separator # requires updating importlib as well. 
# ---------------------------------------------------------------------------
# Bind the platform-specific builtin module (posix/nt/os2/ce) and the matching
# path module, re-exporting their public names from os.  Exactly one branch
# runs; each also tries to grab the optional _exit and _have_functions names.
# ---------------------------------------------------------------------------
if 'posix' in _names:
    name = 'posix'
    linesep = '\n'
    from posix import *
    try:
        from posix import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import posixpath as path

    try:
        from posix import _have_functions
    except ImportError:
        pass

elif 'nt' in _names:
    name = 'nt'
    linesep = '\r\n'
    from nt import *
    try:
        from nt import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    import ntpath as path

    import nt
    __all__.extend(_get_exports_list(nt))
    del nt

    try:
        from nt import _have_functions
    except ImportError:
        pass

elif 'os2' in _names:
    name = 'os2'
    linesep = '\r\n'
    from os2 import *
    try:
        from os2 import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    # EMX GCC builds of OS/2 Python ship their own path module.
    if sys.version.find('EMX GCC') == -1:
        import ntpath as path
    else:
        import os2emxpath as path
        from _emx_link import link

    import os2
    __all__.extend(_get_exports_list(os2))
    del os2

    try:
        from os2 import _have_functions
    except ImportError:
        pass

elif 'ce' in _names:
    name = 'ce'
    linesep = '\r\n'
    from ce import *
    try:
        from ce import _exit
        __all__.append('_exit')
    except ImportError:
        pass
    # We can use the standard Windows path.
    import ntpath as path

    import ce
    __all__.extend(_get_exports_list(ce))
    del ce

    try:
        from ce import _have_functions
    except ImportError:
        pass

else:
    raise ImportError('no os specific module found')

# Register the chosen path module under the 'os.path' name so that
# "import os.path" works, then pull its separators/constants into os.
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
    devnull)

del _names

# ---------------------------------------------------------------------------
# Build the supports_* capability sets advertising which functions accept
# dir_fd=, effective_ids=, fd arguments, or follow_symlinks=, based on the
# HAVE_* markers exported by the platform module in _have_functions.
# ---------------------------------------------------------------------------
if _exists("_have_functions"):
    _globals = globals()
    def _add(str, fn):
        # Add the os-level function named *fn* to the set currently being
        # built (_set) when the platform advertises capability *str*.
        if (fn in _globals) and (str in _have_functions):
            _set.add(_globals[fn])

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    _add("HAVE_FCHMODAT",   "chmod")
    _add("HAVE_FCHOWNAT",   "chown")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_FUTIMESAT",  "utime")
    _add("HAVE_LINKAT",     "link")
    _add("HAVE_MKDIRAT",    "mkdir")
    _add("HAVE_MKFIFOAT",   "mkfifo")
    _add("HAVE_MKNODAT",    "mknod")
    _add("HAVE_OPENAT",     "open")
    _add("HAVE_READLINKAT", "readlink")
    _add("HAVE_RENAMEAT",   "rename")
    _add("HAVE_SYMLINKAT",  "symlink")
    _add("HAVE_UNLINKAT",   "unlink")
    _add("HAVE_UNLINKAT",   "rmdir")
    _add("HAVE_UTIMENSAT",  "utime")
    supports_dir_fd = _set

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    supports_effective_ids = _set

    _set = set()
    _add("HAVE_FCHDIR",     "chdir")
    _add("HAVE_FCHMOD",     "chmod")
    _add("HAVE_FCHOWN",     "chown")
    _add("HAVE_FDOPENDIR",  "listdir")
    _add("HAVE_FEXECVE",    "execve")
    _set.add(stat) # fstat always works
    _add("HAVE_FTRUNCATE",  "truncate")
    _add("HAVE_FUTIMENS",   "utime")
    _add("HAVE_FUTIMES",    "utime")
    _add("HAVE_FPATHCONF",  "pathconf")
    if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
        _add("HAVE_FSTATVFS", "statvfs")
    supports_fd = _set

    _set = set()
    _add("HAVE_FACCESSAT",  "access")
    # Some platforms don't support lchmod().  Often the function exists
    # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
    # (No, I don't know why that's a good design.)  ./configure will detect
    # this and reject it--so HAVE_LCHMOD still won't be defined on such
    # platforms.  This is Very Helpful.
    #
    # However, sometimes platforms without a working lchmod() *do* have
    # fchmodat().  (Examples: Linux kernel 3.2 with glibc 2.15,
    # OpenIndiana 3.x.)  And fchmodat() has a flag that theoretically makes
    # it behave like lchmod().  So in theory it would be a suitable
    # replacement for lchmod().  But when lchmod() doesn't work, fchmodat()'s
    # flag doesn't work *either*.  Sadly ./configure isn't sophisticated
    # enough to detect this condition--it only determines whether or not
    # fchmodat() minimally works.
    #
    # Therefore we simply ignore fchmodat() when deciding whether or not
    # os.chmod supports follow_symlinks.  Just checking lchmod() is
    # sufficient.  After all--if you have a working fchmodat(), your
    # lchmod() almost certainly works too.
    #
    # _add("HAVE_FCHMODAT",   "chmod")
    _add("HAVE_FCHOWNAT",   "chown")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_LCHFLAGS",   "chflags")
    _add("HAVE_LCHMOD",     "chmod")
    if _exists("lchown"): # mac os x10.3
        _add("HAVE_LCHOWN", "chown")
    _add("HAVE_LINKAT",     "link")
    _add("HAVE_LUTIMES",    "utime")
    _add("HAVE_LSTAT",      "stat")
    _add("HAVE_FSTATAT",    "stat")
    _add("HAVE_UTIMENSAT",  "utime")
    _add("MS_WINDOWS",      "stat")
    supports_follow_symlinks = _set

    # Drop the scaffolding so it does not leak into the os namespace.
    del _set
    del _have_functions
    del _globals
    del _add


# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2


def _get_masked_mode(mode):
    # Return *mode* with the process umask applied, without permanently
    # changing the umask (umask(0) reads-and-sets, the second call restores).
    mask = umask(0)
    umask(mask)
    return mode & ~mask

# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)

def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(path [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist. If the
    target directory with the same mode as we specified already exists,
    raises an OSError if exist_ok is False, otherwise no exception is
    raised.  This is recursive.

    """
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs(head, mode, exist_ok)
        except OSError as e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        cdir = curdir
        if isinstance(tail, bytes):
            cdir = bytes(curdir, 'ASCII')
        if tail == cdir:           # xxx/newdir/. exists if xxx/newdir exists
            return
    try:
        mkdir(name, mode)
    except OSError as e:
        dir_exists = path.isdir(name)
        expected_mode = _get_masked_mode(mode)
        if dir_exists:
            # S_ISGID is automatically copied by the OS from parent to child
            # directories on mkdir.  Don't consider it being set to be a mode
            # mismatch as mkdir does not unset it when not specified in mode.
            actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
        else:
            actual_mode = -1
        if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
                actual_mode == expected_mode):
            if dir_exists and actual_mode != expected_mode:
                e.strerror += ' (mode %o != expected mode %o)' % (
                    actual_mode, expected_mode)
            raise

def removedirs(name):
    """removedirs(path)

    Super-rmdir; remove a leaf directory and all empty intermediate
    ones.  Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs.  Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.

    """
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    while head and tail:
        try:
            rmdir(head)
        except error:
            break
        head, tail = path.split(head)

def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty.  Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first.  After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned way until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.

    """
    head, tail = path.split(new)
    if head and tail and not path.exists(head):
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    if head and tail:
        try:
            removedirs(head)
        except error:
            pass

__all__.extend(["makedirs", "removedirs", "renames"])

def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
    To get a full path (which begins with top) to a file or directory in
    dirpath, do os.path.join(dirpath, name).

    If optional arg 'topdown' is true or not specified, the triple for a
    directory is generated before the triples for any of its subdirectories
    (directories are generated top down).  If topdown is false, the triple
    for a directory is generated after the triples for all of its
    subdirectories (directories are generated bottom up).

    When topdown is true, the caller can modify the dirnames list in-place
    (e.g., via del or slice assignment), and walk will only recurse into the
    subdirectories whose names remain in dirnames; this can be used to prune
    the search, or to impose a specific order of visiting.  Modifying
    dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
    generated.

    By default errors from the os.listdir() call are ignored.  If
    optional arg 'onerror' is specified, it should be a function; it
    will be called with one argument, an os.error instance.  It can
    report the error to continue with the walk, or raise the exception
    to abort the walk.  Note that the filename is available as the
    filename attribute of the exception object.

    By default, os.walk does not follow symbolic links to subdirectories on
    systems that support them.  In order to get this functionality, set the
    optional argument 'followlinks' to true.

    Caution:  if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk.  walk never
    changes the current directory, and assumes that the client doesn't
    either.

    Example:

    import os
    from os.path import join, getsize
    for root, dirs, files in os.walk('python/Lib/email'):
        print(root, "consumes", end="")
        print(sum([getsize(join(root, name)) for name in files]), end="")
        print("bytes in", len(files), "non-directory files")
        if 'CVS' in dirs:
            dirs.remove('CVS')  # don't visit CVS directories

    """

    islink, join, isdir = path.islink, path.join, path.isdir

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that listdir and error are globals in this module due
        # to earlier import-*.
        names = listdir(top)
    except error as err:
        if onerror is not None:
            onerror(err)
        return

    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        new_path = join(top, name)
        if followlinks or not islink(new_path):
            yield from walk(new_path, topdown, onerror, followlinks)
    if not topdown:
        yield top, dirs, nondirs

__all__.append("walk")

# fwalk is only usable when the platform supports dir_fd on open/stat and
# fd-based listdir/stat (checked via the capability sets built above).
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:

    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.

        This behaves exactly like walk(), except that it yields a 4-tuple

            dirpath, dirnames, filenames, dirfd

        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.

        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).

        If dir_fd is not None, it should be a file descriptor open to a
        directory, and top should be relative; top will then be relative to
        that directory.  (dir_fd is always supported for fwalk.)

        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.

        Example:

        import os
        for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                  end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
        """
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                                    path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            close(topfd)

    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.

        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except FileNotFoundError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                .st_mode):
                        nondirs.append(name)
                except FileNotFoundError:
                    continue

        if topdown:
            yield toppath, dirs, nondirs, topfd

        for name in dirs:
            try:
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except error as err:
                if onerror is not None:
                    onerror(err)
                return
            try:
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)

        if not topdown:
            yield toppath, dirs, nondirs, topfd

    __all__.append("fwalk")

# Make sure os.environ exists, at least
try:
    environ
except NameError:
    environ = {}

def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process. """
    execv(file, args)

def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process. """
    env = args[-1]
    execve(file, args[:-1], env)

def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process. """
    execvp(file, args)

def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process. """
    env = args[-1]
    execvpe(file, args[:-1], env)

def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args)

def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env , replacing the
    current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args, env)

__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])

def _execvpe(file, args, env=None):
    # Shared implementation of execvp/execvpe: try exec in each $PATH entry,
    # remembering the first "interesting" (non-ENOENT/ENOTDIR) failure so it
    # can be re-raised in preference to a generic not-found error.
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ

    head, tail = path.split(file)
    if head:
        # An explicit directory component means no $PATH search.
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except error as e:
            last_exc = e
            tb = sys.exc_info()[2]
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    raise last_exc.with_traceback(tb)


def get_exec_path(env=None):
    """Returns the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.

    *env* must be an environment variable dict or None.  If *env* is None,
    os.environ will be used.
    """
    # Use a local import instead of a global import to limit the number of
    # modules loaded at startup: the os module is always loaded at startup by
    # Python. It may also avoid a bootstrap issue.
    import warnings

    if env is None:
        env = environ

    # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
    # BytesWarning when using python -b or python -bb: ignore the warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)

        try:
            path_list = env.get('PATH')
        except TypeError:
            path_list = None

        if supports_bytes_environ:
            try:
                path_listb = env[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                if path_list is not None:
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                path_list = path_listb

            if path_list is not None and isinstance(path_list, bytes):
                path_list = fsdecode(path_list)

    if path_list is None:
        path_list = defpath
    return path_list.split(pathsep)


# Change environ to automatically call putenv(), unsetenv if they exist.
from collections.abc import MutableMapping

class _Environ(MutableMapping):
    """Mapping view over the process environment.

    Keys and values are converted with the supplied encode/decode callables
    so the same class can back both os.environ (str) and os.environb (bytes);
    mutations are mirrored into the real environment via putenv/unsetenv.
    """
    def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
        self.encodekey = encodekey
        self.decodekey = decodekey
        self.encodevalue = encodevalue
        self.decodevalue = decodevalue
        self.putenv = putenv
        self.unsetenv = unsetenv
        self._data = data

    def __getitem__(self, key):
        try:
            value = self._data[self.encodekey(key)]
        except KeyError:
            # raise KeyError with the original key value
            raise KeyError(key) from None
        return self.decodevalue(value)

    def __setitem__(self, key, value):
        key = self.encodekey(key)
        value = self.encodevalue(value)
        self.putenv(key, value)
        self._data[key] = value

    def __delitem__(self, key):
        encodedkey = self.encodekey(key)
        self.unsetenv(encodedkey)
        try:
            del self._data[encodedkey]
        except KeyError:
            # raise KeyError with the original key value
            raise KeyError(key) from None

    def __iter__(self):
        for key in self._data:
            yield self.decodekey(key)

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return 'environ({{{}}})'.format(', '.join(
            ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
            for key, value in self._data.items())))

    def copy(self):
        return dict(self)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
        return self[key]

# Fall back to no-op stand-ins when the platform module does not provide
# putenv/unsetenv; only advertise the real ones in __all__.
try:
    _putenv = putenv
except NameError:
    _putenv = lambda key, value: None
else:
    __all__.append("putenv")

try:
    _unsetenv = unsetenv
except NameError:
    _unsetenv = lambda key: _putenv(key, "")
else:
    __all__.append("unsetenv")

def _createenviron():
    # Build the module-level os.environ mapping with the right key/value
    # codecs for this platform.
    if name in ('os2', 'nt'):
        # Where Env Var Names Must Be UPPERCASE
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            return encode(key).upper()
        data = {}
        for key, value in environ.items():
            data[encodekey(key)] = value
    else:
        # Where Env Var Names Can Be Mixed Case
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        data = environ
    return _Environ(data,
        encodekey, decode,
        encode, decode,
        _putenv, _unsetenv)

# unicode environ
environ = _createenviron()
del _createenviron


def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    return environ.get(key, default)

supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))

if supports_bytes_environ:
    def _check_bytes(value):
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value

    # bytes environ
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes

    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.
        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)

    __all__.extend(("environb", "getenvb"))

def _fscodec():
    # Factory returning the (fsencode, fsdecode) pair, closing over the
    # filesystem encoding and the matching error handler.
    encoding = sys.getfilesystemencoding()
    if encoding == 'mbcs':
        errors = 'strict'
    else:
        errors = 'surrogateescape'

    def fsencode(filename):
        """
        Encode filename to the filesystem encoding with 'surrogateescape' error
        handler, return bytes unchanged. On Windows, use 'strict' error handler if
        the file system encoding is 'mbcs' (which is the default encoding).
        """
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, str):
            return filename.encode(encoding, errors)
        else:
            raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    def fsdecode(filename):
        """
        Decode filename from the filesystem encoding with 'surrogateescape' error
        handler, return str unchanged. On Windows, use 'strict' error handler if
        the file system encoding is 'mbcs' (which is the default encoding).
        """
        if isinstance(filename, str):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(encoding, errors)
        else:
            raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    return fsencode, fsdecode

fsencode, fsdecode = _fscodec()
del _fscodec

# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # A failed exec in the child must never return to the caller's
                # code; exit with the conventional 127 status instead.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)

if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)


    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])


if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.
    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)


    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])

# Make stat_result/statvfs_result picklable via copyreg; the try/except
# guards handle platforms where these result types do not exist.
import copyreg as _copyreg

def _make_stat_result(tup, dict):
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass

def _make_statvfs_result(tup, dict):
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copyreg.pickle(statvfs_result, _pickle_statvfs_result,
                    _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass

# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
    """Open a pipe to/from command *cmd*, implemented via subprocess.Popen.

    Returns a file-like object whose close() waits for the child and
    reports its exit status (see _wrap_close below).
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w"):
        raise ValueError("invalid mode %r" % mode)
    if buffering == 0 or buffering is None:
        raise ValueError("popen() does not support unbuffered streams")
    import subprocess, io
    if mode == "r":
        proc = subprocess.Popen(cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                bufsize=buffering)
        return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
    else:
        proc = subprocess.Popen(cmd,
                                shell=True,
                                stdin=subprocess.PIPE,
                                bufsize=buffering)
        return _wrap_close(io.TextIOWrapper(proc.stdin), proc)

# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
    """Proxy around a pipe stream; close() also reaps the child process."""
    def __init__(self, stream, proc):
        self._stream = stream
        self._proc = proc
    def close(self):
        self._stream.close()
        returncode = self._proc.wait()
        if returncode == 0:
            return None
        if name == 'nt':
            return returncode
        else:
            return returncode << 8  # Shift left to match old behavior
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def __getattr__(self, name):
        # Delegate everything else to the wrapped stream.
        return getattr(self._stream, name)
    def __iter__(self):
        return iter(self._stream)

# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
    """Return an open file object connected to the file descriptor *fd*;
    a thin wrapper around io.open() that validates the descriptor type."""
    if not isinstance(fd, int):
        raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
    import io
    return io.open(fd, *args, **kwargs)
gpl-2.0
sn1k/app_mundial
lib/python2.7/site-packages/django/contrib/admindocs/views.py
10
15480
from importlib import import_module import inspect import os import re import warnings from django import template from django.apps import apps from django.conf import settings from django.contrib import admin from django.contrib.admin.views.decorators import staff_member_required from django.db import models from django.core.exceptions import ViewDoesNotExist from django.http import Http404 from django.core import urlresolvers from django.contrib.admindocs import utils from django.utils.decorators import method_decorator from django.utils.deprecation import RemovedInDjango18Warning from django.utils._os import upath from django.utils import six from django.utils.translation import ugettext as _ from django.views.generic import TemplateView # Exclude methods starting with these strings from documentation MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_') if getattr(settings, 'ADMIN_FOR', None): warnings.warn('The ADMIN_FOR setting has been removed, you can remove ' 'this setting from your configuration.', RemovedInDjango18Warning, stacklevel=2) class BaseAdminDocsView(TemplateView): """ Base view for admindocs views. 
""" @method_decorator(staff_member_required) def dispatch(self, *args, **kwargs): if not utils.docutils_is_available: # Display an error message for people without docutils self.template_name = 'admin_doc/missing_docutils.html' return self.render_to_response(admin.site.each_context()) return super(BaseAdminDocsView, self).dispatch(*args, **kwargs) def get_context_data(self, **kwargs): kwargs.update({'root_path': urlresolvers.reverse('admin:index')}) kwargs.update(admin.site.each_context()) return super(BaseAdminDocsView, self).get_context_data(**kwargs) class BookmarkletsView(BaseAdminDocsView): template_name = 'admin_doc/bookmarklets.html' def get_context_data(self, **kwargs): context = super(BookmarkletsView, self).get_context_data(**kwargs) context.update({ 'admin_url': "%s://%s%s" % ( self.request.scheme, self.request.get_host(), context['root_path']) }) return context class TemplateTagIndexView(BaseAdminDocsView): template_name = 'admin_doc/template_tag_index.html' def get_context_data(self, **kwargs): load_all_installed_template_libraries() tags = [] app_libs = list(six.iteritems(template.libraries)) builtin_libs = [(None, lib) for lib in template.builtins] for module_name, library in builtin_libs + app_libs: for tag_name, tag_func in library.tags.items(): title, body, metadata = utils.parse_docstring(tag_func.__doc__) if title: title = utils.parse_rst(title, 'tag', _('tag:') + tag_name) if body: body = utils.parse_rst(body, 'tag', _('tag:') + tag_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name) if library in template.builtins: tag_library = '' else: tag_library = module_name.split('.')[-1] tags.append({ 'name': tag_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) kwargs.update({'tags': tags}) return super(TemplateTagIndexView, self).get_context_data(**kwargs) class TemplateFilterIndexView(BaseAdminDocsView): template_name = 'admin_doc/template_filter_index.html' def 
get_context_data(self, **kwargs): load_all_installed_template_libraries() filters = [] app_libs = list(six.iteritems(template.libraries)) builtin_libs = [(None, lib) for lib in template.builtins] for module_name, library in builtin_libs + app_libs: for filter_name, filter_func in library.filters.items(): title, body, metadata = utils.parse_docstring(filter_func.__doc__) if title: title = utils.parse_rst(title, 'filter', _('filter:') + filter_name) if body: body = utils.parse_rst(body, 'filter', _('filter:') + filter_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name) if library in template.builtins: tag_library = '' else: tag_library = module_name.split('.')[-1] filters.append({ 'name': filter_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) kwargs.update({'filters': filters}) return super(TemplateFilterIndexView, self).get_context_data(**kwargs) class ViewIndexView(BaseAdminDocsView): template_name = 'admin_doc/view_index.html' def get_context_data(self, **kwargs): views = [] urlconf = import_module(settings.ROOT_URLCONF) view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns) for (func, regex, namespace, name) in view_functions: views.append({ 'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)), 'url': simplify_regex(regex), 'url_name': ':'.join((namespace or []) + (name and [name] or [])), 'namespace': ':'.join((namespace or [])), 'name': name, }) kwargs.update({'views': views}) return super(ViewIndexView, self).get_context_data(**kwargs) class ViewDetailView(BaseAdminDocsView): template_name = 'admin_doc/view_detail.html' def get_context_data(self, **kwargs): view = self.kwargs['view'] mod, func = urlresolvers.get_mod_func(view) try: view_func = getattr(import_module(mod), func) except (ImportError, AttributeError): raise Http404 title, body, metadata = utils.parse_docstring(view_func.__doc__) if title: title = 
utils.parse_rst(title, 'view', _('view:') + view) if body: body = utils.parse_rst(body, 'view', _('view:') + view) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view) kwargs.update({ 'name': view, 'summary': title, 'body': body, 'meta': metadata, }) return super(ViewDetailView, self).get_context_data(**kwargs) class ModelIndexView(BaseAdminDocsView): template_name = 'admin_doc/model_index.html' def get_context_data(self, **kwargs): m_list = [m._meta for m in apps.get_models()] kwargs.update({'models': m_list}) return super(ModelIndexView, self).get_context_data(**kwargs) class ModelDetailView(BaseAdminDocsView): template_name = 'admin_doc/model_detail.html' def get_context_data(self, **kwargs): # Get the model class. try: app_config = apps.get_app_config(self.kwargs['app_label']) except LookupError: raise Http404(_("App %(app_label)r not found") % self.kwargs) try: model = app_config.get_model(self.kwargs['model_name']) except LookupError: raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs) opts = model._meta # Gather fields/field descriptions. fields = [] for field in opts.fields: # ForeignKey is a special case since the field will actually be a # descriptor that returns the other object if isinstance(field, models.ForeignKey): data_type = field.rel.to.__name__ app_label = field.rel.to._meta.app_label verbose = utils.parse_rst( (_("the related `%(app_label)s.%(data_type)s` object") % { 'app_label': app_label, 'data_type': data_type, }), 'model', _('model:') + data_type, ) else: data_type = get_readable_field_data_type(field) verbose = field.verbose_name fields.append({ 'name': field.name, 'data_type': data_type, 'verbose': verbose, 'help_text': field.help_text, }) # Gather many-to-many fields. 
for field in opts.many_to_many: data_type = field.rel.to.__name__ app_label = field.rel.to._meta.app_label verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type} fields.append({ 'name': "%s.all" % field.name, "data_type": 'List', 'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name), }) fields.append({ 'name': "%s.count" % field.name, 'data_type': 'Integer', 'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name), }) # Gather model methods. for func_name, func in model.__dict__.items(): if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1): try: for exclude in MODEL_METHODS_EXCLUDE: if func_name.startswith(exclude): raise StopIteration except StopIteration: continue verbose = func.__doc__ if verbose: verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name) fields.append({ 'name': func_name, 'data_type': get_return_data_type(func_name), 'verbose': verbose, }) # Gather related objects for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects(): verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name} accessor = rel.get_accessor_name() fields.append({ 'name': "%s.all" % accessor, 'data_type': 'List', 'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name), }) fields.append({ 'name': "%s.count" % accessor, 'data_type': 'Integer', 'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name), }) kwargs.update({ 'name': '%s.%s' % (opts.app_label, opts.object_name), # Translators: %s is an object type name 'summary': _("Attributes on %s objects") % opts.object_name, 'description': model.__doc__, 'fields': fields, }) return super(ModelDetailView, self).get_context_data(**kwargs) class 
TemplateDetailView(BaseAdminDocsView): template_name = 'admin_doc/template_detail.html' def get_context_data(self, **kwargs): template = self.kwargs['template'] templates = [] for dir in settings.TEMPLATE_DIRS: template_file = os.path.join(dir, template) templates.append({ 'file': template_file, 'exists': os.path.exists(template_file), 'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '', 'order': list(settings.TEMPLATE_DIRS).index(dir), }) kwargs.update({ 'name': template, 'templates': templates, }) return super(TemplateDetailView, self).get_context_data(**kwargs) #################### # Helper functions # #################### def load_all_installed_template_libraries(): # Load/register all template tag libraries from installed apps. for module_name in template.get_templatetags_modules(): mod = import_module(module_name) try: libraries = [ os.path.splitext(p)[0] for p in os.listdir(os.path.dirname(upath(mod.__file__))) if p.endswith('.py') and p[0].isalpha() ] except OSError: libraries = [] for library_name in libraries: try: template.get_library(library_name) except template.InvalidTemplateLibrary: pass def get_return_data_type(func_name): """Return a somewhat-helpful data type given a function name""" if func_name.startswith('get_'): if func_name.endswith('_list'): return 'List' elif func_name.endswith('_count'): return 'Integer' return '' def get_readable_field_data_type(field): """Returns the description for a given field type, if it exists, Fields' descriptions can contain format strings, which will be interpolated against the values of field.__dict__ before being output.""" return field.description % field.__dict__ def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None): """ Return a list of views from a list of urlpatterns. 
Each object in the returned list is a two-tuple: (view_func, regex) """ views = [] for p in urlpatterns: if hasattr(p, 'url_patterns'): try: patterns = p.url_patterns except ImportError: continue views.extend(extract_views_from_urlpatterns( patterns, base + p.regex.pattern, (namespace or []) + (p.namespace and [p.namespace] or []) )) elif hasattr(p, 'callback'): try: views.append((p.callback, base + p.regex.pattern, namespace, p.name)) except ViewDoesNotExist: continue else: raise TypeError(_("%s does not appear to be a urlpattern object") % p) return views named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)') non_named_group_matcher = re.compile(r'\(.*?\)') def simplify_regex(pattern): """ Clean up urlpattern regexes into something somewhat readable by Mere Humans: turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into "<sport_slug>/athletes/<athlete_slug>/" """ # handle named groups first pattern = named_group_matcher.sub(lambda m: m.group(1), pattern) # handle non-named groups pattern = non_named_group_matcher.sub("<var>", pattern) # clean up any outstanding regex-y characters. pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '') if not pattern.startswith('/'): pattern = '/' + pattern return pattern
gpl-2.0
alsrgv/tensorflow
tensorflow/contrib/learn/python/learn/export_strategy.py
42
4759
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ExportStrategy class represents different flavors of model export (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.util import tf_inspect from tensorflow.python.util.deprecation import deprecated __all__ = ['ExportStrategy'] class ExportStrategy( collections.namedtuple('ExportStrategy', ['name', 'export_fn', 'strip_default_attrs'])): """A class representing a type of model export. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Typically constructed by a utility function specific to the exporter, such as `saved_model_export_utils.make_export_strategy()`. Attributes: name: The directory name under the export base directory where exports of this type will be written. export_fn: A function that writes an export, given an estimator, a destination path, and optionally a checkpoint path and an evaluation result for that checkpoint. 
This export_fn() may be run repeatedly during continuous training, or just once at the end of fixed-length training. Note the export_fn() may choose whether or not to export based on the eval result or based on an internal timer or any other criterion, if exports are not desired for every checkpoint. The signature of this function must be one of: * `(estimator, export_path) -> export_path` * `(estimator, export_path, checkpoint_path) -> export_path` * `(estimator, export_path, checkpoint_path, eval_result) -> export_path` * `(estimator, export_path, checkpoint_path, eval_result, strip_default_attrs) -> export_path` strip_default_attrs: (Optional) Boolean. If set as True, default attrs in the `GraphDef` will be stripped on write. This is recommended for better forward compatibility of the resulting `SavedModel`. """ @deprecated(None, 'Please switch to tf.estimator.train_and_evaluate, and use ' 'tf.estimator.Exporter.') def __new__(cls, name, export_fn, strip_default_attrs=None): return super(ExportStrategy, cls).__new__( cls, name, export_fn, strip_default_attrs) def export(self, estimator, export_path, checkpoint_path=None, eval_result=None): """Exports the given Estimator to a specific format. Args: estimator: the Estimator to export. export_path: A string containing a directory where to write the export. checkpoint_path: The checkpoint path to export. If None (the default), the strategy may locate a checkpoint (e.g. the most recent) by itself. eval_result: The output of Estimator.evaluate on this checkpoint. This should be set only if checkpoint_path is provided (otherwise it is unclear which checkpoint this eval refers to). Returns: The string path to the exported directory. 
Raises: ValueError: if the export_fn does not have the required signature """ # don't break existing export_fns that don't accept checkpoint_path and # eval_result export_fn_args = tf_inspect.getargspec(self.export_fn).args kwargs = {} if 'checkpoint_path' in export_fn_args: kwargs['checkpoint_path'] = checkpoint_path if 'eval_result' in export_fn_args: if 'checkpoint_path' not in export_fn_args: raise ValueError('An export_fn accepting eval_result must also accept ' 'checkpoint_path.') kwargs['eval_result'] = eval_result if 'strip_default_attrs' in export_fn_args: kwargs['strip_default_attrs'] = self.strip_default_attrs return self.export_fn(estimator, export_path, **kwargs)
apache-2.0
adelq/TextBlob
tests/test_sentiments.py
13
1884
from __future__ import unicode_literals import unittest from nose.tools import * # PEP8 asserts from nose.plugins.attrib import attr from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS class TestPatternSentiment(unittest.TestCase): def setUp(self): self.analyzer = PatternAnalyzer() def test_kind(self): assert_equal(self.analyzer.kind, CONTINUOUS) def test_analyze(self): p1 = "I feel great this morning." n1 = "This is a terrible car." p1_result = self.analyzer.analyze(p1) n1_result = self.analyzer.analyze(n1) assert_true(p1_result[0] > 0) assert_true(n1_result[0] < 0) assert_equal(p1_result.polarity, p1_result[0]) assert_equal(p1_result.subjectivity, p1_result[1]) class TestNaiveBayesAnalyzer(unittest.TestCase): def setUp(self): self.analyzer = NaiveBayesAnalyzer() def test_kind(self): assert_equal(self.analyzer.kind, DISCRETE) @attr('slow') def test_analyze(self): p1 = 'I feel great this morning.' n1 = 'This is a terrible car.' p1_result = self.analyzer.analyze(p1) assert_equal(p1_result[0], 'pos') assert_equal(self.analyzer.analyze(n1)[0], 'neg') # The 2nd item should be the probability that it is positive assert_true(isinstance(p1_result[1], float)) # 3rd item is probability that it is negative assert_true(isinstance(p1_result[2], float)) assert_about_equal(p1_result[1] + p1_result[2], 1) assert_equal(p1_result.classification, p1_result[0]) assert_equal(p1_result.p_pos, p1_result[1]) assert_equal(p1_result.p_neg, p1_result[2]) def assert_about_equal(first, second, places=4): return assert_equal(round(first, places), second) if __name__ == '__main__': unittest.main()
mit
bernard357/shellbot
examples/audit.py
1
4144
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Audit interactions in real-time

In this example we create a shell with one simple command: audit

- command: audit
- provides clear status if this channel is currently audited or not

- command: audit on
- starts auditing

- command: audit off
- ensure private interactions for some time

Multiple questions are addressed in this example:

- How to audit chat channels? There are mechanisms built in shellbot by
  which you can receive updates from the chat space and forward these to a
  safe place. For this you will need: an audit token that can receive
  updates, a shell command for starting and stopping the audit, and a
  component to handle updates, namely, an updater. All these are featured
  below.

- What can be done with audited updates? The module ``shellbot.updaters``
  offers standard solutions: write to log files, index updates in ELK, or
  events reflect updates in a sister channel. We expect that more updaters
  will be developed over time by the community. And of course, you can
  build your own.

- How to customize the handling of audited updates? Shellbot just invokes
  the member function ``put()`` from every updater. There are really no
  other constraints. Shellbot creates one updater instance per bot that it
  manages. This is delegated to the updater factory that is provided on
  engine initialisation.

To run this script you have to provide a custom configuration, or set
environment variables instead::

- ``CHANNEL_DEFAULT_PARTICIPANTS`` - Mention at least your e-mail address
- ``CISCO_SPARK_BOT_TOKEN`` - Received from Cisco Spark on bot registration
- ``CISCO_SPARK_AUDIT_TOKEN`` - The Cisco Spark token used for audit
- ``SERVER_URL`` - Public link used by Cisco Spark to reach your server

The bot token is specific to your run-time, please visit Cisco Spark for
Developers to get more details:

    https://developer.ciscospark.com/

The other token should be associated to a human being, and not to a bot.
This is required so that the software can receive all events for a chat
space. Without it, only messages sent to the bot will be audited.

For example, if you run this script under Linux or macOs with support from
ngrok for exposing services to the Internet::

    export CHANNEL_DEFAULT_PARTICIPANTS="alice@acme.com"
    export CISCO_SPARK_BOT_TOKEN="<token id from Cisco Spark for bot>"
    export CISCO_SPARK_AUDIT_TOKEN="<token id from Cisco Spark for audit>"
    export SERVER_URL="http://1a107f21.ngrok.io"
    python audit.py

"""

import logging
from multiprocessing import Process, Queue
import os

from shellbot import Engine, Context
from shellbot.updaters import FileUpdater


class UpdaterFactory(object):
    # create one updater per group channel

    def get_updater(self, id):
        # Each channel gets its own on-disk audit trail, keyed by channel id.
        return FileUpdater(path='./updater-{}.log'.format(id))


if __name__ == '__main__':

    # Send logging output to stdout so the example is easy to follow live.
    Context.set_logger()

    # use Cisco Spark and setup audit environment
    engine = Engine(
        type='spark',
        command='shellbot.commands.audit',
        updater_factory=UpdaterFactory())

    # Channel title is read from the environment by engine.configure().
    os.environ['CHAT_ROOM_TITLE'] = 'Audit tutorial'
    engine.configure()  # ensure all components are ready

    engine.bond(reset=True)  # create a group channel for this example

    engine.run()  # until Ctl-C

    engine.dispose()  # delete the initial group channel
apache-2.0
minhphung171093/GreenERP_V7
openerp/addons/web/tests/test_serving_base.py
138
1031
# -*- coding: utf-8 -*-
import random
import unittest2

from ..controllers.main import module_topological_sort as sort


def sample(population):
    """Return a random subset of *population* holding at most 5 elements."""
    size = random.randint(0, min(len(population), 5))
    return random.sample(population, size)


class TestModulesLoading(unittest2.TestCase):
    def setUp(self):
        # One thousand pseudo-module names, "0" through "999".
        # NOTE: kept as map() on purpose — this is Python 2 code and the
        # result must be a shuffleable list.
        self.mods = map(str, range(1000))

    def test_topological_sort(self):
        """Every module must appear after all of its dependencies."""
        random.shuffle(self.mods)
        # Each module may only depend on modules listed before it, which
        # guarantees the generated dependency graph is acyclic.
        modules = []
        for position, module_name in enumerate(self.mods):
            modules.append((module_name, sample(self.mods[:position])))
        random.shuffle(modules)
        ms = dict(modules)

        loaded = set()
        ordering = sort(ms)
        for module_name in ordering:
            dependencies = ms[module_name]
            message = (
                'Module %s (index %d), '
                'missing dependencies %s from loaded modules %s' % (
                    module_name, ordering.index(module_name),
                    dependencies, loaded))
            # loaded must already be a superset of this module's deps.
            self.assertGreaterEqual(loaded, set(dependencies), message)
            loaded.add(module_name)
agpl-3.0
soumyajitpaul/Soumyajit-Github-Byte-3
lib/werkzeug/datastructures.py
122
87447
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import codecs import mimetypes from copy import deepcopy from itertools import repeat from werkzeug._internal import _missing, _empty_stream from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \ PY2, text_type, integer_types, string_types, make_literal_wrapper, \ to_native from werkzeug.filesystem import get_filesystem_encoding _locale_delim_re = re.compile(r'[_-]') def is_immutable(self): raise TypeError('%r objects are immutable' % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in iteritems(mapping, multi=True): yield item elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): for value in value: yield key, value else: yield key, value else: for item in mapping: yield item def native_itermethods(names): if not PY2: return lambda x: x def setmethod(cls, name): itermethod = getattr(cls, name) setattr(cls, 'iter%s' % name, itermethod) listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw)) listmethod.__doc__ = \ 'Like :py:meth:`iter%s`, but returns a list.' % name setattr(cls, name, listmethod) def wrap(cls): for name in names: setmethod(cls, name) return cls return wrap class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. 
versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __delslice__(self, i, j): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def __setslice__(self, i, j, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, list.__repr__(self), ) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. 
versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return iteritems(self) def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def _iter_hashitems(self): return iteritems(self, multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. 
versionadded:: 0.5 :private: """ on_update = None def calls_update(name): def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall def setdefault(self, key, default=None): modified = key not in self rv = super(UpdateDictMixin, self).setdefault(key, default) if modified and self.on_update is not None: self.on_update(self) return rv def pop(self, key, default=_missing): modified = key in self if default is _missing: rv = super(UpdateDictMixin, self).pop(key) else: rv = super(UpdateDictMixin, self).pop(key, default) if modified and self.on_update is not None: self.on_update(self) return rv __setitem__ = calls_update('__setitem__') __delitem__ = calls_update('__delitem__') clear = calls_update('clear') popitem = calls_update('popitem') update = calls_update('update') del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. 
""" try: rv = self[key] if type is not None: rv = type(rv) except (KeyError, ValueError): rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. 
:param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. """ def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) elif isinstance(mapping, dict): tmp = {} for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: return dict.__getitem__(self, key)[0] raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. :param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. 
""" try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. :return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return an iterator of ``(key, value)`` pairs. :param multi: If set to `True` the iterator returned will have a pair for each value of each key. 
Otherwise it will only contain pairs for the first value of each key. """ for key, values in iteritems(dict, self): if multi: for value in values: yield key, value else: yield key, values[0] def lists(self): """Return a list of ``(key, values)`` pairs, where values is the list of all values associated with the key.""" for key, values in iteritems(dict, self): yield key, list(values) def keys(self): return iterkeys(dict, self) __iter__ = keys def values(self): """Returns an iterator of the first value on every key's value list.""" for values in itervalues(dict, self): yield values[0] def listvalues(self): """Return an iterator of all values associated with a key. Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True """ return itervalues(dict, self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def deepcopy(self, memo=None): """Return a deep copy of this object.""" return self.__class__(deepcopy(self.to_dict(flat=False), memo)) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. 
:return: a :class:`dict` """ if flat: return dict(iteritems(self)) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists: >>> a = MultiDict({'x': 1}) >>> b = MultiDict({'x': 2, 'y': 3}) >>> a.update(b) >>> a MultiDict([('y', 3), ('x', 1), ('x', 2)]) If the value list for a key in ``other_dict`` is empty, no new values will be added to the dict and the key will not be created: >>> x = {'empty_list': []} >>> y = MultiDict() >>> y.update(x) >>> y MultiDict([]) """ for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: return dict.pop(self, key)[0] except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) return (item[0], item[1][0]) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. """ return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.deepcopy(memo=memo) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True))) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. 
This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). """ __slots__ = ('prev', 'key', 'value', 'next') def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. 
""" def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = iteritems(self, multi=True) iter2 = iteritems(other, multi=True) try: for k1, v1 in iter1: k2, v2 = next(iter2) if k1 != k2 or v1 != v2: return False except StopIteration: return False try: next(iter2) except StopIteration: return True return False if len(self) != len(other): return False for key, values in iterlists(self): if other.getlist(key) != values: return False return True def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def __getstate__(self): return list(iteritems(self, multi=True)) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def keys(self): return (key for key, value in iteritems(self)) __iter__ = keys def values(self): return (value for key, value in iteritems(self)) def items(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def lists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def listvalues(self): for key, values in iterlists(self): yield values def add(self, key, value): dict.setdefault(self, key, 
[]).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, value) def setlistdefault(self, key, default_list=None): raise TypeError('setlistdefault is unsupported for ' 'ordered multi dicts') def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header(value, dict((k.replace('_', '-'), v) for k, v in kw.items())) def _unicodify_header_value(value): if isinstance(value, bytes): value = value.decode('latin-1') if not isinstance(value, text_type): value = text_type(value) return value @native_itermethods(['keys', 'values', 'items']) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. 
This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. :param defaults: The list of default values for the :class:`Headers`. .. versionchanged:: 0.9 This data structure now stores unicode values similar to how the multi dicts do it. The main difference is that bytes can be set as well which will automatically be latin1 decoded. .. versionchanged:: 0.9 The :meth:`linked` function was removed without replacement as it was an API that does not support the changes to the encoding model. """ def __init__(self, defaults=None): self._list = [] if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, integer_types): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) if not isinstance(key, string_types): raise exceptions.BadRequestKeyError(key) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. 
if _get_mode: raise KeyError() raise exceptions.BadRequestKeyError(key) def __eq__(self, other): return other.__class__ is self.__class__ and \ set(other._list) == set(self._list) def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None, as_bytes=False): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the default value is returned. :param as_bytes: return bytes instead of unicode strings. """ try: rv = self.__getitem__(key, _get_mode=True) except KeyError: return default if as_bytes: rv = rv.encode('latin1') if type is None: return rv try: return type(rv) except ValueError: return default def getlist(self, key, type=None, as_bytes=False): """Return the list of items for a given key. If that key is not in the :class:`Headers`, the return value will be an empty list. Just as :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will be converted with the callable defined there. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. 
:return: a :class:`list` of all the values for the key. :param as_bytes: return bytes instead of unicode strings. """ ikey = key.lower() result = [] for k, v in self: if k.lower() == ikey: if as_bytes: v = v.encode('latin1') if type is not None: try: v = type(v) except ValueError: continue result.append(v) return result def get_all(self, name): """Return a list of all the values for the named field. This method is compatible with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.get_all` method. """ return self.getlist(name) def items(self, lower=False): for key, value in self: if lower: key = key.lower() yield key, value def keys(self, lower=False): for key, _ in iteritems(self, lower): yield key def values(self): for _, value in iteritems(self): yield value def extend(self, iterable): """Extend the headers with a dict or an iterable yielding keys and values. """ if isinstance(iterable, dict): for key, value in iteritems(iterable): if isinstance(value, (tuple, list)): for v in value: self.add(key, v) else: self.add(key, value) else: for key, value in iterable: self.add(key, value) def __delitem__(self, key, _index_operation=True): if _index_operation and isinstance(key, (integer_types, slice)): del self._list[key] return key = key.lower() new = [] for k, v in self._list: if k.lower() != key: new.append((k, v)) self._list[:] = new def remove(self, key): """Remove a key. :param key: The key to be removed. """ return self.__delitem__(key, _index_operation=False) def pop(self, key=None, default=_missing): """Removes and returns a key or index. :param key: The key to be popped. If this is an integer the item at that position is removed, if it's a string the value for that key is. If the key is omitted or `None` the last item is removed. :return: an item. 
""" if key is None: return self._list.pop() if isinstance(key, integer_types): return self._list.pop(key) try: rv = self[key] self.remove(key) except KeyError: if default is not _missing: return default raise return rv def popitem(self): """Removes a key or index and returns a (key, value) item.""" return self.pop() def __contains__(self, key): """Check if a key is present.""" try: self.__getitem__(key, _get_mode=True) except KeyError: return False return True has_key = __contains__ def __iter__(self): """Yield ``(key, value)`` tuples.""" return iter(self._list) def __len__(self): return len(self._list) def add(self, _key, _value, **kw): """Add a new header tuple to the list. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes:: >>> d = Headers() >>> d.add('Content-Type', 'text/plain') >>> d.add('Content-Disposition', 'attachment', filename='foo.png') The keyword argument dumping uses :func:`dump_options_header` behind the scenes. .. versionadded:: 0.4.1 keyword arguments were added for :mod:`wsgiref` compatibility. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) self._list.append((_key, _value)) def _validate_value(self, value): if not isinstance(value, text_type): raise TypeError('Value should be unicode.') if u'\n' in value or u'\r' in value: raise ValueError('Detected newline in header value. This is ' 'a potential security problem') def add_header(self, _key, _value, **_kw): """Add a new header tuple to the list. An alias for :meth:`add` for compatibility with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.add_header` method. """ self.add(_key, _value, **_kw) def clear(self): """Clears all headers.""" del self._list[:] def set(self, _key, _value, **kw): """Remove all header tuples for `key` and add a new one. The newly added key either appears at the end of the list if there was no entry or replaces the first one. 
Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes. See :meth:`add` for more information. .. versionchanged:: 0.6.1 :meth:`set` now accepts the same arguments as :meth:`add`. :param key: The key to be inserted. :param value: The value to be inserted. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) if not self._list: self._list.append((_key, _value)) return listiter = iter(self._list) ikey = _key.lower() for idx, (old_key, old_value) in enumerate(listiter): if old_key.lower() == ikey: # replace first ocurrence self._list[idx] = (_key, _value) break else: self._list.append((_key, _value)) return self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] def setdefault(self, key, value): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key in self: return self[key] self.set(key, value) return value def __setitem__(self, key, value): """Like :meth:`set` but also supports index/slice based setting.""" if isinstance(key, (slice, integer_types)): if isinstance(key, integer_types): value = [value] value = [(k, _unicodify_header_value(v)) for (k, v) in value] [self._validate_value(v) for (k, v) in value] if isinstance(key, integer_types): self._list[key] = value[0] else: self._list[key] = value else: self.set(key, value) def to_list(self, charset='iso-8859-1'): """Convert the headers into a list suitable for WSGI.""" from warnings import warn warn(DeprecationWarning('Method removed, use to_wsgi_list instead'), stacklevel=2) return self.to_wsgi_list() def to_wsgi_list(self): """Convert the headers into a list suitable for WSGI. 
The values are byte strings in Python 2 converted to latin1 and unicode strings in Python 3 for the WSGI server to encode. :return: list """ if PY2: return [(to_native(k), v.encode('latin1')) for k, v in self] return list(self) def copy(self): return self.__class__(self._list) def __copy__(self): return self.copy() def __str__(self): """Returns formatted headers suitable for HTTP transmission.""" strs = [] for key, value in self.to_wsgi_list(): strs.append('%s: %s' % (key, value)) strs.append('\r\n') return '\r\n'.join(strs) def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, list(self) ) class ImmutableHeadersMixin(object): """Makes a :class:`Headers` immutable. We do not mark them as hashable though since the only usecase for this datastructure in Werkzeug is a view on a mutable structure. .. versionadded:: 0.5 :private: """ def __delitem__(self, key): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) set = __setitem__ def add(self, item): is_immutable(self) remove = add_header = add def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def popitem(self): is_immutable(self) def setdefault(self, key, default): is_immutable(self) class EnvironHeaders(ImmutableHeadersMixin, Headers): """Read only version of the headers from a WSGI environment. This provides the same interface as `Headers` and is constructed from a WSGI environment. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __init__(self, environ): self.environ = environ def __eq__(self, other): return self.environ is other.environ def __getitem__(self, key, _get_mode=False): # _get_mode is a no-op for this class as there is no index but # used because get() calls it. 
key = key.upper().replace('-', '_') if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): return _unicodify_header_value(self.environ[key]) return _unicodify_header_value(self.environ['HTTP_' + key]) def __len__(self): # the iter is necessary because otherwise list calls our # len which would call list again and so forth. return len(list(iter(self))) def __iter__(self): for key, value in iteritems(self.environ): if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield (key[5:].replace('_', '-').title(), _unicodify_header_value(value)) elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield (key.replace('_', '-').title(), _unicodify_header_value(value)) def copy(self): raise TypeError('cannot create %r copies' % self.__class__.__name__) @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` instances as sequence and it will combine the return values of all wrapped dicts: >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict >>> post = MultiDict([('foo', 'bar')]) >>> get = MultiDict([('blub', 'blah')]) >>> combined = CombinedMultiDict([get, post]) >>> combined['foo'] 'bar' >>> combined['blub'] 'blah' This works for all read operations and will raise a `TypeError` for methods that usually change data which isn't possible. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. 
""" def __reduce_ex__(self, protocol): return type(self), (self.dicts,) def __init__(self, dicts=None): self.dicts = dicts or [] @classmethod def fromkeys(cls): raise TypeError('cannot create %r instances by fromkeys' % cls.__name__) def __getitem__(self, key): for d in self.dicts: if key in d: return d[key] raise exceptions.BadRequestKeyError(key) def get(self, key, default=None, type=None): for d in self.dicts: if key in d: if type is not None: try: return type(d[key]) except ValueError: continue return d[key] return default def getlist(self, key, type=None): rv = [] for d in self.dicts: rv.extend(d.getlist(key, type)) return rv def _keys_impl(self): """This function exists so __len__ can be implemented more efficiently, saving one list creation from an iterator. Using this for Python 2's ``dict.keys`` behavior would be useless since `dict.keys` in Python 2 returns a list, while we have a set here. """ rv = set() for d in self.dicts: rv.update(iterkeys(d)) return rv def keys(self): return iter(self._keys_impl()) __iter__ = keys def items(self, multi=False): found = set() for d in self.dicts: for key, value in iteritems(d, multi): if multi: yield key, value elif key not in found: found.add(key) yield key, value def values(self): for key, value in iteritems(self): yield value def lists(self): rv = {} for d in self.dicts: for key, values in iterlists(d): rv.setdefault(key, []).extend(values) return iteritems(rv) def listvalues(self): return (x[1] for x in self.lists()) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self.dicts[:]) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first item for each key. 
:return: a :class:`dict` """ rv = {} for d in reversed(self.dicts): rv.update(d.to_dict(flat)) return rv def __len__(self): return len(self._keys_impl()) def __contains__(self, key): for d in self.dicts: if key in d: return True return False has_key = __contains__ def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.dicts) class FileMultiDict(MultiDict): """A special :class:`MultiDict` that has convenience methods to add files to it. This is used for :class:`EnvironBuilder` and generally useful for unittesting. .. versionadded:: 0.5 """ def add_file(self, name, file, filename=None, content_type=None): """Adds a new file to the dict. `file` can be a file name or a :class:`file`-like or a :class:`FileStorage` object. :param name: the name of the field. :param file: a filename or :class:`file`-like object :param filename: an optional filename :param content_type: an optional content type """ if isinstance(file, FileStorage): value = file else: if isinstance(file, string_types): if filename is None: filename = file file = open(file, 'rb') if filename and content_type is None: content_type = mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' value = FileStorage(file, filename, name, content_type) self.add(name, value) class ImmutableDict(ImmutableDictMixin, dict): """An immutable :class:`dict`. .. versionadded:: 0.5 """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, dict.__repr__(self), ) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return dict(self) def __copy__(self): return self class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): """An immutable :class:`MultiDict`. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. 
Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return MultiDict(self) def __copy__(self): return self class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): """An immutable :class:`OrderedMultiDict`. .. versionadded:: 0.6 """ def _iter_hashitems(self): return enumerate(iteritems(self, multi=True)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return OrderedMultiDict(self) def __copy__(self): return self @native_itermethods(['values']) class Accept(ImmutableList): """An :class:`Accept` object is just a list subclass for lists of ``(value, quality)`` tuples. It is automatically sorted by quality. All :class:`Accept` objects work similar to a list but provide extra functionality for working with the data. Containment checks are normalized to the rules of that header: >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) >>> a.best 'ISO-8859-1' >>> 'iso-8859-1' in a True >>> 'UTF8' in a True >>> 'utf7' in a False To get the quality for an item you can use normal item lookup: >>> print a['utf-8'] 0.7 >>> a['utf7'] 0 .. versionchanged:: 0.5 :class:`Accept` objects are forced immutable now. 
""" def __init__(self, values=()): if values is None: list.__init__(self) self.provided = False elif isinstance(values, Accept): self.provided = values.provided list.__init__(self, values) else: self.provided = True values = [(a, b) for b, a in values] values.sort() values.reverse() list.__init__(self, [(a, b) for b, a in values]) def _value_matches(self, value, item): """Check if a value matches a given accept item.""" return item == '*' or item.lower() == value.lower() def __getitem__(self, key): """Besides index lookup (getting item n) you can also pass it a string to get the quality for the item. If the item is not in the list, the returned quality is ``0``. """ if isinstance(key, string_types): return self.quality(key) return list.__getitem__(self, key) def quality(self, key): """Returns the quality of the key. .. versionadded:: 0.6 In previous versions you had to use the item-lookup syntax (eg: ``obj[key]`` instead of ``obj.quality(key)``) """ for item, quality in self: if self._value_matches(key, item): return quality return 0 def __contains__(self, value): for item, quality in self: if self._value_matches(value, item): return True return False def __repr__(self): return '%s([%s])' % ( self.__class__.__name__, ', '.join('(%r, %s)' % (x, y) for x, y in self) ) def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, string_types): for idx, (item, quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key) def find(self, key): """Get the position of an entry or return -1. :param key: The key to be looked up. 
""" try: return self.index(key) except ValueError: return -1 def values(self): """Iterate over all values.""" for item in self: yield item[0] def to_header(self): """Convert the header set into an HTTP header string.""" result = [] for value, quality in self: if quality != 1: value = '%s;q=%s' % (value, quality) result.append(value) return ','.join(result) def __str__(self): return self.to_header() def best_match(self, matches, default=None): """Returns the best match from a list of possible matches based on the quality of the client. If two items have the same quality, the one is returned that comes first. :param matches: a list of matches to check for :param default: the value that is returned if none match """ best_quality = -1 result = default for server_item in matches: for client_item, quality in self: if quality <= best_quality: break if self._value_matches(server_item, client_item) \ and quality > 0: best_quality = quality result = server_item return result @property def best(self): """The best match as value.""" if self: return self[0][0] class MIMEAccept(Accept): """Like :class:`Accept` but with special methods and behavior for mimetypes. """ def _value_matches(self, value, item): def _normalize(x): x = x.lower() return x == '*' and ('*', '*') or x.split('/', 1) # this is from the application which is trusted. 
to avoid developer # frustration we actually check these for valid values if '/' not in value: raise ValueError('invalid mimetype %r' % value) value_type, value_subtype = _normalize(value) if value_type == '*' and value_subtype != '*': raise ValueError('invalid mimetype %r' % value) if '/' not in item: return False item_type, item_subtype = _normalize(item) if item_type == '*' and item_subtype != '*': return False return ( (item_type == item_subtype == '*' or value_type == value_subtype == '*') or (item_type == value_type and (item_subtype == '*' or value_subtype == '*' or item_subtype == value_subtype)) ) @property def accept_html(self): """True if this object accepts HTML.""" return ( 'text/html' in self or 'application/xhtml+xml' in self or self.accept_xhtml ) @property def accept_xhtml(self): """True if this object accepts XHTML.""" return ( 'application/xhtml+xml' in self or 'application/xml' in self ) @property def accept_json(self): """True if this object accepts JSON.""" return 'application/json' in self class LanguageAccept(Accept): """Like :class:`Accept` but with normalization for languages.""" def _value_matches(self, value, item): def _normalize(language): return _locale_delim_re.split(language.lower()) return item == '*' or _normalize(value) == _normalize(item) class CharsetAccept(Accept): """Like :class:`Accept` but with normalization for charsets.""" def _value_matches(self, value, item): def _normalize(name): try: return codecs.lookup(name).name except LookupError: return name.lower() return item == '*' or _normalize(value) == _normalize(item) def cache_property(key, empty, type): """Return a new property object for a cache header. 
Useful if you want to add support for a cache extension in a subclass.""" return property(lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), 'accessor for %r' % key) class _CacheControl(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Cache-Control header. It has accessors for all the cache-control directives specified in RFC 2616. The class does not differentiate between request and response directives. Because the cache-control directives in the HTTP header use dashes the python descriptors use underscores for that. To get a header of the :class:`CacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionchanged:: 0.4 Setting `no_cache` or `private` to boolean `True` will set the implicit none-value which is ``*``: >>> cc = ResponseCacheControl() >>> cc.no_cache = True >>> cc <ResponseCacheControl 'no-cache'> >>> cc.no_cache '*' >>> cc.no_cache = None >>> cc <ResponseCacheControl ''> In versions before 0.5 the behavior documented here affected the now no longer existing `CacheControl` class. 
""" no_cache = cache_property('no-cache', '*', None) no_store = cache_property('no-store', None, bool) max_age = cache_property('max-age', -1, int) no_transform = cache_property('no-transform', None, None) def __init__(self, values=(), on_update=None): dict.__init__(self, values or ()) self.on_update = on_update self.provided = values is not None def _get_cache_value(self, key, empty, type): """Used internally by the accessor properties.""" if type is bool: return key in self if key in self: value = self[key] if value is None: return empty elif type is not None: try: value = type(value) except ValueError: pass return value def _set_cache_value(self, key, value, type): """Used internally by the accessor properties.""" if type is bool: if value: self[key] = None else: self.pop(key, None) else: if value is None: self.pop(key) elif value is True: self[key] = None else: self[key] = value def _del_cache_value(self, key): """Used internally by the accessor properties.""" if key in self: del self[key] def to_header(self): """Convert the stored values into a cache control header.""" return dump_header(self) def __str__(self): return self.to_header() def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, " ".join( "%s=%r" % (k, v) for k, v in sorted(self.items()) ), ) class RequestCacheControl(ImmutableDictMixin, _CacheControl): """A cache control for requests. This is immutable and gives access to all the request-relevant cache control headers. To get a header of the :class:`RequestCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. 
""" max_stale = cache_property('max-stale', '*', int) min_fresh = cache_property('min-fresh', '*', int) no_transform = cache_property('no-transform', None, None) only_if_cached = cache_property('only-if-cached', None, bool) class ResponseCacheControl(_CacheControl): """A cache control for responses. Unlike :class:`RequestCacheControl` this is mutable and gives access to response-relevant cache control headers. To get a header of the :class:`ResponseCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ public = cache_property('public', None, bool) private = cache_property('private', '*', None) must_revalidate = cache_property('must-revalidate', None, bool) proxy_revalidate = cache_property('proxy-revalidate', None, bool) s_maxage = cache_property('s-maxage', None, None) # attach cache_property to the _CacheControl as staticmethod # so that others can reuse it. _CacheControl.cache_property = staticmethod(cache_property) class CallbackDict(UpdateDictMixin, dict): """A dict that calls a function passed every time something is changed. The function is passed the dict instance. """ def __init__(self, initial=None, on_update=None): dict.__init__(self, initial or ()) self.on_update = on_update def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, dict.__repr__(self) ) class HeaderSet(object): """Similar to the :class:`ETags` class this implements a set-like structure. Unlike :class:`ETags` this is case insensitive and used for vary, allow, and content-language headers. 
If not constructed using the :func:`parse_set_header` function the instantiation works like this: >>> hs = HeaderSet(['foo', 'bar', 'baz']) >>> hs HeaderSet(['foo', 'bar', 'baz']) """ def __init__(self, headers=None, on_update=None): self._headers = list(headers or ()) self._set = set([x.lower() for x in self._headers]) self.on_update = on_update def add(self, header): """Add a new header to the set.""" self.update((header,)) def remove(self, header): """Remove a header from the set. This raises an :exc:`KeyError` if the header is not in the set. .. versionchanged:: 0.5 In older versions a :exc:`IndexError` was raised instead of a :exc:`KeyError` if the object was missing. :param header: the header to be removed. """ key = header.lower() if key not in self._set: raise KeyError(header) self._set.remove(key) for idx, key in enumerate(self._headers): if key.lower() == header: del self._headers[idx] break if self.on_update is not None: self.on_update(self) def update(self, iterable): """Add all the headers from the iterable to the set. :param iterable: updates the set with the items from the iterable. """ inserted_any = False for header in iterable: key = header.lower() if key not in self._set: self._headers.append(header) self._set.add(key) inserted_any = True if inserted_any and self.on_update is not None: self.on_update(self) def discard(self, header): """Like :meth:`remove` but ignores errors. :param header: the header to be discarded. """ try: return self.remove(header) except KeyError: pass def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1 def index(self, header): """Return the index of the header in the set or raise an :exc:`IndexError`. :param header: the header to be looked up. 
""" rv = self.find(header) if rv < 0: raise IndexError(header) return rv def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self) def as_set(self, preserve_casing=False): """Return the set as real python set type. When calling this, all the items are converted to lowercase and the ordering is lost. :param preserve_casing: if set to `True` the items in the set returned will have the original case like in the :class:`HeaderSet`, otherwise they will be lowercase. """ if preserve_casing: return set(self._headers) return set(self._set) def to_header(self): """Convert the header set into an HTTP header string.""" return ', '.join(map(quote_header_value, self._headers)) def __getitem__(self, idx): return self._headers[idx] def __delitem__(self, idx): rv = self._headers.pop(idx) self._set.remove(rv.lower()) if self.on_update is not None: self.on_update(self) def __setitem__(self, idx, value): old = self._headers[idx] self._set.remove(old.lower()) self._headers[idx] = value self._set.add(value.lower()) if self.on_update is not None: self.on_update(self) def __contains__(self, header): return header.lower() in self._set def __len__(self): return len(self._set) def __iter__(self): return iter(self._headers) def __nonzero__(self): return bool(self._set) def __str__(self): return self.to_header() def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, self._headers ) class ETags(object): """A set that can be used to check if one etag is present in a collection of etags. """ def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): self._strong = frozenset(not star_tag and strong_etags or ()) self._weak = frozenset(weak_etags or ()) self.star_tag = star_tag def as_set(self, include_weak=False): """Convert the `ETags` object into a python set. 
Per default all the weak etags are not part of this set.""" rv = set(self._strong) if include_weak: rv.update(self._weak) return rv def is_weak(self, etag): """Check if an etag is weak.""" return etag in self._weak def contains_weak(self, etag): """Check if an etag is part of the set including weak and strong tags.""" return self.is_weak(etag) or self.contains(etag) def contains(self, etag): """Check if an etag is part of the set ignoring weak tags. It is also possible to use the ``in`` operator. """ if self.star_tag: return True return etag in self._strong def contains_raw(self, etag): """When passed a quoted tag it will check if this tag is part of the set. If the tag is weak it is checked against weak and strong tags, otherwise strong only.""" etag, weak = unquote_etag(etag) if weak: return self.contains_weak(etag) return self.contains(etag) def to_header(self): """Convert the etags set into a HTTP header string.""" if self.star_tag: return '*' return ', '.join( ['"%s"' % x for x in self._strong] + ['w/"%s"' % x for x in self._weak] ) def __call__(self, etag=None, data=None, include_weak=False): if [etag, data].count(None) != 1: raise TypeError('either tag or data required, but at least one') if etag is None: etag = generate_etag(data) if include_weak: if etag in self._weak: return True return etag in self._strong def __bool__(self): return bool(self.star_tag or self._strong or self._weak) __nonzero__ = __bool__ def __str__(self): return self.to_header() def __iter__(self): return iter(self._strong) def __contains__(self, etag): return self.contains(etag) def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class IfRange(object): """Very simple object that represents the `If-Range` header in parsed form. It will either have neither a etag or date or one of either but never both. .. versionadded:: 0.7 """ def __init__(self, etag=None, date=None): #: The etag parsed and unquoted. 
Ranges always operate on strong #: etags so the weakness information is not necessary. self.etag = etag #: The date in parsed format or `None`. self.date = date def to_header(self): """Converts the object back into an HTTP header.""" if self.date is not None: return http_date(self.date) if self.etag is not None: return quote_etag(self.etag) return '' def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Range(object): """Represents a range header. All the methods are only supporting bytes as unit. It does store multiple ranges but :meth:`range_for_length` will only work if only one range is provided. .. versionadded:: 0.7 """ def __init__(self, units, ranges): #: The units of this range. Usually "bytes". self.units = units #: A list of ``(begin, end)`` tuples for the range header provided. #: The ranges are non-inclusive. self.ranges = ranges def range_for_length(self, length): """If the range is for bytes, the length is not None and there is exactly one range and it is satisfiable it returns a ``(start, stop)`` tuple, otherwise `None`. """ if self.units != 'bytes' or length is None or len(self.ranges) != 1: return None start, end = self.ranges[0] if end is None: end = length if start < 0: start += length if is_byte_range_valid(start, end, length): return start, min(end, length) def make_content_range(self, length): """Creates a :class:`~werkzeug.datastructures.ContentRange` object from the current range and given content length. 
""" rng = self.range_for_length(length) if rng is not None: return ContentRange(self.units, rng[0], rng[1], length) def to_header(self): """Converts the object back into an HTTP header.""" ranges = [] for begin, end in self.ranges: if end is None: ranges.append(begin >= 0 and '%s-' % begin or str(begin)) else: ranges.append('%s-%s' % (begin, end - 1)) return '%s=%s' % (self.units, ','.join(ranges)) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class ContentRange(object): """Represents the content range header. .. versionadded:: 0.7 """ def __init__(self, units, start, stop, length=None, on_update=None): assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self.on_update = on_update self.set(start, stop, length, units) def _callback_property(name): def fget(self): return getattr(self, name) def fset(self, value): setattr(self, name, value) if self.on_update is not None: self.on_update(self) return property(fget, fset) #: The units to use, usually "bytes" units = _callback_property('_units') #: The start point of the range or `None`. start = _callback_property('_start') #: The stop point of the range (non-inclusive) or `None`. Can only be #: `None` if also start is `None`. stop = _callback_property('_stop') #: The length of the range or `None`. length = _callback_property('_length') def set(self, start, stop, length=None, units='bytes'): """Simple method to update the ranges.""" assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self._units = units self._start = start self._stop = stop self._length = length if self.on_update is not None: self.on_update(self) def unset(self): """Sets the units to `None` which indicates that the header should no longer be used. 
""" self.set(None, None, units=None) def to_header(self): if self.units is None: return '' if self.length is None: length = '*' else: length = self.length if self.start is None: return '%s */%s' % (self.units, length) return '%s %s-%s/%s' % ( self.units, self.start, self.stop - 1, length ) def __nonzero__(self): return self.units is not None __bool__ = __nonzero__ def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Authorization(ImmutableDictMixin, dict): """Represents an `Authorization` header sent by the client. You should not create this kind of object yourself but use it when it's returned by the `parse_authorization_header` function. This object is a dict subclass and can be altered by setting dict items but it should be considered immutable as it's returned by the client and not meant for modifications. .. versionchanged:: 0.5 This object became immutable. """ def __init__(self, auth_type, data=None): dict.__init__(self, data or {}) self.type = auth_type username = property(lambda x: x.get('username'), doc=''' The username transmitted. This is set for both basic and digest auth all the time.''') password = property(lambda x: x.get('password'), doc=''' When the authentication type is basic this is the password transmitted by the client, else `None`.''') realm = property(lambda x: x.get('realm'), doc=''' This is the server realm sent back for HTTP digest auth.''') nonce = property(lambda x: x.get('nonce'), doc=''' The nonce the server sent for digest auth, sent back by the client. A nonce should be unique for every 401 response for HTTP digest auth.''') uri = property(lambda x: x.get('uri'), doc=''' The URI from Request-URI of the Request-Line; duplicated because proxies are allowed to change the Request-Line in transit. HTTP digest auth only.''') nc = property(lambda x: x.get('nc'), doc=''' The nonce count value transmitted by clients if a qop-header is also transmitted. 
HTTP digest auth only.''') cnonce = property(lambda x: x.get('cnonce'), doc=''' If the server sent a qop-header in the ``WWW-Authenticate`` header, the client has to provide this value for HTTP digest auth. See the RFC for more details.''') response = property(lambda x: x.get('response'), doc=''' A string of 32 hex digits computed as defined in RFC 2617, which proves that the user knows a password. Digest auth only.''') opaque = property(lambda x: x.get('opaque'), doc=''' The opaque header from the server returned unchanged by the client. It is recommended that this string be base64 or hexadecimal data. Digest auth only.''') @property def qop(self): """Indicates what "quality of protection" the client has applied to the message for HTTP digest auth.""" def on_update(header_set): if not header_set and 'qop' in self: del self['qop'] elif header_set: self['qop'] = header_set.to_header() return parse_set_header(self.get('qop'), on_update) class WWWAuthenticate(UpdateDictMixin, dict): """Provides simple access to `WWW-Authenticate` headers.""" #: list of keys that require quoting in the generated header _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop']) def __init__(self, auth_type=None, values=None, on_update=None): dict.__init__(self, values or ()) if auth_type: self['__auth_type__'] = auth_type self.on_update = on_update def set_basic(self, realm='authentication required'): """Clear the auth info and enable basic auth.""" dict.clear(self) dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) if self.on_update: self.on_update(self) def set_digest(self, realm, nonce, qop=('auth',), opaque=None, algorithm=None, stale=False): """Clear the auth info and enable digest auth.""" d = { '__auth_type__': 'digest', 'realm': realm, 'nonce': nonce, 'qop': dump_header(qop) } if stale: d['stale'] = 'TRUE' if opaque is not None: d['opaque'] = opaque if algorithm is not None: d['algorithm'] = algorithm dict.clear(self) dict.update(self, d) if 
self.on_update: self.on_update(self) def to_header(self): """Convert the stored values into a WWW-Authenticate header.""" d = dict(self) auth_type = d.pop('__auth_type__', None) or 'basic' return '%s %s' % (auth_type.title(), ', '.join([ '%s=%s' % (key, quote_header_value(value, allow_token=key not in self._require_quoting)) for key, value in iteritems(d) ])) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) def auth_property(name, doc=None): """A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented. """ def _set_value(self, value): if value is None: self.pop(name, None) else: self[name] = str(value) return property(lambda x: x.get(name), _set_value, doc=doc) def _set_property(name, doc=None): def fget(self): def on_update(header_set): if not header_set and name in self: del self[name] elif header_set: self[name] = header_set.to_header() return parse_set_header(self.get(name), on_update) return property(fget, doc=doc) type = auth_property('__auth_type__', doc=''' The type of the auth mechanism. HTTP currently specifies `Basic` and `Digest`.''') realm = auth_property('realm', doc=''' A string to be displayed to users so they know which username and password to use. This string should contain at least the name of the host performing the authentication and might additionally indicate the collection of users who might have access.''') domain = _set_property('domain', doc=''' A list of URIs that define the protection space. 
If a URI is an absolute path, it is relative to the canonical root URL of the server being accessed.''') nonce = auth_property('nonce', doc=''' A server-specified data string which should be uniquely generated each time a 401 response is made. It is recommended that this string be base64 or hexadecimal data.''') opaque = auth_property('opaque', doc=''' A string of data, specified by the server, which should be returned by the client unchanged in the Authorization header of subsequent requests with URIs in the same protection space. It is recommended that this string be base64 or hexadecimal data.''') algorithm = auth_property('algorithm', doc=''' A string indicating a pair of algorithms used to produce the digest and a checksum. If this is not present it is assumed to be "MD5". If the algorithm is not understood, the challenge should be ignored (and a different one used, if there is more than one).''') qop = _set_property('qop', doc=''' A set of quality-of-privacy directives such as auth and auth-int.''') def _get_stale(self): val = self.get('stale') if val is not None: return val.lower() == 'true' def _set_stale(self, value): if value is None: self.pop('stale', None) else: self['stale'] = value and 'TRUE' or 'FALSE' stale = property(_get_stale, _set_stale, doc=''' A flag, indicating that the previous request from the client was rejected because the nonce value was stale.''') del _get_stale, _set_stale # make auth_property a staticmethod so that subclasses of # `WWWAuthenticate` can use it for new properties. auth_property = staticmethod(auth_property) del _set_property class FileStorage(object): """The :class:`FileStorage` class is a thin wrapper over incoming files. It is used by the request object to represent uploaded files. All the attributes of the wrapper stream are proxied by the file storage so it's possible to do ``storage.read()`` instead of the long form ``storage.stream.read()``. 
""" def __init__(self, stream=None, filename=None, name=None, content_type=None, content_length=None, headers=None): self.name = name self.stream = stream or _empty_stream # if no filename is provided we can attempt to get the filename # from the stream object passed. There we have to be careful to # skip things like <fdopen>, <stderr> etc. Python marks these # special filenames with angular brackets. if filename is None: filename = getattr(stream, 'name', None) s = make_literal_wrapper(filename) if filename and filename[0] == s('<') and filename[-1] == s('>'): filename = None # On Python 3 we want to make sure the filename is always unicode. # This might not be if the name attribute is bytes due to the # file being opened from the bytes API. if not PY2 and isinstance(filename, bytes): filename = filename.decode(get_filesystem_encoding(), 'replace') self.filename = filename if headers is None: headers = Headers() self.headers = headers if content_type is not None: headers['Content-Type'] = content_type if content_length is not None: headers['Content-Length'] = str(content_length) def _parse_content_type(self): if not hasattr(self, '_parsed_content_type'): self._parsed_content_type = \ parse_options_header(self.content_type) @property def content_type(self): """The content-type sent in the header. Usually not available""" return self.headers.get('content-type') @property def content_length(self): """The content-length sent in the header. Usually not available""" return int(self.headers.get('content-length') or 0) @property def mimetype(self): """Like :attr:`content_type`, but without parameters (eg, without charset, type etc.) and always lowercase. For example if the content type is ``text/HTML; charset=utf-8`` the mimetype would be ``'text/html'``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[0].lower() @property def mimetype_params(self): """The mimetype parameters as dict. 
For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[1] def save(self, dst, buffer_size=16384): """Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename or open file object the uploaded file is saved to. :param buffer_size: the size of the buffer. This works the same as the `length` parameter of :func:`shutil.copyfileobj`. """ from shutil import copyfileobj close_dst = False if isinstance(dst, string_types): dst = open(dst, 'wb') close_dst = True try: copyfileobj(self.stream, dst, buffer_size) finally: if close_dst: dst.close() def close(self): """Close the underlying file if possible.""" try: self.stream.close() except Exception: pass def __nonzero__(self): return bool(self.filename) __bool__ = __nonzero__ def __getattr__(self, name): return getattr(self.stream, name) def __iter__(self): return iter(self.readline, '') def __repr__(self): return '<%s: %r (%r)>' % ( self.__class__.__name__, self.filename, self.content_type ) # circular dependencies from werkzeug.http import dump_options_header, dump_header, generate_etag, \ quote_header_value, parse_set_header, unquote_etag, quote_etag, \ parse_options_header, http_date, is_byte_range_valid from werkzeug import exceptions
apache-2.0
3L3N4/metagoofil
extractors/metadataMSOfficeXML.py
16
7833
import unzip import zipfile import sys import re import os import random import myparser class metaInfoMS: def __init__(self): self.template ="" self.totalTime ="" self.pages ="" self.words ="" self.characters ="" self.application ="" self.docSecurity ="" self.lines ="" self.paragraphs ="" self.scaleCrop ="" self.company ="" self.linksUpToDate ="" self.charactersWithSpaces ="" self.shareDoc ="" self.hyperlinksChanged ="" self.appVersion ="" self.title ="" self.subject ="" self.creator ="" self.keywords ="" self.lastModifiedBy ="" self.revision ="" self.createdDate ="" self.modifiedDate ="" self.userscomments ="" self.thumbnailPath ="" self.comments= "ok" self.text="" def __init__(self,filepath): self.template ="" self.totalTime ="" self.pages ="" self.words ="" self.characters ="" self.application ="" self.docSecurity ="" self.lines ="" self.paragraphs ="" self.scaleCrop ="" self.company ="" self.linksUpToDate ="" self.charactersWithSpaces ="" self.shareDoc ="" self.hyperlinksChanged ="" self.appVersion ="" self.title ="" self.subject ="" self.creator ="" self.keywords ="" self.lastModifiedBy ="" self.revision ="" self.createdDate ="" self.modifiedDate ="" self.thumbnailPath ="" rnd = str(random.randrange(0, 1001, 3)) zip = zipfile.ZipFile(filepath, 'r') file('app'+rnd+'.xml', 'w').write(zip.read('docProps/app.xml')) file('core'+rnd+'.xml', 'w').write(zip.read('docProps/core.xml')) file('docu'+rnd+'.xml', 'w').write(zip.read('word/document.xml')) try: file('comments'+rnd+'.xml', 'w').write(zip.read('word/comments.xml')) self.comments="ok" except: self.comments="error" thumbnailPath = "" #try: #file('thumbnail'+rnd+'.jpeg', 'w').write(zip.read('docProps/thumbnail.jpeg')) #thumbnailPath = 'thumbnail'+rnd+'.jpeg' #except: # pass zip.close() # primero algunas estadisticas del soft usado para la edicion y del documento f = open ('app'+rnd+'.xml','r') app = f.read() self.cargaApp(app) f.close() if self.comments=="ok": f = open ('comments'+rnd+'.xml','r') comm = f.read() 
self.cargaComm(comm) f.close() # document content f = open ('docu'+rnd+'.xml','r') docu = f.read() self.text = docu f.close() # datos respecto a autor, etc f = open ('core'+rnd+'.xml','r') core = f.read() self.cargaCore(core) self.thumbnailPath = thumbnailPath f.close() # borramos todo menos el thumbnail os.remove('app'+rnd+'.xml') os.remove('core'+rnd+'.xml') os.remove('comments'+rnd+'.xml') os.remove('docu'+rnd+'.xml') #self.toString() def toString(self): print "--- Metadata app ---" print " template: " + str(self.template) print " totalTime: " + str(self.totalTime) print " pages: "+ str(self.pages) print " words: "+ str(self.words) print " characters: "+ str(self.characters) print " application: "+ str(self.application) print " docSecurity: "+ str(self.docSecurity) print " lines: "+ str(self.lines) print " paragraphs: "+ str(self.paragraphs) print " scaleCrop: " + str(self.scaleCrop) print " company: "+ str(self.company) print " linksUpToDate: " + str(self.linksUpToDate) print " charactersWithSpaces: "+ str(self.charactersWithSpaces) print " shareDoc:" + str(self.shareDoc) print " hyperlinksChanged:" + str(self.hyperlinksChanged) print " appVersion:" + str(self.appVersion) print "\n --- Metadata core ---" print " title:" + str(self.title) print " subject:" + str(self.subject) print " creator:" + str(self.creator) print " keywords:" + str(self.keywords) print " lastModifiedBy:" + str(self.lastModifiedBy) print " revision:" + str(self.revision) print " createdDate:" + str(self.createdDate) print " modifiedDate:" + str(self.modifiedDate) print "\n thumbnailPath:" + str(self.thumbnailPath) def cargaComm(self,datos): try: p = re.compile('w:author="(.*?)" w') self.userscomments = p.findall(datos) except: pass def cargaApp(self,datos): try: p = re.compile('<Template>(.*)</Template>') self.template = str (p.findall(datos)[0]) except: pass try: p = re.compile('<TotalTime>(.*)</TotalTime>') self.totalTime = str (p.findall(datos)[0]) except: pass try: p = 
re.compile('<Pages>(.*)</Pages>') self.pages = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Words>(.*)</Words>') self.words = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Characters>(.*)</Characters>') self.characters = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Application>(.*)</Application>') self.application = str (p.findall(datos)[0]) except: pass try: p = re.compile('<DocSecurity>(.*)</DocSecurity>') self.docSecurity = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Lines>(.*)</Lines>') self.lines = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Paragraphs>(.*)</Paragraphs>') self.paragraphs = str (p.findall(datos)[0]) except: pass try: p = re.compile('<ScaleCrop>(.*)</ScaleCrop>') self.scaleCrop = str (p.findall(datos)[0]) except: pass try: p = re.compile('<Company>(.*)</Company>') self.company = str (p.findall(datos)[0]) except: pass try: p = re.compile('<LinksUpToDate>(.*)</LinksUpToDate>') self.linksUpToDate = str (p.findall(datos)[0]) except: pass try: p = re.compile('<CharactersWithSpaces>(.*)</CharactersWithSpaces>') self.charactersWithSpaces = str (p.findall(datos)[0]) except: pass try: p = re.compile('<SharedDoc>(.*)</SharedDoc>') self.sharedDoc = str (p.findall(datos)[0]) except: pass try: p = re.compile('<HyperlinksChanged>(.*)</HyperlinksChanged>') self.hyperlinksChanged = str (p.findall(datos)[0]) except: pass try: p = re.compile('<AppVersion>(.*)</AppVersion>') self.appVersion = str (p.findall(datos)[0]) except: pass def cargaCore(self,datos): try: p = re.compile('<dc:title>(.*)</dc:title>') self.title = str (p.findall(datos)[0]) except: pass try: p = re.compile('<dc:subject>(.*)</dc:subject>') self.subject = str (p.findall(datos)[0]) except: pass try: p = re.compile('<dc:creator>(.*)</dc:creator>') self.creator = str (p.findall(datos)[0]) except: pass try: p = re.compile('<cp:keywords>(.*)</cp:keywords>') self.keywords = str (p.findall(datos)[0]) except: pass try: p = 
re.compile('<cp:lastModifiedBy>(.*)</cp:lastModifiedBy>') self.lastModifiedBy = str (p.findall(datos)[0]) except: pass try: p = re.compile('<cp:revision>(.*)</cp:revision>') self.revision = str (p.findall(datos)[0]) except: pass try: p = re.compile('<dcterms:created xsi:type=".*">(.*)</dcterms:created>') self.createdDate = str (p.findall(datos)[0]) except: pass try: p = re.compile('<dcterms:modified xsi:type=".*">(.*)</dcterms:modified>') self.modifiedDate = str (p.findall(datos)[0]) except: pass def getData(self): return "ok" def getTexts(self): return "ok" def getRaw(self): raw = "Not implemented yet" return raw def getUsers(self): res=[] temporal=[] res.append(self.creator) res.append(self.lastModifiedBy) if self.comments == "ok": res.extend(self.userscomments) else: pass for x in res: if temporal.count(x) ==0: temporal.append(x) else: pass return temporal def getEmails(self): res=myparser.parser(self.text) return res.emails() def getPaths(self): res=[] #res.append(self.revision) return res def getSoftware(self): res=[] res.append(self.application) return res
gpl-2.0
Kazade/NeHe-Website
google_appengine/lib/django-1.2/django/core/management/commands/shell.py
45
2962
import os from django.core.management.base import NoArgsCommand from optparse import make_option class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option('--plain', action='store_true', dest='plain', help='Tells Django to use plain Python, not IPython.'), ) help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available." requires_model_validation = False def handle_noargs(self, **options): # XXX: (Temporary) workaround for ticket #1796: force early loading of all # models from installed apps. from django.db.models.loading import get_models loaded_models = get_models() use_plain = options.get('plain', False) try: if use_plain: # Don't bother loading IPython, because the user wants plain Python. raise ImportError try: from IPython.frontend.terminal.embed import TerminalInteractiveShell shell = TerminalInteractiveShell() shell.mainloop() except ImportError: # IPython < 0.11 # Explicitly pass an empty list as arguments, because otherwise # IPython would use sys.argv from this script. try: from IPython.Shell import IPShell shell = IPShell(argv=[]) shell.mainloop() except ImportError: # IPython not found at all, raise ImportError raise except ImportError: import code # Set up a dictionary to serve as the environment for the shell, so # that tab completion works on objects that are imported at runtime. # See ticket 5082. imported_objects = {} try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', because # we already know 'readline' was imported successfully. import rlcompleter readline.set_completer(rlcompleter.Completer(imported_objects).complete) readline.parse_and_bind("tab:complete") # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system # conventions and get $PYTHONSTARTUP first then import user. 
if not use_plain: pythonrc = os.environ.get("PYTHONSTARTUP") if pythonrc and os.path.isfile(pythonrc): try: execfile(pythonrc) except NameError: pass # This will import .pythonrc.py as a side-effect import user code.interact(local=imported_objects)
bsd-3-clause
dashmug/saleor
saleor/registration/backends.py
20
1428
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend

from .models import ExternalUserData

User = get_user_model()


class Backend(ModelBackend):
    """Common base for the project's authentication backends.

    Overrides session-user retrieval so a stale or deleted user id
    simply resolves to ``None`` instead of raising.
    """

    def get_user(self, user_id):
        # Resolve the primary key stored in the session; None on a miss.
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None


class EmailPasswordBackend(Backend):
    """Backend that treats the ``username`` credential as an email address."""

    def authenticate(self, username=None, password=None, **_kwargs):
        # Unknown addresses and wrong passwords both yield None, which
        # lets Django fall through to the next configured backend.
        try:
            candidate = User.objects.get(email=username)
        except User.DoesNotExist:
            return None
        return candidate if candidate.check_password(password) else None


class ExternalLoginBackend(Backend):
    """Backend that maps an external-service identity onto a local user."""

    def authenticate(self, service=None, username=None, **_kwargs):
        lookup = ExternalUserData.objects.select_related('user')
        try:
            external = lookup.get(service=service, username=username)
        except ExternalUserData.DoesNotExist:
            return None
        return external.user


class TrivialBackend(Backend):
    """Backend that accepts an already-resolved ``User`` instance as proof."""

    def authenticate(self, user=None, **_kwargs):
        # Only genuine User objects authenticate; anything else falls
        # through implicitly (returns None).
        if isinstance(user, User):
            return user
bsd-3-clause
cwilliamson226/SecondString
dbscripts/dbpop.py
1
1716
# dbpop.py
# cwilliamson226
# Pulls 2016-season player stats with nflgame and loads them into MongoDB.
import nflgame
from pymongo import MongoClient

# Stat categories Mongo indexing currently cannot handle; any stat key
# containing one of these substrings is skipped.
_EXCLUDED_CATEGORIES = ('defense', 'kicking', 'punting')

# Imports the stats from the 2016 season
games = nflgame.games(2016)
plays = nflgame.combine_plays(games)

# Creates a connection through pyMongo to the specified MongoDB, in this case mLab.
# SECURITY NOTE(review): credentials are hard-coded in the source; they should
# be moved to environment variables or a config file kept out of version control.
connection = MongoClient("mongodb://pledgemaster:skilodge@ds021356.mlab.com:21356/nfldb")
db = connection.nfldb
db.authenticate('pledgemaster', 'skilodge')

# Goes through all players in the combined plays
for p in plays.players():
    # Skip records missing any of the fields we store.
    if not p or not p.player or not p.player.first_name or not p.player.last_name or not p.player.position or not p.team:
        continue

    # Base document for this player.
    player_doc = {
        'firstname': str(p.player.first_name),
        'lastname': str(p.player.last_name),
        'pos': str(p.player.position),
        'team': str(p.team),
        'tds': str(p.tds),
    }

    # Merge the per-category stats straight into the document, filtering
    # excluded categories.  (Replaces the original pair of parallel lists
    # plus an index-based copy loop with one direct pass over the dict.)
    for key, value in p.stats.items():
        if not any(cat in key for cat in _EXCLUDED_CATEGORIES):
            player_doc[key] = value

    # Inserts the player document into the DB
    db.nflgame_players.insert_one(player_doc)
mit
emote-project/Scenario1
NAOBridges/NAOThalamusGUI/bin/Release/python/soundLocationModule.py
2
2745
from datetime import datetime import naoqi from naoqi import ALBroker from naoqi import ALModule from naoqi import ALProxy instance = None SoundLocator = None def log(text): instance.nao.log("SoundLocationModule: " + text) return; class SoundLocationModule(object): nao = None proxy = None lastDetectionTime = datetime.now() def __init__(self, naoServer): global instance instance = self self.nao = naoServer return def start(self): log("Connecting AudioSourceLocalizationProxy...") try: global myBroker myBroker = ALBroker("soundLocalizationBroker", "0.0.0.0", 0, self.nao.brokerIp(), self.nao.brokerPort()) self.proxy = ALProxy("ALAudioSourceLocalization") log("Version " + self.proxy.version()) self.nao.memory.proxy.subscribeToEvent("ALAudioSourceLocalization/SoundLocated","SoundLocator","onSoundLocated") except Exception, Argument: log("Failed: " + str(Argument)) self.proxy = None return False return True def dispose(self): if (self.proxy==None): return False log("Disconnect AudioSourceLocalizationProxy...") try: if (self.nao.memory.proxy!=None): self.nao.memory.proxy.unsubscribeToEvent("ALAudioSourceLocalization/SoundLocated","SoundLocator") except Exception, Argument: log("Failed: " + str(Argument)) return #try: # global myBroker # myBroker.shutdown() #except Exception, Argument: # log("Failed: " + str(Argument)) return class SoundLocatorModule(ALModule): def onSoundLocated(self, *_args): """ does this and that """ global instance #print ("sound located") try: if (instance != None and instance.nao != None and not instance.nao.speech.isSpeaking): currentTime = datetime.now() deltaTime = currentTime - instance.lastDetectionTime if (deltaTime.seconds > 1): instance.lastDetectionTime = currentTime s = "" for field in _args[1][1]: s = s + " " + str(field) if (abs(_args[1][1][0])>0): if (instance != None and instance.nao != None): instance.nao.NotifySoundLocated(_args[1][1][0], _args[1][1][1], _args[1][1][2]) else: #print("SoundLocator: skipped " + str(deltaTime.seconds)) 
pass except Exception, Argument: log("SoundLocator Failed: " + str(Argument))
lgpl-3.0
sun1991/lvsys
lvsys/env_lvsys/Lib/encodings/iso8859_16.py
593
13813
""" Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-16', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 
0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' 
# 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' 
# 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE u'\u20ac' # 0xA4 -> EURO SIGN u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON u'\xa7' # 0xA7 -> SECTION SIGN u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE u'\xad' # 0xAD -> SOFT HYPHEN u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH 
ACUTE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0xEB -> LATIN 
SMALL LETTER E WITH DIAERESIS u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
mit
AyoubZahid/odoo
openerp/addons/base/ir/ir_rule.py
34
7149
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import time from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import fields, osv, expression from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.misc import unquote as unquote class ir_rule(osv.osv): _name = 'ir.rule' _order = 'name' _MODES = ['read', 'write', 'create', 'unlink'] def _eval_context_for_combinations(self): """Returns a dictionary to use as evaluation context for ir.rule domains, when the goal is to obtain python lists that are easier to parse and combine, but not to actually execute them.""" return {'user': unquote('user'), 'time': unquote('time')} def _eval_context(self, cr, uid): """Returns a dictionary to use as evaluation context for ir.rule domains.""" return {'user': self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid), 'time':time} def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None): res = {} eval_context = self._eval_context(cr, uid) for rule in self.browse(cr, uid, ids, context): if rule.domain_force: res[rule.id] = expression.normalize_domain(eval(rule.domain_force, eval_context)) else: res[rule.id] = [] return res def _get_value(self, cr, uid, ids, field_name, arg, context=None): res = {} for rule in self.browse(cr, uid, ids, context): if not rule.groups: res[rule.id] = True else: res[rule.id] = False return res def _check_model_obj(self, cr, uid, ids, context=None): return not any(self.pool[rule.model_id.model].is_transient() for rule in self.browse(cr, uid, ids, context)) def _check_model_name(self, cr, uid, ids, context=None): # Don't allow rules on rules records (this model). 
return not any(rule.model_id.model == self._name for rule in self.browse(cr, uid, ids, context)) _columns = { 'name': fields.char('Name', select=1), 'active': fields.boolean('Active', help="If you uncheck the active field, it will disable the record rule without deleting it (if you delete a native record rule, it may be re-created when you reload the module."), 'model_id': fields.many2one('ir.model', 'Object',select=1, required=True, ondelete="cascade"), 'global': fields.function(_get_value, string='Global', type='boolean', store=True, help="If no group is specified the rule is global and applied to everyone"), 'groups': fields.many2many('res.groups', 'rule_group_rel', 'rule_group_id', 'group_id', 'Groups'), 'domain_force': fields.text('Domain'), 'domain': fields.function(_domain_force_get, string='Domain', type='binary'), 'perm_read': fields.boolean('Apply for Read'), 'perm_write': fields.boolean('Apply for Write'), 'perm_create': fields.boolean('Apply for Create'), 'perm_unlink': fields.boolean('Apply for Delete') } _order = 'model_id DESC' _defaults = { 'active': True, 'perm_read': True, 'perm_write': True, 'perm_create': True, 'perm_unlink': True, 'global': True, } _sql_constraints = [ ('no_access_rights', 'CHECK (perm_read!=False or perm_write!=False or perm_create!=False or perm_unlink!=False)', 'Rule must have at least one checked access right !'), ] _constraints = [ (_check_model_obj, 'Rules can not be applied on Transient models.', ['model_id']), (_check_model_name, 'Rules can not be applied on the Record Rules model.', ['model_id']), ] @tools.ormcache('uid', 'model_name', 'mode') def _compute_domain(self, cr, uid, model_name, mode="read"): if mode not in self._MODES: raise ValueError('Invalid mode: %r' % (mode,)) if uid == SUPERUSER_ID: return None cr.execute("""SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id = m.id) WHERE m.model = %s AND r.active is True AND r.perm_""" + mode + """ AND (r.id IN (SELECT rule_group_id FROM rule_group_rel g_rel 
JOIN res_groups_users_rel u_rel ON (g_rel.group_id = u_rel.gid) WHERE u_rel.uid = %s) OR r.global)""", (model_name, uid)) rule_ids = [x[0] for x in cr.fetchall()] if rule_ids: # browse user as super-admin root to avoid access errors! user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid) global_domains = [] # list of domains group_domains = {} # map: group -> list of domains for rule in self.browse(cr, SUPERUSER_ID, rule_ids): # read 'domain' as UID to have the correct eval context for the rule. rule_domain = self.read(cr, uid, [rule.id], ['domain'])[0]['domain'] dom = expression.normalize_domain(rule_domain) for group in rule.groups: if group in user.groups_id: group_domains.setdefault(group, []).append(dom) if not rule.groups: global_domains.append(dom) # combine global domains and group domains if group_domains: group_domain = expression.OR(map(expression.OR, group_domains.values())) else: group_domain = [] domain = expression.AND(global_domains + [group_domain]) return domain return [] def clear_cache(self, cr, uid): """ Deprecated, use `clear_caches` instead. """ self.clear_caches() def domain_get(self, cr, uid, model_name, mode='read', context=None): dom = self._compute_domain(cr, uid, model_name, mode) if dom: # _where_calc is called as superuser. This means that rules can # involve objects on which the real uid has no acces rights. # This means also there is no implicit restriction (e.g. an object # references another object the user can't see). 
query = self.pool[model_name]._where_calc(cr, SUPERUSER_ID, dom, active_test=False) return query.where_clause, query.where_clause_params, query.tables return [], [], ['"' + self.pool[model_name]._table + '"'] def unlink(self, cr, uid, ids, context=None): res = super(ir_rule, self).unlink(cr, uid, ids, context=context) self.clear_caches() return res def create(self, cr, uid, vals, context=None): res = super(ir_rule, self).create(cr, uid, vals, context=context) self.clear_caches() return res def write(self, cr, uid, ids, vals, context=None): res = super(ir_rule, self).write(cr, uid, ids, vals, context=context) self.clear_caches() return res
gpl-3.0
pcampese/codewars
where_is_my_parent_cry.py
1
2360
# https://www.codewars.com/kata/where-is-my-parent-cry/python def find_children(input_string): print('original = [{}]'.format(input_string)) ########### # Method 1: 1st Attempt ########### string_sorted = ''.join(sorted(input_string)) # Separate the string by case upper = [c for c in string_sorted if c.isupper()] lower = [c for c in string_sorted if c.islower()] # For each of the uppercase letters, get the number of lowercase letters, and append them all together, in that order upper_then_lower = '' # Stores the final result for u in upper: # For each uppercase letter count = lower.count(u.lower()) # Count the number of times it appears in the lowercase list upper_then_lower += u + (u.lower() * count) # Append that to the final result ########### # Method 3: Single line (abysmal to read) (made from combining Method 1...) ########### upper_then_lower2 = ''.join([u + (u.lower() * [c for c in ''.join(sorted(input_string)) if c.islower()].count(u.lower())) for u in [c for c in ''.join(sorted(input_string)) if c.isupper()]]) ########### # Method 3: Refactor Method 1 ########### upper_then_lower3 = '' # Store the final result for char in sorted(input_string): # For each character in the sorted input string if char.isupper(): # If the character is uppercase lower_char = char.lower() # Save a lowercase version of it lower_char_count = input_string.count(lower_char) # Count the number of lowercase characters in the original string upper_then_lower3 += char + (lower_char * lower_char_count) # In the final result, append the upper case character with the repeated count of lowercase characters ########### # Method 4: Single line (horible to read still) (made from combining Method 3...) ########### upper_then_lower4 = ''.join([char + (char.lower() * input_string.count(char.lower())) for char in sorted(input_string) if char.isupper()]) return upper_then_lower3
gpl-3.0
wenderen/servo
tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py
499
1859
# Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Wrong web_socket_do_extra_handshake signature. """ def no_web_socket_do_extra_handshake(request): pass def web_socket_transfer_data(request): request.connection.write( 'sub/wrong_handshake_sig_wsh.py is called for %s, %s' % (request.ws_resource, request.ws_protocol)) # vi:sts=4 sw=4 et
mpl-2.0
JFriel/honours_project
networkx/build/lib/networkx/algorithms/tree/tests/test_branchings.py
31
9935
from nose import SkipTest from nose.tools import * import networkx as nx try: import numpy as np except: raise SkipTest('NumPy not available.') from networkx.algorithms.tree import branchings from networkx.algorithms.tree import recognition from networkx.testing import * # # Explicitly discussed examples from Edmonds paper. # # Used in Figures A-F. # G_array = np.array([ # 0 1 2 3 4 5 6 7 8 [ 0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 [ 4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 [ 0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 [ 5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 [ 0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 [ 0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 [ 0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 [ 0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 [ 0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 ], dtype=int) # We convert to MultiDiGraph after using from_numpy_matrix # https://github.com/networkx/networkx/pull/1305 def G1(): G = nx.DiGraph() G = nx.from_numpy_matrix(G_array, create_using=G) G = nx.MultiDiGraph(G) return G def G2(): # Now we shift all the weights by -10. # Should not affect optimal arborescence, but does affect optimal branching. G = nx.DiGraph() Garr = G_array.copy() Garr[np.nonzero(Garr)] -= 10 G = nx.from_numpy_matrix(Garr, create_using=G) G = nx.MultiDiGraph(G) return G # An optimal branching for G1 that is also a spanning arborescence. So it is # also an optimal spanning arborescence. # optimal_arborescence_1 = [ (0, 2, 12), (2, 1, 17), (2, 3, 21), (1, 5, 13), (3, 4, 17), (3, 6, 18), (6, 8, 15), (8, 7, 18), ] # For G2, the optimal branching of G1 (with shifted weights) is no longer # an optimal branching, but it is still an optimal spanning arborescence # (just with shifted weights). An optimal branching for G2 is similar to what # appears in figure G (this is greedy_subopt_branching_1a below), but with the # edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching # is not a spanning arborescence. The code finds optimal_branching_2a. # An alternative and equivalent branching is optimal_branching_2b. 
We would # need to modify the code to iterate through all equivalent optimal branchings. # # These are maximal branchings or arborescences. optimal_branching_2a = [ (5, 6, 4), (6, 2, 11), (6, 8, 5), (8, 7, 8), (2, 1, 7), (2, 3, 11), (3, 4, 7), ] optimal_branching_2b = [ (8, 7, 8), (7, 3, 9), (3, 4, 7), (3, 6, 8), (6, 2, 11), (2, 1, 7), (1, 5, 3), ] optimal_arborescence_2 = [ (0, 2, 2), (2, 1, 7), (2, 3, 11), (1, 5, 3), (3, 4, 7), (3, 6, 8), (6, 8, 5), (8, 7, 8), ] # Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. # 1a matches what is shown in Figure G in Edmonds's paper. greedy_subopt_branching_1a = [ (5, 6, 14), (6, 2, 21), (6, 8, 15), (8, 7, 18), (2, 1, 17), (2, 3, 21), (3, 0, 5), (3, 4, 17), ] greedy_subopt_branching_1b = [ (8, 7, 18), (7, 6, 15), (6, 2, 21), (2, 1, 17), (2, 3, 21), (1, 5, 13), (3, 0, 5), (3, 4, 17), ] def build_branching(edges): G = nx.DiGraph() for u, v, weight in edges: G.add_edge(u, v, weight=weight) return G def sorted_edges(G, attr='weight', default=1): edges = [(u,v,data.get(attr, default)) for (u,v,data) in G.edges(data=True)] edges = sorted(edges, key=lambda x: x[2]) return edges def assert_equal_branchings(G1, G2, attr='weight', default=1): edges1 = G1.edges(data=True) edges2 = G2.edges(data=True) # Grab the weights only. e1 = sorted_edges(G1, attr, default) e2 = sorted_edges(G2, attr, default) # If we have an exception, let's see the edges. 
print(e1) print(e2) print for a, b in zip(e1, e2): assert_equal(a[:2], b[:2]) np.testing.assert_almost_equal(a[2], b[2]) assert_equal(len(edges1), len(edges2)) ################ def test_optimal_branching1(): G = build_branching(optimal_arborescence_1) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 131) def test_optimal_branching2a(): G = build_branching(optimal_branching_2a) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 53) def test_optimal_branching2b(): G = build_branching(optimal_branching_2b) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 53) def test_optimal_arborescence2(): G = build_branching(optimal_arborescence_2) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 51) def test_greedy_suboptimal_branching1a(): G = build_branching(greedy_subopt_branching_1a) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 128) def test_greedy_suboptimal_branching1b(): G = build_branching(greedy_subopt_branching_1b) assert_true(recognition.is_arborescence(G), True) assert_equal(branchings.branching_weight(G), 127) def test_greedy_max1(): # Standard test. # G = G1() B = branchings.greedy_branching(G) # There are only two possible greedy branchings. The sorting is such # that it should equal the second suboptimal branching: 1b. B_ = build_branching(greedy_subopt_branching_1b) assert_equal_branchings(B, B_) def test_greedy_max2(): # Different default weight. # G = G1() del G[1][0][0]['weight'] B = branchings.greedy_branching(G, default=6) # Chosen so that edge (3,0,5) is not selected and (1,0,6) is instead. edges = [ (1, 0, 6), (1, 5, 13), (7, 6, 15), (2, 1, 17), (3, 4, 17), (8, 7, 18), (2, 3, 21), (6, 2, 21), ] B_ = build_branching(edges) assert_equal_branchings(B, B_) def test_greedy_max3(): # All equal weights. 
# G = G1() B = branchings.greedy_branching(G, attr=None) # This is mostly arbitrary...the output was generated by running the algo. edges = [ (2, 1, 1), (3, 0, 1), (3, 4, 1), (5, 8, 1), (6, 2, 1), (7, 3, 1), (7, 6, 1), (8, 7, 1), ] B_ = build_branching(edges) assert_equal_branchings(B, B_, default=1) def test_greedy_min(): G = G1() B = branchings.greedy_branching(G, kind='min') edges = [ (1, 0, 4), (0, 2, 12), (0, 4, 12), (2, 5, 12), (4, 7, 12), (5, 8, 12), (5, 6, 14), (7, 3, 19) ] B_ = build_branching(edges) assert_equal_branchings(B, B_) def test_edmonds1_maxbranch(): G = G1() x = branchings.maximum_branching(G) x_ = build_branching(optimal_arborescence_1) assert_equal_branchings(x, x_) def test_edmonds1_maxarbor(): G = G1() x = branchings.maximum_spanning_arborescence(G) x_ = build_branching(optimal_arborescence_1) assert_equal_branchings(x, x_) def test_edmonds2_maxbranch(): G = G2() x = branchings.maximum_branching(G) x_ = build_branching(optimal_branching_2a) assert_equal_branchings(x, x_) def test_edmonds2_maxarbor(): G = G2() x = branchings.maximum_spanning_arborescence(G) x_ = build_branching(optimal_arborescence_2) assert_equal_branchings(x, x_) def test_edmonds2_minarbor(): G = G1() x = branchings.minimum_spanning_arborescence(G) # This was obtained from algorithm. Need to verify it independently. # Branch weight is: 96 edges = [ (3, 0, 5), (0, 2, 12), (0, 4, 12), (2, 5, 12), (4, 7, 12), (5, 8, 12), (5, 6, 14), (2, 1, 17) ] x_ = build_branching(edges) assert_equal_branchings(x, x_) def test_edmonds3_minbranch1(): G = G1() x = branchings.minimum_branching(G) edges = [] x_ = build_branching(edges) assert_equal_branchings(x, x_) def test_edmonds3_minbranch2(): G = G1() G.add_edge(8, 9, weight=-10) x = branchings.minimum_branching(G) edges = [(8, 9, -10)] x_ = build_branching(edges) assert_equal_branchings(x, x_) # Need more tests def test_mst(): # Make sure we get the same results for undirected graphs. 
# Example from: http://en.wikipedia.org/wiki/Kruskal's_algorithm G = nx.Graph() edgelist = [(0, 3, [('weight', 5)]), (0, 1, [('weight', 7)]), (1, 3, [('weight', 9)]), (1, 2, [('weight', 8)]), (1, 4, [('weight', 7)]), (3, 4, [('weight', 15)]), (3, 5, [('weight', 6)]), (2, 4, [('weight', 5)]), (4, 5, [('weight', 8)]), (4, 6, [('weight', 9)]), (5, 6, [('weight', 11)])] G.add_edges_from(edgelist) G = G.to_directed() x = branchings.minimum_spanning_arborescence(G) edges = [(set([0, 1]), 7), (set([0, 3]), 5), (set([3, 5]), 6), (set([1, 4]), 7), (set([4, 2]), 5), (set([4, 6]), 9)] assert_equal(x.number_of_edges(), len(edges)) for u, v, d in x.edges(data=True): assert_true( (set([u,v]), d['weight']) in edges ) def test_mixed_nodetypes(): # Smoke test to make sure no TypeError is raised for mixed node types. G = nx.Graph() edgelist = [(0, 3, [('weight', 5)]), (0, '1', [('weight', 5)])] G.add_edges_from(edgelist) G = G.to_directed() x = branchings.minimum_spanning_arborescence(G) def test_edmonds1_minbranch(): # Using -G_array and min should give the same as optimal_arborescence_1, # but with all edges negative. edges = [ (u, v, -w) for (u, v, w) in optimal_arborescence_1 ] G = nx.DiGraph() G = nx.from_numpy_matrix(-G_array, create_using=G) # Quickly make sure max branching is empty. x = branchings.maximum_branching(G) x_ = build_branching([]) assert_equal_branchings(x, x_) # Now test the min branching. x = branchings.minimum_branching(G) x_ = build_branching(edges) assert_equal_branchings(x, x_)
gpl-3.0
techdragon/django
django/core/management/commands/dbshell.py
174
1240
from django.core.management.base import BaseCommand, CommandError from django.db import DEFAULT_DB_ALIAS, connections class Command(BaseCommand): help = ( "Runs the command-line client for specified database, or the " "default database if none is provided." ) requires_system_checks = False def add_arguments(self, parser): parser.add_argument( '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to open a shell. Defaults to the "default" database.', ) def handle(self, **options): connection = connections[options['database']] try: connection.client.runshell() except OSError: # Note that we're assuming OSError means that the client program # isn't installed. There's a possibility OSError would be raised # for some other reason, in which case this error message would be # inaccurate. Still, this message catches the common case. raise CommandError( 'You appear not to have the %r program installed or on your path.' % connection.client.executable_name )
bsd-3-clause
gurneyalex/OpenUpgrade
addons/base_gengo/__openerp__.py
68
2119
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Automated Translations through Gengo API', 'version': '0.1', 'category': 'Tools', 'description': """ Automated Translations through Gengo API ======================================== This module will install passive scheduler job for automated translations using the Gengo API. To activate it, you must 1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters` 2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow the wizard. This wizard will activate the CRON job and the Scheduler and will start the automatic translation via Gengo Services for all the terms where you requested it. """, 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'depends': ['base'], 'data': [ 'gengo_sync_schedular_data.xml', 'ir_translation.xml', 'res_company_view.xml', 'wizard/base_gengo_translations_view.xml', ], 'demo': [], 'test': [], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rendermotion/RMMel
snippets/ReverseSideCustomRig.py
2
2215
import maya.cmds as cmds import RMUncategorized def MirrorChildren(Objects): for eachObject in Objects: children = cmds.listRelatives(eachObject, children = True,type='transform') if (children): MirrorChildren(children) for eachObject in Objects: ObjectTransformDic = RMUncategorized.ObjectTransformDic( [eachObject] ) SplitArray = eachObject.split("_") Side = SplitArray[1] if Side == "R": SplitArray[1]="L" OpositObject = "_".join(SplitArray) if cmds.objExists(OpositObject): RMUncategorized.SetObjectTransformDic({OpositObject : ObjectTransformDic[eachObject]}, MirrorTranslateX = -1 , MirrorTranslateY = 1 , MirrorTranslateZ = 1 , MirrorRotateX = 1 , MirrorRotateY = -1 , MirrorRotateZ = -1) if cmds.objectType(eachObject) == 'joint': X = cmds.getAttr("%s.jointOrientX"%(eachObject)) Y = cmds.getAttr("%s.jointOrientY"%(eachObject)) Z = cmds.getAttr("%s.jointOrientZ"%(eachObject)) cmds.setAttr ("%s.jointOrientX"%(OpositObject),-X) cmds.setAttr ("%s.jointOrientY"%(OpositObject),Y) cmds.setAttr ("%s.jointOrientZ"%(OpositObject),Z) else: SplitArray[1]="R" OpositObject = "_".join(SplitArray) if cmds.objExists(OpositObject): RMUncategorized.SetObjectTransformDic({OpositObject : ObjectTransformDic[eachObject]}, MirrorTranslateX = -1 , MirrorTranslateY = 1 , MirrorTranslateZ = 1 , MirrorRotateX = 1 , MirrorRotateY = -1 , MirrorRotateZ = -1) if cmds.objectType(eachObject) == 'joint': X = cmds.getAttr("%s.jointOrientX"%(eachObject)) Y = cmds.getAttr("%s.jointOrientY"%(eachObject)) Z = cmds.getAttr("%s.jointOrientZ"%(eachObject)) cmds.setAttr ("%s.jointOrientX"%(OpositObject), -X) cmds.setAttr ("%s.jointOrientY"%(OpositObject), Y) cmds.setAttr ("%s.jointOrientZ"%(OpositObject), Z) selection = cmds.ls(selection = True) MirrorChildren(selection)
lgpl-3.0
nicktendo64/brady-vs-grey
httplib2/socks.py
811
18459
"""SocksiPy - Python SOCKS module. Version 1.00 Copyright 2006 Dan-Haim. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Dan Haim nor the names of his contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. This module provides a standard socket-like interface for Python for tunneling connections through SOCKS proxies. 
""" """ Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for use in PyLoris (http://pyloris.sourceforge.net/) Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) mainly to merge bug fixes found in Sourceforge """ import base64 import socket import struct import sys if getattr(socket, 'socket', None) is None: raise ImportError('socket.socket missing, proxy support unusable') PROXY_TYPE_SOCKS4 = 1 PROXY_TYPE_SOCKS5 = 2 PROXY_TYPE_HTTP = 3 PROXY_TYPE_HTTP_NO_TUNNEL = 4 _defaultproxy = None _orgsocket = socket.socket class ProxyError(Exception): pass class GeneralProxyError(ProxyError): pass class Socks5AuthError(ProxyError): pass class Socks5Error(ProxyError): pass class Socks4Error(ProxyError): pass class HTTPError(ProxyError): pass _generalerrors = ("success", "invalid data", "not connected", "not available", "bad proxy type", "bad input") _socks5errors = ("succeeded", "general SOCKS server failure", "connection not allowed by ruleset", "Network unreachable", "Host unreachable", "Connection refused", "TTL expired", "Command not supported", "Address type not supported", "Unknown error") _socks5autherrors = ("succeeded", "authentication is required", "all offered authentication methods were rejected", "unknown username or invalid password", "unknown error") _socks4errors = ("request granted", "request rejected or failed", "request rejected because SOCKS server cannot connect to identd on the client", "request rejected because the client program and identd report different user-ids", "unknown error") def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) Sets a default proxy which all further socksocket objects will use, unless explicitly changed. 
""" global _defaultproxy _defaultproxy = (proxytype, addr, port, rdns, username, password) def wrapmodule(module): """wrapmodule(module) Attempts to replace a module's socket library with a SOCKS socket. Must set a default proxy using setdefaultproxy(...) first. This will only work on modules that import socket directly into the namespace; most of the Python Standard Library falls into this category. """ if _defaultproxy != None: module.socket.socket = socksocket else: raise GeneralProxyError((4, "no proxy specified")) class socksocket(socket.socket): """socksocket([family[, type[, proto]]]) -> socket object Open a SOCKS enabled socket. The parameters are the same as those of the standard socket init. In order for SOCKS to work, you must specify family=AF_INET, type=SOCK_STREAM and proto=0. """ def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None): _orgsocket.__init__(self, family, type, proto, _sock) if _defaultproxy != None: self.__proxy = _defaultproxy else: self.__proxy = (None, None, None, None, None, None) self.__proxysockname = None self.__proxypeername = None self.__httptunnel = True def __recvall(self, count): """__recvall(count) -> data Receive EXACTLY the number of bytes requested from the socket. Blocks until the required number of bytes have been received. """ data = self.recv(count) while len(data) < count: d = self.recv(count-len(data)) if not d: raise GeneralProxyError((0, "connection closed unexpectedly")) data = data + d return data def sendall(self, content, *args): """ override socket.socket.sendall method to rewrite the header for non-tunneling proxies if needed """ if not self.__httptunnel: content = self.__rewriteproxy(content) return super(socksocket, self).sendall(content, *args) def __rewriteproxy(self, header): """ rewrite HTTP request headers to support non-tunneling proxies (i.e. those which do not support the CONNECT method). This only works for HTTP (not HTTPS) since HTTPS requires tunneling. 
""" host, endpt = None, None hdrs = header.split("\r\n") for hdr in hdrs: if hdr.lower().startswith("host:"): host = hdr elif hdr.lower().startswith("get") or hdr.lower().startswith("post"): endpt = hdr if host and endpt: hdrs.remove(host) hdrs.remove(endpt) host = host.split(" ")[1] endpt = endpt.split(" ") if (self.__proxy[4] != None and self.__proxy[5] != None): hdrs.insert(0, self.__getauthheader()) hdrs.insert(0, "Host: %s" % host) hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2])) return "\r\n".join(hdrs) def __getauthheader(self): auth = self.__proxy[4] + ":" + self.__proxy[5] return "Proxy-Authorization: Basic " + base64.b64encode(auth) def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) Sets the proxy to be used. proxytype - The type of the proxy to be used. Three types are supported: PROXY_TYPE_SOCKS4 (including socks4a), PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP addr - The address of the server (IP or DNS). port - The port of the server. Defaults to 1080 for SOCKS servers and 8080 for HTTP proxy servers. rdns - Should DNS queries be preformed on the remote side (rather than the local side). The default is True. Note: This has no effect with SOCKS4 servers. username - Username to authenticate with to the server. The default is no authentication. password - Password to authenticate with to the server. Only relevant when username is also provided. """ self.__proxy = (proxytype, addr, port, rdns, username, password) def __negotiatesocks5(self, destaddr, destport): """__negotiatesocks5(self,destaddr,destport) Negotiates a connection through a SOCKS5 server. """ # First we'll send the authentication packages we support. 
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None): # The username/password details were supplied to the # setproxy method so we support the USERNAME/PASSWORD # authentication (in addition to the standard none). self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02)) else: # No username/password were entered, therefore we # only support connections with no authentication. self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00)) # We'll receive the server's response to determine which # method was selected chosenauth = self.__recvall(2) if chosenauth[0:1] != chr(0x05).encode(): self.close() raise GeneralProxyError((1, _generalerrors[1])) # Check the chosen authentication method if chosenauth[1:2] == chr(0x00).encode(): # No authentication is required pass elif chosenauth[1:2] == chr(0x02).encode(): # Okay, we need to perform a basic username/password # authentication. self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5]) authstat = self.__recvall(2) if authstat[0:1] != chr(0x01).encode(): # Bad response self.close() raise GeneralProxyError((1, _generalerrors[1])) if authstat[1:2] != chr(0x00).encode(): # Authentication failed self.close() raise Socks5AuthError((3, _socks5autherrors[3])) # Authentication succeeded else: # Reaching here is always bad self.close() if chosenauth[1] == chr(0xFF).encode(): raise Socks5AuthError((2, _socks5autherrors[2])) else: raise GeneralProxyError((1, _generalerrors[1])) # Now we can request the actual connection req = struct.pack('BBB', 0x05, 0x01, 0x00) # If the given destination address is an IP address, we'll # use the IPv4 address request even if remote resolving was specified. try: ipaddr = socket.inet_aton(destaddr) req = req + chr(0x01).encode() + ipaddr except socket.error: # Well it's not an IP number, so it's probably a DNS name. 
if self.__proxy[3]: # Resolve remotely ipaddr = None req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr else: # Resolve locally ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) req = req + chr(0x01).encode() + ipaddr req = req + struct.pack(">H", destport) self.sendall(req) # Get the response resp = self.__recvall(4) if resp[0:1] != chr(0x05).encode(): self.close() raise GeneralProxyError((1, _generalerrors[1])) elif resp[1:2] != chr(0x00).encode(): # Connection failed self.close() if ord(resp[1:2])<=8: raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])])) else: raise Socks5Error((9, _socks5errors[9])) # Get the bound address/port elif resp[3:4] == chr(0x01).encode(): boundaddr = self.__recvall(4) elif resp[3:4] == chr(0x03).encode(): resp = resp + self.recv(1) boundaddr = self.__recvall(ord(resp[4:5])) else: self.close() raise GeneralProxyError((1,_generalerrors[1])) boundport = struct.unpack(">H", self.__recvall(2))[0] self.__proxysockname = (boundaddr, boundport) if ipaddr != None: self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) else: self.__proxypeername = (destaddr, destport) def getproxysockname(self): """getsockname() -> address info Returns the bound IP address and port number at the proxy. """ return self.__proxysockname def getproxypeername(self): """getproxypeername() -> address info Returns the IP and port number of the proxy. """ return _orgsocket.getpeername(self) def getpeername(self): """getpeername() -> address info Returns the IP address and port number of the destination machine (note: getproxypeername returns the proxy) """ return self.__proxypeername def __negotiatesocks4(self,destaddr,destport): """__negotiatesocks4(self,destaddr,destport) Negotiates a connection through a SOCKS4 server. """ # Check if the destination address provided is an IP address rmtrslv = False try: ipaddr = socket.inet_aton(destaddr) except socket.error: # It's a DNS name. Check where it should be resolved. 
if self.__proxy[3]: ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01) rmtrslv = True else: ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) # Construct the request packet req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr # The username parameter is considered userid for SOCKS4 if self.__proxy[4] != None: req = req + self.__proxy[4] req = req + chr(0x00).encode() # DNS name if remote resolving is required # NOTE: This is actually an extension to the SOCKS4 protocol # called SOCKS4A and may not be supported in all cases. if rmtrslv: req = req + destaddr + chr(0x00).encode() self.sendall(req) # Get the response from the server resp = self.__recvall(8) if resp[0:1] != chr(0x00).encode(): # Bad data self.close() raise GeneralProxyError((1,_generalerrors[1])) if resp[1:2] != chr(0x5A).encode(): # Server returned an error self.close() if ord(resp[1:2]) in (91, 92, 93): self.close() raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90])) else: raise Socks4Error((94, _socks4errors[4])) # Get the bound address/port self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0]) if rmtrslv != None: self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) else: self.__proxypeername = (destaddr, destport) def __negotiatehttp(self, destaddr, destport): """__negotiatehttp(self,destaddr,destport) Negotiates a connection through an HTTP server. 
""" # If we need to resolve locally, we do this now if not self.__proxy[3]: addr = socket.gethostbyname(destaddr) else: addr = destaddr headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"] headers += ["Host: ", destaddr, "\r\n"] if (self.__proxy[4] != None and self.__proxy[5] != None): headers += [self.__getauthheader(), "\r\n"] headers.append("\r\n") self.sendall("".join(headers).encode()) # We read the response until we get the string "\r\n\r\n" resp = self.recv(1) while resp.find("\r\n\r\n".encode()) == -1: resp = resp + self.recv(1) # We just need the first line to check if the connection # was successful statusline = resp.splitlines()[0].split(" ".encode(), 2) if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()): self.close() raise GeneralProxyError((1, _generalerrors[1])) try: statuscode = int(statusline[1]) except ValueError: self.close() raise GeneralProxyError((1, _generalerrors[1])) if statuscode != 200: self.close() raise HTTPError((statuscode, statusline[2])) self.__proxysockname = ("0.0.0.0", 0) self.__proxypeername = (addr, destport) def connect(self, destpair): """connect(self, despair) Connects to the specified destination through a proxy. destpar - A tuple of the IP/DNS address and the port number. (identical to socket's connect). To select the proxy server use setproxy(). 
""" # Do a minimal input check first if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int): raise GeneralProxyError((5, _generalerrors[5])) if self.__proxy[0] == PROXY_TYPE_SOCKS5: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 1080 _orgsocket.connect(self, (self.__proxy[1], portnum)) self.__negotiatesocks5(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_SOCKS4: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 1080 _orgsocket.connect(self,(self.__proxy[1], portnum)) self.__negotiatesocks4(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_HTTP: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 8080 _orgsocket.connect(self,(self.__proxy[1], portnum)) self.__negotiatehttp(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 8080 _orgsocket.connect(self,(self.__proxy[1],portnum)) if destpair[1] == 443: self.__negotiatehttp(destpair[0],destpair[1]) else: self.__httptunnel = False elif self.__proxy[0] == None: _orgsocket.connect(self, (destpair[0], destpair[1])) else: raise GeneralProxyError((4, _generalerrors[4]))
mit
akopich/spark
python/pyspark/ml/pipeline.py
69
13177
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import os if sys.version > '3': basestring = str from pyspark import since, keyword_only, SparkContext from pyspark.ml.base import Estimator, Model, Transformer from pyspark.ml.param import Param, Params from pyspark.ml.util import * from pyspark.ml.wrapper import JavaParams from pyspark.ml.common import inherit_doc @inherit_doc class Pipeline(Estimator, MLReadable, MLWritable): """ A simple pipeline, which acts as an estimator. A Pipeline consists of a sequence of stages, each of which is either an :py:class:`Estimator` or a :py:class:`Transformer`. When :py:meth:`Pipeline.fit` is called, the stages are executed in order. If a stage is an :py:class:`Estimator`, its :py:meth:`Estimator.fit` method will be called on the input dataset to fit a model. Then the model, which is a transformer, will be used to transform the dataset as the input to the next stage. If a stage is a :py:class:`Transformer`, its :py:meth:`Transformer.transform` method will be called to produce the dataset for the next stage. The fitted model from a :py:class:`Pipeline` is a :py:class:`PipelineModel`, which consists of fitted models and transformers, corresponding to the pipeline stages. 
If stages is an empty list, the pipeline acts as an identity transformer. .. versionadded:: 1.3.0 """ stages = Param(Params._dummy(), "stages", "a list of pipeline stages") @keyword_only def __init__(self, stages=None): """ __init__(self, stages=None) """ super(Pipeline, self).__init__() kwargs = self._input_kwargs self.setParams(**kwargs) @since("1.3.0") def setStages(self, value): """ Set pipeline stages. :param value: a list of transformers or estimators :return: the pipeline instance """ return self._set(stages=value) @since("1.3.0") def getStages(self): """ Get pipeline stages. """ return self.getOrDefault(self.stages) @keyword_only @since("1.3.0") def setParams(self, stages=None): """ setParams(self, stages=None) Sets params for Pipeline. """ kwargs = self._input_kwargs return self._set(**kwargs) def _fit(self, dataset): stages = self.getStages() for stage in stages: if not (isinstance(stage, Estimator) or isinstance(stage, Transformer)): raise TypeError( "Cannot recognize a pipeline stage of type %s." % type(stage)) indexOfLastEstimator = -1 for i, stage in enumerate(stages): if isinstance(stage, Estimator): indexOfLastEstimator = i transformers = [] for i, stage in enumerate(stages): if i <= indexOfLastEstimator: if isinstance(stage, Transformer): transformers.append(stage) dataset = stage.transform(dataset) else: # must be an Estimator model = stage.fit(dataset) transformers.append(model) if i < indexOfLastEstimator: dataset = model.transform(dataset) else: transformers.append(stage) return PipelineModel(transformers) @since("1.4.0") def copy(self, extra=None): """ Creates a copy of this instance. 
:param extra: extra parameters :returns: new instance """ if extra is None: extra = dict() that = Params.copy(self, extra) stages = [stage.copy(extra) for stage in that.getStages()] return that.setStages(stages) @since("2.0.0") def write(self): """Returns an MLWriter instance for this ML instance.""" allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.getStages()) if allStagesAreJava: return JavaMLWriter(self) return PipelineWriter(self) @classmethod @since("2.0.0") def read(cls): """Returns an MLReader instance for this class.""" return PipelineReader(cls) @classmethod def _from_java(cls, java_stage): """ Given a Java Pipeline, create and return a Python wrapper of it. Used for ML persistence. """ # Create a new instance of this stage. py_stage = cls() # Load information from java_stage to the instance. py_stages = [JavaParams._from_java(s) for s in java_stage.getStages()] py_stage.setStages(py_stages) py_stage._resetUid(java_stage.uid()) return py_stage def _to_java(self): """ Transfer this instance to a Java Pipeline. Used for ML persistence. :return: Java object equivalent to this instance. 
""" gateway = SparkContext._gateway cls = SparkContext._jvm.org.apache.spark.ml.PipelineStage java_stages = gateway.new_array(cls, len(self.getStages())) for idx, stage in enumerate(self.getStages()): java_stages[idx] = stage._to_java() _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.Pipeline", self.uid) _java_obj.setStages(java_stages) return _java_obj @inherit_doc class PipelineWriter(MLWriter): """ (Private) Specialization of :py:class:`MLWriter` for :py:class:`Pipeline` types """ def __init__(self, instance): super(PipelineWriter, self).__init__() self.instance = instance def saveImpl(self, path): stages = self.instance.getStages() PipelineSharedReadWrite.validateStages(stages) PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path) @inherit_doc class PipelineReader(MLReader): """ (Private) Specialization of :py:class:`MLReader` for :py:class:`Pipeline` types """ def __init__(self, cls): super(PipelineReader, self).__init__() self.cls = cls def load(self, path): metadata = DefaultParamsReader.loadMetadata(path, self.sc) if 'language' not in metadata['paramMap'] or metadata['paramMap']['language'] != 'Python': return JavaMLReader(self.cls).load(path) else: uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path) return Pipeline(stages=stages)._resetUid(uid) @inherit_doc class PipelineModelWriter(MLWriter): """ (Private) Specialization of :py:class:`MLWriter` for :py:class:`PipelineModel` types """ def __init__(self, instance): super(PipelineModelWriter, self).__init__() self.instance = instance def saveImpl(self, path): stages = self.instance.stages PipelineSharedReadWrite.validateStages(stages) PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path) @inherit_doc class PipelineModelReader(MLReader): """ (Private) Specialization of :py:class:`MLReader` for :py:class:`PipelineModel` types """ def __init__(self, cls): super(PipelineModelReader, self).__init__() self.cls = cls def load(self, path): metadata = 
DefaultParamsReader.loadMetadata(path, self.sc) if 'language' not in metadata['paramMap'] or metadata['paramMap']['language'] != 'Python': return JavaMLReader(self.cls).load(path) else: uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path) return PipelineModel(stages=stages)._resetUid(uid) @inherit_doc class PipelineModel(Model, MLReadable, MLWritable): """ Represents a compiled pipeline with transformers and fitted models. .. versionadded:: 1.3.0 """ def __init__(self, stages): super(PipelineModel, self).__init__() self.stages = stages def _transform(self, dataset): for t in self.stages: dataset = t.transform(dataset) return dataset @since("1.4.0") def copy(self, extra=None): """ Creates a copy of this instance. :param extra: extra parameters :returns: new instance """ if extra is None: extra = dict() stages = [stage.copy(extra) for stage in self.stages] return PipelineModel(stages) @since("2.0.0") def write(self): """Returns an MLWriter instance for this ML instance.""" allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.stages) if allStagesAreJava: return JavaMLWriter(self) return PipelineModelWriter(self) @classmethod @since("2.0.0") def read(cls): """Returns an MLReader instance for this class.""" return PipelineModelReader(cls) @classmethod def _from_java(cls, java_stage): """ Given a Java PipelineModel, create and return a Python wrapper of it. Used for ML persistence. """ # Load information from java_stage to the instance. py_stages = [JavaParams._from_java(s) for s in java_stage.stages()] # Create a new instance of this stage. py_stage = cls(py_stages) py_stage._resetUid(java_stage.uid()) return py_stage def _to_java(self): """ Transfer this instance to a Java PipelineModel. Used for ML persistence. :return: Java object equivalent to this instance. 
""" gateway = SparkContext._gateway cls = SparkContext._jvm.org.apache.spark.ml.Transformer java_stages = gateway.new_array(cls, len(self.stages)) for idx, stage in enumerate(self.stages): java_stages[idx] = stage._to_java() _java_obj =\ JavaParams._new_java_obj("org.apache.spark.ml.PipelineModel", self.uid, java_stages) return _java_obj @inherit_doc class PipelineSharedReadWrite(): """ .. note:: DeveloperApi Functions for :py:class:`MLReader` and :py:class:`MLWriter` shared between :py:class:`Pipeline` and :py:class:`PipelineModel` .. versionadded:: 2.3.0 """ @staticmethod def checkStagesForJava(stages): return all(isinstance(stage, JavaMLWritable) for stage in stages) @staticmethod def validateStages(stages): """ Check that all stages are Writable """ for stage in stages: if not isinstance(stage, MLWritable): raise ValueError("Pipeline write will fail on this pipeline " + "because stage %s of type %s is not MLWritable", stage.uid, type(stage)) @staticmethod def saveImpl(instance, stages, sc, path): """ Save metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel` - save metadata to path/metadata - save stages to stages/IDX_UID """ stageUids = [stage.uid for stage in stages] jsonParams = {'stageUids': stageUids, 'language': 'Python'} DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams) stagesDir = os.path.join(path, "stages") for index, stage in enumerate(stages): stage.write().save(PipelineSharedReadWrite .getStagePath(stage.uid, index, len(stages), stagesDir)) @staticmethod def load(metadata, sc, path): """ Load metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel` :return: (UID, list of stages) """ stagesDir = os.path.join(path, "stages") stageUids = metadata['paramMap']['stageUids'] stages = [] for index, stageUid in enumerate(stageUids): stagePath = \ PipelineSharedReadWrite.getStagePath(stageUid, index, len(stageUids), stagesDir) stage = DefaultParamsReader.loadParamsInstance(stagePath, sc) 
stages.append(stage) return (metadata['uid'], stages) @staticmethod def getStagePath(stageUid, stageIdx, numStages, stagesDir): """ Get path for saving the given stage. """ stageIdxDigits = len(str(numStages)) stageDir = str(stageIdx).zfill(stageIdxDigits) + "_" + stageUid stagePath = os.path.join(stagesDir, stageDir) return stagePath
apache-2.0
utensil-star/PeachPy
peachpy/arm/microarchitecture.py
6
3571
# This file is part of Peach-Py package and is licensed under the Simplified BSD license. # See license.rst for the full text of the license. from peachpy.arm.isa import Extension, Extensions class Microarchitecture: def __init__(self, name, extensions): self.name = name self.extensions = Extensions(*[prerequisite for extension in extensions for prerequisite in extension.prerequisites]) def is_supported(self, extension): return extension in self.extensions @property def id(self): return self.name.replace(" ", "") def __add__(self, extension): return Microarchitecture(self.name, self.extensions + extension) def __sub__(self, extension): return Microarchitecture(self.name, self.extensions - extension) def __str__(self): return self.name Default = None XScale = None ARM9, ARM11 = None, None CortexA5, CortexA7, CortexA8, CortexA9, CortexA12, CortexA15 = None, None, None, None, None, None Scorpion, Krait = None, None PJ4 = None Microarchitecture.Default = Microarchitecture('Default', Extension.All) Microarchitecture.XScale = Microarchitecture('XScale', [Extension.V5E, Extension.Thumb, Extension.XScale, Extension.WMMX2]) Microarchitecture.ARM9 = Microarchitecture('ARM9', [Extension.V5E, Extension.Thumb]) Microarchitecture.ARM11 = Microarchitecture('ARM11', [Extension.V6K, Extension.Thumb, Extension.VFP2, Extension.VFPVectorMode]) Microarchitecture.CortexA5 = Microarchitecture('Cortex A5', [Extension.V7MP, Extension.Thumb2, Extension.VFP4, Extension.VFPd32, Extension.NEON2]) Microarchitecture.CortexA7 = Microarchitecture('Cortex A7', [Extension.V7MP, Extension.Thumb2, Extension.Div, Extension.VFP4, Extension.VFPd32, Extension.NEON2]) Microarchitecture.CortexA8 = Microarchitecture('Cortex A8', [Extension.V7, Extension.Thumb2, Extension.VFP3, Extension.VFPd32, Extension.NEON]) Microarchitecture.CortexA9 = Microarchitecture('Cortex A9', [Extension.V7MP, Extension.Thumb2, Extension.VFP3, Extension.VFPHP]) Microarchitecture.CortexA12 = Microarchitecture('Cortex A12', 
[Extension.V7MP, Extension.Thumb2, Extension.Div, Extension.VFP4, Extension.VFPd32, Extension.NEON2]) Microarchitecture.CortexA15 = Microarchitecture('Cortex A15', [Extension.V7MP, Extension.Thumb2, Extension.Div, Extension.VFP4, Extension.VFPd32, Extension.NEON2]) Microarchitecture.Scorpion = Microarchitecture('Scorpion', [Extension.V7MP, Extension.Thumb2, Extension.VFP3, Extension.VFPd32, Extension.VFPHP, Extension.NEON, Extension.NEONHP]) Microarchitecture.Krait = Microarchitecture('Krait', [Extension.V7MP, Extension.Thumb2, Extension.Div, Extension.VFP4, Extension.VFPd32, Extension.NEON2]) Microarchitecture.PJ4 = Microarchitecture('PJ4', [Extension.V7, Extension.Thumb2, Extension.VFP3, Extension.WMMX2])
bsd-2-clause
loco-odoo/localizacion_co
openerp/addons-extra/odoo-pruebas/odoo-server/addons/l10n_be_invoice_bba/__init__.py
438
1101
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import partner import invoice # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ucldc/harvester
scripts/delete_couchdocs_by_obj_checksum.py
1
3470
#! /bin/env python # -*- coding: utf-8 -*- import sys import argparse from harvester.couchdb_init import get_couchdb from harvester.couchdb_sync_db_by_collection import delete_id_list from harvester.post_processing.couchdb_runner import CouchDBCollectionFilter def confirm_deletion(count, objChecksum, cid): prompt = "\nDelete {0} documents with object checksum {1} from Collection {2}? yes to confirm\n".format(count, objChecksum, cid) while True: ans = raw_input(prompt).lower() if ans == "yes": return True else: return False if __name__ == '__main__': parser = argparse.ArgumentParser( description='Delete all documents in given collection matching given object checksum. ' \ 'Use for metadata-only records that can only be identified by value in object field ' \ 'USAGE: delete_couchdocs_by_obj_checksum.py [collection id] [object value]') parser.add_argument('cid', help='Collection ID') parser.add_argument('objChecksum', help='CouchDB "object" value of documents to delete') args = parser.parse_args(sys.argv[1:]) if not args.cid or not args.objChecksum: parser.print_help() sys.exit(27) ids = [] _couchdb = get_couchdb() rows = CouchDBCollectionFilter(collection_key=args.cid, couchdb_obj=_couchdb) for row in rows: couchdoc = row.doc if 'object' in couchdoc and couchdoc['object'] == args.objChecksum: couchID = couchdoc['_id'] ids.append(couchID) if not ids: print 'No docs found with object checksum matching {}'.format(args.objChecksum) sys.exit(27) if confirm_deletion(len(ids), args.objChecksum, args.cid): num_deleted, delete_ids = delete_id_list(ids, _couchdb=_couchdb) print 'Deleted {} documents'.format(num_deleted) else: print "Exiting without deleting" # Copyright © 2016, Regents of the University of California # All rights reserved. 
# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # - Neither the name of the University of California nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.
bsd-3-clause
niphlod/pydal
pydal/parsers/sqlite.py
4
1042
from datetime import datetime, date from decimal import Decimal from ..adapters.sqlite import SQLite from .base import ListsParser, DateParser, TimeParser, DateTimeParser, \ JSONParser from . import parsers, for_type, before_parse @parsers.register_for(SQLite) class SQLiteParser( ListsParser, DateParser, TimeParser, DateTimeParser, JSONParser ): @before_parse('decimal') def decimal_extras(self, field_type): return {'decimals': field_type[8:-1].split(',')[-1]} @for_type('decimal') def _decimal(self, value, decimals): value = ('%.' + decimals + 'f') % value return Decimal(value) @for_type('date') def _date(self, value): if not isinstance(value, date): return DateParser._declared_parsers_['_date'].f(self, value) return value @for_type('datetime') def _datetime(self, value): if not isinstance(value, datetime): return DateTimeParser._declared_parsers_['_datetime'].f( self, value) return value
bsd-3-clause
robbertvanginkel/buck
third-party/py/pex/pex/commands/bdist_pex.py
39
2822
import os from distutils import log from setuptools import Command from pex.bin.pex import build_pex, configure_clp from pex.common import die from pex.variables import ENV # Suppress checkstyle violations due to setuptools command requirements. class bdist_pex(Command): # noqa description = "create a PEX file from a source distribution" # noqa user_options = [ # noqa ('bdist-all', None, 'pexify all defined entry points'), ('bdist-dir=', None, 'the directory into which pexes will be written, default: dist.'), ('pex-args=', None, 'additional arguments to the pex tool'), ] boolean_options = [ # noqa 'bdist-all', ] def initialize_options(self): self.bdist_all = False self.bdist_dir = None self.pex_args = '' def finalize_options(self): self.pex_args = self.pex_args.split() def _write(self, pex_builder, target, script=None): builder = pex_builder.clone() if script is not None: builder.set_script(script) builder.build(target) def run(self): name = self.distribution.get_name() version = self.distribution.get_version() parser, options_builder = configure_clp() package_dir = os.path.dirname(os.path.realpath(os.path.expanduser( self.distribution.script_name))) if self.bdist_dir is None: self.bdist_dir = os.path.join(package_dir, 'dist') options, reqs = parser.parse_args(self.pex_args) if options.entry_point or options.script: die('Must not specify entry_point or script to --pex-args') reqs = [package_dir] + reqs with ENV.patch(PEX_VERBOSE=str(options.verbosity)): pex_builder = build_pex(reqs, options, options_builder) def split_and_strip(entry_point): console_script, entry_point = entry_point.split('=', 2) return console_script.strip(), entry_point.strip() try: console_scripts = dict(split_and_strip(script) for script in self.distribution.entry_points.get('console_scripts', [])) except ValueError: console_scripts = {} if self.bdist_all: # Write all entry points into unversioned pex files. 
for script_name in console_scripts: target = os.path.join(self.bdist_dir, script_name) log.info('Writing %s to %s' % (script_name, target)) self._write(pex_builder, target, script=script_name) elif name in console_scripts: # The package has a namesake entry point, so use it. target = os.path.join(self.bdist_dir, name + '-' + version + '.pex') log.info('Writing %s to %s' % (name, target)) self._write(pex_builder, target, script=name) else: # The package has no namesake entry point, so build an environment pex. log.info('Writing environment pex into %s' % target) self._write(pex_builder, target, script=None)
apache-2.0
Jgarcia-IAS/ReporsitorioVacioOdoo
openerp/addons/hr_attendance/report/attendance_errors.py
377
3669
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime import time from openerp.osv import osv from openerp.report import report_sxw class attendance_print(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(attendance_print, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, 'lst': self._lst, 'total': self._lst_total, 'get_employees':self._get_employees, }) def _get_employees(self, emp_ids): emp_obj_list = self.pool.get('hr.employee').browse(self.cr, self.uid, emp_ids) return emp_obj_list def _lst(self, employee_id, dt_from, dt_to, max, *args): self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out')) res = self.cr.dictfetchall() for r in res: if r['action'] == 'sign_out': r['delay'] = -r['delay'] temp = r['delay'].seconds r['delay'] = str(r['delay']).split('.')[0] if abs(temp) < max*60: r['delay2'] 
= r['delay'] else: r['delay2'] = '/' return res def _lst_total(self, employee_id, dt_from, dt_to, max, *args): self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out')) res = self.cr.dictfetchall() if not res: return ('/','/') total2 = datetime.timedelta(seconds = 0, minutes = 0, hours = 0) total = datetime.timedelta(seconds = 0, minutes = 0, hours = 0) for r in res: if r['action'] == 'sign_out': r['delay'] = -r['delay'] total += r['delay'] if abs(r['delay'].seconds) < max*60: total2 += r['delay'] result_dict = { 'total': total and str(total).split('.')[0], 'total2': total2 and str(total2).split('.')[0] } return [result_dict] class report_hr_attendanceerrors(osv.AbstractModel): _name = 'report.hr_attendance.report_attendanceerrors' _inherit = 'report.abstract_report' _template = 'hr_attendance.report_attendanceerrors' _wrapped_report_class = attendance_print # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ResEnv/chain-api
chain/influx_client.py
1
7963
import requests from pytz import UTC from datetime import datetime from django.db import IntegrityError import itertools from time import sleep from chain.core.api import BadRequestException from itertools import izip from time import sleep EPOCH = UTC.localize(datetime.utcfromtimestamp(0)) HTTP_STATUS_SUCCESSFUL_WRITE = 204 class InfluxClient(object): def __init__(self, host, port, database, measurement): self._host = host self._port = port self._database = database self._measurement = measurement # Persist TCP connection self._session = requests self._url = 'http://' + self._host + ':' + self._port if self._database not in self.get_databases(): self.get('CREATE DATABASE ' + self._database) def request(self, method, url, params=None, data=None, headers=None, retries=4): sleeptime = 1 while True: response = self._session.request(method=method, url=url, params=params, data=data, headers=headers) if response.status_code / 100 == 2: return response retries -= 1 if retries == 0: return response sleep(sleeptime) sleeptime *= 2 def post(self, endpoint, data, query=False): if endpoint == 'write': url = self._url + '/write' else: url = self._url + '/query' if query: data = {'q': data} response = self.request('POST', url, {'db': self._database}, data) return response def make_post_query_string(self, site_id, device_id, sensor_id, metric, value, timestamp=None): data = '{0},sensor_id={1},site_id={2},device_id={3},metric={4} value={5}'.format(self._measurement, sensor_id, site_id, device_id, metric, value) if timestamp: timestamp = InfluxClient.convert_timestamp(timestamp) data += ' ' + str(timestamp) return data def post_data(self, site_id, device_id, sensor_id, metric, value, timestamp=None): query = self.make_post_query_string(site_id, device_id, sensor_id, metric, value, timestamp) response = self.post('write', query) if response.status_code != HTTP_STATUS_SUCCESSFUL_WRITE: querylines = query.splitlines() if len(querylines) > 20: querylines = querylines[0:20] + ["...{} 
more".format(len(querylines)-20)] raise IntegrityError('Failed Query(status {}):\nQuery:\n{}\n\nResponse:\n{}\n'.format( response.status_code, "\n".join(querylines), response.json())) return response def post_data_bulk(self, site_id, device_id, sensor_id, metric, values, timestamps): query = "" for (value, timestamp) in izip(values, timestamps): query += self.make_post_query_string(site_id, device_id, sensor_id, metric, value, timestamp) + "\n" # print("posting query:\n{}\n".format(query)) response = self.post('write', query) if response.status_code != HTTP_STATUS_SUCCESSFUL_WRITE: querylines = query.splitlines() if len(querylines) > 20: querylines = querylines[0:20] + ["...{} more".format(len(querylines)-20)] raise IntegrityError('Failed Query(status {}):\nQuery:{}\nResponse:\n{}'.format( response.status_code, "\n".join(querylines), response.json())) return response def get(self, query, database=False, **kwargs): # database argument should be true for any sensor data queries kwargs['q'] = query if database: kwargs['db'] = self._database return self.request('GET', self._url + '/query', kwargs) def get_sensor_data(self, filters): if 'aggtime' not in filters: measurement = self._measurement # arguements are unicode strings elif filters['aggtime'] == u'1h': measurement = self._measurement + '_1h' elif filters['aggtime'] == u'1d': measurement = self._measurement + '_1d' elif filters['aggtime'] == u'1w': measurement = self._measurement + '_1w' else: raise BadRequestException('Invalid argument for aggtime. 
Must be 1h, 1d, or 1w') # exclude the old values that don't have metrics query = "SELECT * FROM {0} WHERE sensor_id = '{1}' AND metric != ''".format(measurement, filters['sensor_id']) if 'timestamp__gte' in filters: timestamp_gte = InfluxClient.convert_timestamp(filters['timestamp__gte']) query += ' AND time >= {}'.format(timestamp_gte) if 'timestamp__lt' in filters: timestamp_lt = InfluxClient.convert_timestamp(filters['timestamp__lt']) query += ' AND time < {}'.format(timestamp_lt) result = self.get_values(self.get(query, True)) return result def get_last_sensor_data(self, sensor_id): query = "SELECT LAST(value) FROM {0} WHERE sensor_id = \'{1}\'".format(self._measurement, sensor_id) result = self.get_values(self.get(query, True)) return result def get_last_data_from_all_sensors(self, site_id): query = "SELECT LAST(*) FROM {0} WHERE site_id = \'{1}\' GROUP BY sensor_id".format(self._measurement, site_id) result = self.get_values(self.get(query, True)) return result def get_databases(self): response = self.get('SHOW DATABASES', False) series = response.json()['results'][0]['series'][0] if 'values' not in series: # there's only a values list if there's at least one value return [] return [sub[0] for sub in series['values']] # returns a list of dictionaries, with one dictionary for each series in the # query result. 
Each dictionary maps the column name to a list of data def get_values(self,response): json = response.json() try: if len(json['results'])==0: return [] except: import pdb pdb.set_trace() if 'series' not in json['results'][0]: return [] if len(json['results'][0]['series']) == 0: return [] if 'tags' in json['results'][0]['series'][0]: series = json['results'][0]['series'] result = [] for d in series: values = d['values'][0] columns = d['columns'] data = dict(itertools.izip(columns,values)) data.update(d['tags']) result.append(data) else: series = json['results'][0]['series'][0] values = series['values'] columns = series['columns'] result = [dict(itertools.izip(columns, value)) for value in values] return result @classmethod def convert_timestamp(cls, timestamp): if not timestamp.tzinfo: timestamp = UTC.localize(timestamp) return int((timestamp - EPOCH).total_seconds() * 1e9)
mit
karllessard/tensorflow
tensorflow/python/saved_model/simple_save.py
25
4169
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel simple save functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.saved_model import builder from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=['saved_model.simple_save']) @deprecation.deprecated( None, 'This function will only be available through the v1 compatibility ' 'library as tf.compat.v1.saved_model.simple_save.') def simple_save(session, export_dir, inputs, outputs, legacy_init_op=None): """Convenience function to build a SavedModel suitable for serving. In many common cases, saving models for serving will be as simple as: simple_save(session, export_dir, inputs={"x": x, "y": y}, outputs={"z": z}) Although in many cases it's not necessary to understand all of the many ways to configure a SavedModel, this method has a few practical implications: - It will be treated as a graph for inference / serving (i.e. 
uses the tag `saved_model.SERVING`) - The SavedModel will load in TensorFlow Serving and supports the [Predict API](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto). To use the Classify, Regress, or MultiInference APIs, please use either [tf.Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator) or the lower level [SavedModel APIs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md). - Some TensorFlow ops depend on information on disk or other information called "assets". These are generally handled automatically by adding the assets to the `GraphKeys.ASSET_FILEPATHS` collection. Only assets in that collection are exported; if you need more custom behavior, you'll need to use the [SavedModelBuilder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py). More information about SavedModel and signatures can be found here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md. Args: session: The TensorFlow session from which to save the meta graph and variables. export_dir: The path to which the SavedModel will be stored. inputs: dict mapping string input names to tensors. These are added to the SignatureDef as the inputs. outputs: dict mapping string output names to tensors. These are added to the SignatureDef as the outputs. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load. """ signature_def_map = { signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def_utils.predict_signature_def(inputs, outputs) } b = builder.SavedModelBuilder(export_dir) b.add_meta_graph_and_variables( session, tags=[tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), main_op=legacy_init_op, clear_devices=True) b.save()
apache-2.0
adw0rd/lettuce
tests/integration/lib/Django-1.3/django/contrib/localflavor/fi/forms.py
309
1803
""" FI-specific Form helpers """ import re from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, Select from django.utils.translation import ugettext_lazy as _ class FIZipCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a zip code in the format XXXXX.'), } def __init__(self, *args, **kwargs): super(FIZipCodeField, self).__init__(r'^\d{5}$', max_length=None, min_length=None, *args, **kwargs) class FIMunicipalitySelect(Select): """ A Select widget that uses a list of Finnish municipalities as its choices. """ def __init__(self, attrs=None): from fi_municipalities import MUNICIPALITY_CHOICES super(FIMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES) class FISocialSecurityNumber(Field): default_error_messages = { 'invalid': _('Enter a valid Finnish social security number.'), } def clean(self, value): super(FISocialSecurityNumber, self).clean(value) if value in EMPTY_VALUES: return u'' checkmarks = "0123456789ABCDEFHJKLMNPRSTUVWXY" result = re.match(r"""^ (?P<date>([0-2]\d|3[01]) (0\d|1[012]) (\d{2})) [A+-] (?P<serial>(\d{3})) (?P<checksum>[%s])$""" % checkmarks, value, re.VERBOSE | re.IGNORECASE) if not result: raise ValidationError(self.error_messages['invalid']) gd = result.groupdict() checksum = int(gd['date'] + gd['serial']) if checkmarks[checksum % len(checkmarks)] == gd['checksum'].upper(): return u'%s' % value.upper() raise ValidationError(self.error_messages['invalid'])
gpl-3.0
mjs/juju
acceptancetests/repository/trusty/mysql/hooks/lib/cluster_utils.py
19
3331
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

from lib.utils import (
    juju_log,
    relation_ids,
    relation_list,
    relation_get,
    get_unit_hostname,
    config_get
    )
import subprocess
import os


def is_clustered():
    """Return True if any unit on an 'ha' relation reports being clustered."""
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            clustered = relation_get('clustered',
                                     rid=r_id,
                                     unit=unit)
            if clustered:
                return True
    return False


def is_leader(resource):
    """Return True if this unit's hostname owns *resource* according to CRM.

    resource: str: name of the pacemaker resource to query.
    """
    cmd = [
        "crm", "resource",
        "show", resource
        ]
    try:
        status = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # crm failed (unknown resource / cluster down) - not the leader.
        return False
    else:
        if get_unit_hostname() in status:
            return True
        else:
            return False


def peer_units():
    """Return all peer unit names seen on the 'cluster' relation."""
    peers = []
    for r_id in (relation_ids('cluster') or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers


def oldest_peer(peers):
    """Return True if the local unit is the oldest (lowest-numbered) unit.

    peers: list of unit names such as 'mysql/3'.

    Unit numbers must be compared as integers: the previous string
    comparison made '10' sort before '2', electing the wrong unit as
    soon as a service grew past unit 9.
    """
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader(resource):
    """Return True if this unit should perform leader-only actions.

    Uses the CRM leader when clustered, otherwise falls back to the
    oldest peer unit.
    """
    if is_clustered():
        if not is_leader(resource):
            juju_log('INFO', 'Deferring action to CRM leader.')
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            juju_log('INFO', 'Deferring action to oldest service unit.')
            return False
    return True


def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    # Otherwise require a keystone identity-service relation supplying a
    # full certificate set.
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if (relation_get('https_keystone', rid=r_id, unit=unit) and
                    relation_get('ssl_cert', rid=r_id, unit=unit) and
                    relation_get('ssl_key', rid=r_id, unit=unit) and
                    relation_get('ca_cert', rid=r_id, unit=unit)):
                return True
    return False


def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    # Each fronting layer (haproxy, then the TLS reverse proxy) shifts the
    # backend down by 10 ports.
    i = 0
    if len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_haproxy_port(public_port):
    '''
    Description: Determine correct proxy listening port based on public IP +
    existence of HTTPS reverse proxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if https():
        i += 1
    return public_port - (i * 10)
agpl-3.0
jimi-c/ansible
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
17
30286
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}

DOCUMENTATION = '''
---
module: ec2_vpc_route_table
short_description: Manage route tables for AWS virtual private clouds
description:
  - Manage route tables for AWS virtual private clouds
version_added: "2.0"
author:
  - Robert Estelle (@erydo)
  - Rob White (@wimnat)
  - Will Thames (@willthames)
options:
  lookup:
    description: Look up route table by either tags or by route table ID.
      Non-unique tag lookup will fail. If no tags are specified then no
      lookup for an existing route table is performed and a new route table
      will be created. To change tags of a route table you must look up by id.
    default: tag
    choices: [ 'tag', 'id' ]
  propagating_vgw_ids:
    description: Enable route propagation from virtual gateways specified by ID.
  purge_routes:
    version_added: "2.3"
    description: Purge existing routes that are not found in routes.
    type: bool
    default: 'yes'
  purge_subnets:
    version_added: "2.3"
    description: Purge existing subnets that are not found in subnets.
      Ignored unless the subnets option is supplied.
    default: 'true'
  purge_tags:
    version_added: "2.5"
    description: Purge existing tags that are not found in route table
    type: bool
    default: 'no'
  route_table_id:
    description: The ID of the route table to update or delete.
  routes:
    description: List of routes in the route table. Routes are specified
      as dicts containing the keys 'dest' and one of 'gateway_id',
      'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'.
      If 'gateway_id' is specified, you can refer to the VPC's IGW by using
      the value 'igw'. Routes are required for present states.
  state:
    description: Create or destroy the VPC route table
    default: present
    choices: [ 'present', 'absent' ]
  subnets:
    description: An array of subnets to add to this route table. Subnets
      may be specified by either subnet ID, Name tag, or by a CIDR such as
      '10.0.0.0/24'.
  tags:
    description: >
      A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }.
      Tags are used to uniquely identify route tables within a VPC when the
      route_table_id is not supplied.
    aliases: [ "resource_tags" ]
  vpc_id:
    description: VPC ID of the VPC in which to create the route table.
    required: true
extends_documentation_fragment:
  - aws
  - ec2
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Basic creation example:
- name: Set up public subnet route table
  ec2_vpc_route_table:
    vpc_id: vpc-1245678
    region: us-west-1
    tags:
      Name: Public
    subnets:
      - "{{ jumpbox_subnet.subnet.id }}"
      - "{{ frontend_subnet.subnet.id }}"
      - "{{ vpn_subnet.subnet_id }}"
    routes:
      - dest: 0.0.0.0/0
        gateway_id: "{{ igw.gateway_id }}"
  register: public_route_table

- name: Set up NAT-protected route table
  ec2_vpc_route_table:
    vpc_id: vpc-1245678
    region: us-west-1
    tags:
      Name: Internal
    subnets:
      - "{{ application_subnet.subnet.id }}"
      - 'Database Subnet'
      - '10.0.0.0/8'
    routes:
      - dest: 0.0.0.0/0
        instance_id: "{{ nat.instance_id }}"
  register: nat_route_table

- name: delete route table
  ec2_vpc_route_table:
    vpc_id: vpc-1245678
    region: us-west-1
    route_table_id: "{{ route_table.id }}"
    lookup: id
    state: absent
'''

RETURN = '''
route_table:
  description: Route Table result
  returned: always
  type: complex
  contains:
    associations:
      description: List of subnets associated with the route table
      returned: always
      type: complex
      contains:
        main:
          description: Whether this is the main route table
          returned: always
          type: bool
          sample: false
        route_table_association_id:
          description: ID of association between route table and subnet
          returned: always
          type: string
          sample: rtbassoc-ab47cfc3
        route_table_id:
          description: ID of the route table
          returned: always
          type: string
          sample: rtb-bf779ed7
        subnet_id:
          description: ID of the subnet
          returned: always
          type: string
          sample: subnet-82055af9
    id:
      description: ID of the route table (same as route_table_id for backwards compatibility)
      returned: always
      type: string
      sample: rtb-bf779ed7
    propagating_vgws:
      description: List of Virtual Private Gateways propagating routes
      returned: always
      type: list
      sample: []
    route_table_id:
      description: ID of the route table
      returned: always
      type: string
      sample: rtb-bf779ed7
    routes:
      description: List of routes in the route table
      returned: always
      type: complex
      contains:
        destination_cidr_block:
          description: CIDR block of destination
          returned: always
          type: string
          sample: 10.228.228.0/22
        gateway_id:
          description: ID of the gateway
          returned: when gateway is local or internet gateway
          type: string
          sample: local
        instance_id:
          description: ID of a NAT instance
          returned: when the route is via an EC2 instance
          type: string
          sample: i-abcd123456789
        instance_owner_id:
          description: AWS account owning the NAT instance
          returned: when the route is via an EC2 instance
          type: string
          sample: 123456789012
        nat_gateway_id:
          description: ID of the NAT gateway
          returned: when the route is via a NAT gateway
          type: string
          sample: local
        origin:
          description: mechanism through which the route is in the table
          returned: always
          type: string
          sample: CreateRouteTable
        state:
          description: state of the route
          returned: always
          type: string
          sample: active
    tags:
      description: Tags applied to the route table
      returned: always
      type: dict
      sample:
        Name: Public route table
        Public: 'true'
    vpc_id:
      description: ID for the VPC in which the route lives
      returned: always
      type: string
      sample: vpc-6e2d2407
'''

import re
from time import sleep
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, get_aws_connection_info
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry

try:
    import botocore
except ImportError:
    pass  # handled by AnsibleAWSModule


CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
# NOTE: previously used [A-z0-9], which also matched the characters between
# 'Z' and 'a' in ASCII ([ \ ] ^ _ and backtick); use an explicit class.
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')


@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(connection, **params):
    """Call DescribeSubnets with exponential-backoff retries."""
    return connection.describe_subnets(**params)['Subnets']


def find_subnets(connection, module, vpc_id, identified_subnets):
    """
    Finds a list of subnets, each identified either by a raw ID, a unique
    'Name' tag, or a CIDR such as 10.0.0.0/8.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    subnet_ids = []
    subnet_names = []
    subnet_cidrs = []
    for subnet in (identified_subnets or []):
        if re.match(SUBNET_RE, subnet):
            subnet_ids.append(subnet)
        elif re.match(CIDR_RE, subnet):
            subnet_cidrs.append(subnet)
        else:
            subnet_names.append(subnet)

    subnets_by_id = []
    if subnet_ids:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
        try:
            subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)

    subnets_by_cidr = []
    if subnet_cidrs:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
        try:
            subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)

    subnets_by_name = []
    if subnet_names:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
        try:
            subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)

        # Name-tag lookup must be unambiguous: fail on missing or duplicate names.
        for name in subnet_names:
            matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
            if matching_count == 0:
                module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
            elif matching_count > 1:
                module.fail_json(msg='Multiple subnets named "{0}"'.format(name))

    return subnets_by_id + subnets_by_cidr + subnets_by_name


def find_igw(connection, module, vpc_id):
    """
    Finds the Internet gateway for the given VPC ID.
    """
    filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
    try:
        igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id))
    if len(igw) == 1:
        return igw[0]['InternetGatewayId']
    elif len(igw) == 0:
        module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
    else:
        module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))


@AWSRetry.exponential_backoff()
def describe_tags_with_backoff(connection, resource_id):
    """Return the tags on resource_id as an Ansible dict, with retries."""
    filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id})
    paginator = connection.get_paginator('describe_tags')
    tags = paginator.paginate(Filters=filters).build_full_result()['Tags']
    return boto3_tag_list_to_ansible_dict(tags)


def tags_match(match_tags, candidate_tags):
    """Return True if every key/value in match_tags appears in candidate_tags."""
    return all((k in candidate_tags and candidate_tags[k] == v
                for k, v in match_tags.items()))


def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None):
    """Reconcile the tags on resource_id with the requested tags.

    Returns {'changed': bool, 'tags': dict} with the resulting tag set.
    """
    try:
        cur_tags = describe_tags_with_backoff(connection, resource_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Unable to list tags for VPC')

    to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags)

    if not to_add and not to_delete:
        return {'changed': False, 'tags': cur_tags}
    if check_mode:
        if not purge_tags:
            # Fix: dict.update() returns None, so the previous
            # `tags = cur_tags.update(tags)` reported `tags: None` in check
            # mode.  Merge into a copy instead.
            merged = dict(cur_tags)
            merged.update(tags)
            tags = merged
        return {'changed': True, 'tags': tags}

    if to_delete:
        try:
            connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't delete tags")
    if to_add:
        try:
            connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create tags")

    try:
        latest_tags = describe_tags_with_backoff(connection, resource_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Unable to list tags for VPC')
    return {'changed': True, 'tags': latest_tags}


@AWSRetry.exponential_backoff()
def describe_route_tables_with_backoff(connection, **params):
    """Call DescribeRouteTables with retries; None if the table id is unknown."""
    try:
        return connection.describe_route_tables(**params)['RouteTables']
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':
            return None
        else:
            raise


def get_route_table_by_id(connection, module, route_table_id):
    """Return the route table dict for route_table_id, or None if absent."""
    route_table = None
    try:
        route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route table")
    if route_tables:
        route_table = route_tables[0]

    return route_table


def get_route_table_by_tags(connection, module, vpc_id, tags):
    """Return the unique route table in vpc_id matching tags, or None.

    Fails the module if more than one table matches.
    """
    count = 0
    route_table = None
    filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
    try:
        route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route table")
    for table in route_tables:
        this_tags = describe_tags_with_backoff(connection, table['RouteTableId'])
        if tags_match(tags, this_tags):
            route_table = table
            count += 1

    if count > 1:
        module.fail_json(msg="Tags provided do not identify a unique route table")
    else:
        return route_table


def route_spec_matches_route(route_spec, route):
    """Return True if route_spec is a subset of the existing route.

    Normalizes NAT-gateway and VPC-endpoint specs (mutates route_spec) so
    that they compare against the keys AWS actually returns.
    """
    if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']:
        route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
    if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']:
        if route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
            route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')

    return set(route_spec.items()).issubset(route.items())


def route_spec_matches_route_cidr(route_spec, route):
    """Return True if the spec and route target the same destination CIDR."""
    return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock')


def rename_key(d, old_key, new_key):
    """Rename old_key to new_key in dict d (in place)."""
    d[new_key] = d.pop(old_key)


def index_of_matching_route(route_spec, routes_to_match):
    """Locate route_spec in routes_to_match.

    Returns ("exact", i) for a full match, ("replace", i) when only the
    destination CIDR matches, or None (implicitly) when nothing matches.
    """
    for i, route in enumerate(routes_to_match):
        if route_spec_matches_route(route_spec, route):
            return "exact", i
        elif route_spec_matches_route_cidr(route_spec, route):
            return "replace", i


def ensure_routes(connection=None, module=None, route_table=None, route_specs=None,
                  propagating_vgw_ids=None, check_mode=None, purge_routes=None):
    """Reconcile the routes of route_table with route_specs.

    Creates missing routes, replaces routes whose CIDR matches but whose
    target differs, and (when purge_routes) deletes leftover CreateRoute
    entries.  Returns {'changed': bool}.
    """
    routes_to_match = [route for route in route_table['Routes']]
    route_specs_to_create = []
    route_specs_to_recreate = []
    for route_spec in route_specs:
        match = index_of_matching_route(route_spec, routes_to_match)
        if match is None:
            if route_spec.get('DestinationCidrBlock'):
                route_specs_to_create.append(route_spec)
            else:
                module.warn("Skipping creating {0} because it has no destination cidr block. "
                            "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
        else:
            if match[0] == "replace":
                if route_spec.get('DestinationCidrBlock'):
                    route_specs_to_recreate.append(route_spec)
                else:
                    module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
            # Matched routes are removed from the purge candidates.
            del routes_to_match[match[1]]

    routes_to_delete = []
    if purge_routes:
        for r in routes_to_match:
            if not r.get('DestinationCidrBlock'):
                module.warn("Skipping purging route {0} because it has no destination cidr block. "
                            "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r))
                continue
            # Only purge routes the user could have created; leave 'local'
            # and propagated routes alone.
            if r['Origin'] == 'CreateRoute':
                routes_to_delete.append(r)

    changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
    if changed and not check_mode:
        for route in routes_to_delete:
            try:
                connection.delete_route(RouteTableId=route_table['RouteTableId'],
                                        DestinationCidrBlock=route['DestinationCidrBlock'])
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't delete route")

        for route_spec in route_specs_to_recreate:
            try:
                connection.replace_route(RouteTableId=route_table['RouteTableId'],
                                         **route_spec)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't recreate route")

        for route_spec in route_specs_to_create:
            try:
                connection.create_route(RouteTableId=route_table['RouteTableId'],
                                        **route_spec)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't create route")

    return {'changed': bool(changed)}


def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None,
                              check_mode=None):
    """Associate subnet_id with route_table_id, moving it off any other table.

    Returns {'changed': bool, 'association_id': str} (association_id absent
    in check mode when a change would be made).
    """
    filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
    try:
        route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route tables")
    for route_table in route_tables:
        if route_table['RouteTableId'] is None:
            continue
        for a in route_table['Associations']:
            if a['Main']:
                continue
            if a['SubnetId'] == subnet_id:
                if route_table['RouteTableId'] == route_table_id:
                    return {'changed': False, 'association_id': a['RouteTableAssociationId']}
                else:
                    if check_mode:
                        return {'changed': True}
                    try:
                        connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId'])
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")

    try:
        # Fix: associate_route_table returns a response dict; extract the
        # AssociationId so callers always receive the id string, not the
        # whole API response.
        association_id = connection.associate_route_table(RouteTableId=route_table_id,
                                                          SubnetId=subnet_id)['AssociationId']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't associate subnet with route table")

    return {'changed': True, 'association_id': association_id}


def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None,
                               check_mode=None, purge_subnets=None):
    """Make the given subnets (and, if purging, only those) use route_table."""
    current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']]
    new_association_ids = []
    changed = False
    for subnet in subnets:
        result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'],
                                           route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'],
                                           check_mode=check_mode)
        changed = changed or result['changed']
        if changed and check_mode:
            return {'changed': True}
        new_association_ids.append(result['association_id'])

    if purge_subnets:
        to_delete = [a_id for a_id in current_association_ids
                     if a_id not in new_association_ids]

        for a_id in to_delete:
            changed = True
            if not check_mode:
                try:
                    connection.disassociate_route_table(AssociationId=a_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")

    return {'changed': changed}


def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None,
                       check_mode=None):
    """Enable VGW route propagation for any requested gateway not yet enabled."""
    changed = False
    gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']]
    to_add = set(propagating_vgw_ids) - set(gateways)
    if to_add:
        changed = True
        if not check_mode:
            for vgw_id in to_add:
                try:
                    connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'],
                                                            GatewayId=vgw_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, msg="Couldn't enable route propagation")

    return {'changed': changed}


def ensure_route_table_absent(connection, module):
    """Delete the route table identified by the module params, if it exists."""

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    purge_subnets = module.params.get('purge_subnets')

    if lookup == 'tag':
        if tags is not None:
            route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
        else:
            route_table = None
    elif lookup == 'id':
        route_table = get_route_table_by_id(connection, module, route_table_id)

    if route_table is None:
        return {'changed': False}

    # disassociate subnets before deleting route table
    if not module.check_mode:
        ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
                                   subnets=[], check_mode=False, purge_subnets=purge_subnets)
        try:
            connection.delete_route_table(RouteTableId=route_table['RouteTableId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Error deleting route table")

    return {'changed': True}


def get_route_table_info(connection, module, route_table):
    """Return the route table as a snake_cased result dict with tags."""
    result = get_route_table_by_id(connection, module, route_table['RouteTableId'])
    try:
        result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId'])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get tags for route table")
    result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
    # backwards compatibility
    result['id'] = result['route_table_id']
    return result


def create_route_spec(connection, module, vpc_id):
    """Translate the module's 'routes' param into boto3 CamelCase route specs.

    Resolves the literal 'igw' gateway to the VPC's Internet gateway and
    moves 'nat-*' gateways into nat_gateway_id.
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
            igw = find_igw(connection, module, vpc_id)
            route_spec['gateway_id'] = igw
        if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
            rename_key(route_spec, 'gateway_id', 'nat_gateway_id')

    return snake_dict_to_camel_dict(routes, capitalize_first=True)


def ensure_route_table_present(connection, module):
    """Create or update a route table per the module params and exit."""

    lookup = module.params.get('lookup')
    propagating_vgw_ids = module.params.get('propagating_vgw_ids')
    purge_routes = module.params.get('purge_routes')
    purge_subnets = module.params.get('purge_subnets')
    purge_tags = module.params.get('purge_tags')
    route_table_id = module.params.get('route_table_id')
    subnets = module.params.get('subnets')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    routes = create_route_spec(connection, module, vpc_id)

    changed = False

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, module, route_table_id)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")

    # If no route table returned then create new route table
    if route_table is None:
        changed = True
        if not module.check_mode:
            try:
                route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable']
                # try to wait for route table to be present before moving on
                get_waiter(
                    connection, 'route_table_exists'
                ).wait(
                    RouteTableIds=[route_table['RouteTableId']],
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Error creating route table")
        else:
            route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
            module.exit_json(changed=changed, route_table=route_table)

    if routes is not None:
        result = ensure_routes(connection=connection, module=module, route_table=route_table,
                               route_specs=routes, propagating_vgw_ids=propagating_vgw_ids,
                               check_mode=module.check_mode, purge_routes=purge_routes)
        changed = changed or result['changed']

    if propagating_vgw_ids is not None:
        result = ensure_propagation(connection=connection, module=module, route_table=route_table,
                                    propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode)
        changed = changed or result['changed']

    if tags is not None:
        # (A vestigial always-False `tags_valid` flag previously guarded this
        # branch; it never changed the outcome and has been removed.)
        result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'],
                             tags=tags, purge_tags=purge_tags, check_mode=module.check_mode)
        route_table['Tags'] = result['tags']
        changed = changed or result['changed']

    if subnets is not None:
        associated_subnets = find_subnets(connection, module, vpc_id, subnets)

        result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
                                            subnets=associated_subnets, check_mode=module.check_mode,
                                            purge_subnets=purge_subnets)
        changed = changed or result['changed']

    if changed:
        # pause to allow route table routes/subnets/associations to be updated before exiting with final state
        sleep(5)
    module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))


def main():
    """Module entry point: parse arguments and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            lookup=dict(default='tag', choices=['tag', 'id']),
            propagating_vgw_ids=dict(type='list'),
            purge_routes=dict(default=True, type='bool'),
            purge_subnets=dict(default=True, type='bool'),
            purge_tags=dict(default=False, type='bool'),
            route_table_id=dict(),
            routes=dict(default=[], type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            subnets=dict(type='list'),
            tags=dict(type='dict', aliases=['resource_tags']),
            vpc_id=dict()
        )
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['lookup', 'id', ['route_table_id']],
                                           ['lookup', 'tag', ['vpc_id']],
                                           ['state', 'present', ['vpc_id']]],
                              supports_check_mode=True)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    state = module.params.get('state')

    if state == 'present':
        result = ensure_route_table_present(connection, module)
    elif state == 'absent':
        result = ensure_route_table_absent(connection, module)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
ergenekonyigit/Numerical-Analysis-Examples
Python/leastsquares.py
1
2271
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Scott J. Johnson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#


def leastsquares(x, y):
    '''
    Example implementation of the Least Squares method for calculating
    a best-fit line through a set of points.

    Linear least squares

    Args:
        x: array of floats representing x values for each point
        y: array of floats representing y values for each point

    Returns:
        (float, float): representing the y-intercept and slope of the
        best-fit line

    Raises:
        ValueError: if the two arrays are not the same length, if they
            are empty, or if the fit is degenerate (all x values equal,
            including the single-point case), which would otherwise
            surface as a ZeroDivisionError
    '''
    if len(x) != len(y):
        raise ValueError('Point arrays must be equal length')

    numberOfPoints = len(x)
    if numberOfPoints == 0:
        raise ValueError('At least one point is required')

    sumX = sum(x)
    sumY = sum(y)
    sumXYProduct = sum(x[i] * y[i] for i in range(numberOfPoints))
    sumXSquared = sum(map(lambda a: a ** 2, x))

    # The normal-equation denominator is zero exactly when all x values
    # coincide (a vertical line) - no finite slope exists then.
    denominator = numberOfPoints * sumXSquared - sumX ** 2
    if denominator == 0:
        raise ValueError('Cannot fit a line: all x values are identical')

    xBar = sumX / numberOfPoints
    yBar = sumY / numberOfPoints

    a1 = (numberOfPoints * sumXYProduct - sumX * sumY) / denominator
    a0 = yBar - a1 * xBar

    return a0, a1


if __name__ == '__main__':
    # Demo only - guarded so importing this module has no side effects.
    x = (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0)
    y = (0.5, 2.5, 2.0, 4.0, 3.5, 6.0, 5.5)
    print ("least squares fit ==> y = %.10f + %.10fx" % leastsquares(x, y))
mit
sekikn/incubator-airflow
tests/providers/google/cloud/hooks/test_cloud_memorystore.py
5
25979
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Dict, Sequence, Tuple from unittest import TestCase, mock from unittest.mock import PropertyMock from google.api_core.retry import Retry from google.cloud.exceptions import NotFound from google.cloud.memcache_v1beta2.types import cloud_memcache from google.cloud.redis_v1.types import Instance from airflow import version from airflow.exceptions import AirflowException from airflow.providers.google.cloud.hooks.cloud_memorystore import ( CloudMemorystoreHook, CloudMemorystoreMemcachedHook, ) from tests.providers.google.cloud.utils.base_gcp_mock import ( GCP_PROJECT_ID_HOOK_UNIT_TEST, mock_base_gcp_hook_default_project_id, mock_base_gcp_hook_no_default_project_id, ) TEST_GCP_CONN_ID = "test-gcp-conn-id" # type: str TEST_DELEGATE_TO = "test-delegate-to" # type: str TEST_LOCATION = "test-location" # type: str TEST_INSTANCE_ID = "test-instance-id" # type: str TEST_PROJECT_ID = "test-project-id" # type: str TEST_RETRY = Retry() # type: Retry TEST_TIMEOUT = 10 # type: float TEST_METADATA = [("KEY", "VALUE")] # type: Sequence[Tuple[str, str]] TEST_PAGE_SIZE = 100 # type: int TEST_UPDATE_MASK = {"paths": ["memory_size_gb"]} # type: Dict TEST_UPDATE_MASK_MEMCACHED = {"displayName": "updated 
name"} # type: Dict TEST_PARENT = "projects/test-project-id/locations/test-location" # type: str TEST_NAME = "projects/test-project-id/locations/test-location/instances/test-instance-id" # type: str TEST_PARENT_DEFAULT_PROJECT_ID = "projects/{}/locations/test-location".format( GCP_PROJECT_ID_HOOK_UNIT_TEST ) # type: str TEST_NAME_DEFAULT_PROJECT_ID = "projects/{}/locations/test-location/instances/test-instance-id".format( GCP_PROJECT_ID_HOOK_UNIT_TEST ) # type: str class TestCloudMemorystoreWithDefaultProjectIdHook(TestCase): def setUp( self, ): with mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.hook = CloudMemorystoreHook(gcp_conn_id="test") @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_create_instance_when_exists(self, mock_get_conn, mock_project_id): mock_get_conn.return_value.get_instance.return_value = Instance(name=TEST_NAME) result = self.hook.create_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) self.assertEqual(Instance(name=TEST_NAME), result) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_create_instance_when_not_exists(self, mock_get_conn, mock_project_id): 
mock_get_conn.return_value.get_instance.side_effect = [ NotFound("Instance not found"), Instance(name=TEST_NAME), ] mock_get_conn.return_value.create_instance.return_value.result.return_value = Instance(name=TEST_NAME) result = self.hook.create_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.has_calls( [ mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA), mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA), ] ) mock_get_conn.return_value.create_instance.assert_called_once_with( request=dict( parent=TEST_PARENT_DEFAULT_PROJECT_ID, instance=Instance( name=TEST_NAME, labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")}, ), instance_id=TEST_INSTANCE_ID, ), metadata=TEST_METADATA, retry=TEST_RETRY, timeout=TEST_TIMEOUT, ) self.assertEqual(Instance(name=TEST_NAME), result) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_delete_instance(self, mock_get_conn, mock_project_id): self.hook.delete_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.delete_instance.assert_called_once_with( request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) 
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_get_instance(self, mock_get_conn, mock_project_id): self.hook.get_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_list_instances(self, mock_get_conn, mock_project_id): self.hook.list_instances( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, page_size=TEST_PAGE_SIZE, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.list_instances.assert_called_once_with( request=dict(parent=TEST_PARENT_DEFAULT_PROJECT_ID, page_size=TEST_PAGE_SIZE), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_update_instance(self, mock_get_conn, mock_project_id): self.hook.update_instance( # pylint: disable=no-value-for-parameter update_mask=TEST_UPDATE_MASK, instance=Instance(name=TEST_NAME), location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.update_instance.assert_called_once_with( request=dict(update_mask=TEST_UPDATE_MASK, instance=Instance(name=TEST_NAME_DEFAULT_PROJECT_ID)), 
retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) class TestCloudMemorystoreWithoutDefaultProjectIdHook(TestCase): def setUp( self, ): with mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.__init__", new=mock_base_gcp_hook_no_default_project_id, ): self.hook = CloudMemorystoreHook(gcp_conn_id="test") @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_create_instance_when_exists(self, mock_get_conn): mock_get_conn.return_value.get_instance.return_value = Instance(name=TEST_NAME) result = self.hook.create_instance( location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=Instance(name=TEST_NAME), project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( request=dict(name="projects/test-project-id/locations/test-location/instances/test-instance-id"), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) self.assertEqual(Instance(name=TEST_NAME), result) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_create_instance_when_not_exists(self, mock_get_conn): mock_get_conn.return_value.get_instance.side_effect = [ NotFound("Instance not found"), Instance(name=TEST_NAME), ] mock_get_conn.return_value.create_instance.return_value.result.return_value = Instance(name=TEST_NAME) result = self.hook.create_instance( location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=Instance(name=TEST_NAME), project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.has_calls( [ mock.call( name="projects/test-project-id/locations/test-location/instances/test-instance-id", retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ), mock.call( 
name="projects/test-project-id/locations/test-location/instances/test-instance-id", retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ), ] ) mock_get_conn.return_value.create_instance.assert_called_once_with( request=dict( parent=TEST_PARENT, instance=Instance( name=TEST_NAME, labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")}, ), instance_id=TEST_INSTANCE_ID, ), metadata=TEST_METADATA, retry=TEST_RETRY, timeout=TEST_TIMEOUT, ) self.assertEqual(Instance(name=TEST_NAME), result) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=None, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_create_instance_without_project_id(self, mock_get_conn, mock_project_id): with self.assertRaises(AirflowException): self.hook.create_instance( location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=Instance(name=TEST_NAME), project_id=None, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_delete_instance(self, mock_get_conn): self.hook.delete_instance( location=TEST_LOCATION, instance=TEST_INSTANCE_ID, project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.delete_instance.assert_called_once_with( request=dict(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=None, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_delete_instance_without_project_id(self, mock_get_conn, mock_project_id): with self.assertRaises(AirflowException): self.hook.delete_instance( location=TEST_LOCATION, 
instance=Instance(name=TEST_NAME), project_id=None, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_get_instance(self, mock_get_conn): self.hook.get_instance( location=TEST_LOCATION, instance=TEST_INSTANCE_ID, project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( request=dict(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=None, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_get_instance_without_project_id(self, mock_get_conn, mock_project_id): with self.assertRaises(AirflowException): self.hook.get_instance( location=TEST_LOCATION, instance=Instance(name=TEST_NAME), project_id=None, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_list_instances(self, mock_get_conn): self.hook.list_instances( location=TEST_LOCATION, page_size=TEST_PAGE_SIZE, project_id=TEST_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.list_instances.assert_called_once_with( request=dict(parent=TEST_PARENT, page_size=TEST_PAGE_SIZE), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=None, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_list_instances_without_project_id(self, mock_get_conn, mock_project_id): with self.assertRaises(AirflowException): 
self.hook.list_instances( location=TEST_LOCATION, page_size=TEST_PAGE_SIZE, project_id=None, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_update_instance(self, mock_get_conn): self.hook.update_instance( update_mask=TEST_UPDATE_MASK, instance=Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, project_id=TEST_PROJECT_ID, ) mock_get_conn.return_value.update_instance.assert_called_once_with( request=dict(update_mask={'paths': ['memory_size_gb']}, instance=Instance(name=TEST_NAME)), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=None, ) @mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn") def test_update_instance_without_project_id(self, mock_get_conn, mock_project_id): with self.assertRaises(AirflowException): self.hook.update_instance( # pylint: disable=no-value-for-parameter update_mask=TEST_UPDATE_MASK, instance=Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) class TestCloudMemorystoreMemcachedWithDefaultProjectIdHook(TestCase): def setUp( self, ): with mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.hook = CloudMemorystoreMemcachedHook(gcp_conn_id="test") @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_create_instance_when_exists(self, mock_get_conn, mock_project_id): mock_get_conn.return_value.get_instance.return_value 
= cloud_memcache.Instance(name=TEST_NAME) result = self.hook.create_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=cloud_memcache.Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA ) self.assertEqual(cloud_memcache.Instance(name=TEST_NAME), result) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_create_instance_when_not_exists(self, mock_get_conn, mock_project_id): mock_get_conn.return_value.get_instance.side_effect = [ NotFound("Instance not found"), cloud_memcache.Instance(name=TEST_NAME), ] mock_get_conn.return_value.create_instance.return_value.result.return_value = cloud_memcache.Instance( name=TEST_NAME ) result = self.hook.create_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, instance=cloud_memcache.Instance(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.has_calls( [ mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA), mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA), ] ) mock_get_conn.return_value.create_instance.assert_called_once_with( resource=cloud_memcache.Instance( name=TEST_NAME, labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")}, ), instance_id=TEST_INSTANCE_ID, metadata=TEST_METADATA, parent=TEST_PARENT_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, ) 
self.assertEqual(cloud_memcache.Instance(name=TEST_NAME), result) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_delete_instance(self, mock_get_conn, mock_project_id): self.hook.delete_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.delete_instance.assert_called_once_with( name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_get_instance(self, mock_get_conn, mock_project_id): self.hook.get_instance( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, instance=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.get_instance.assert_called_once_with( name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_list_instances(self, mock_get_conn, mock_project_id): self.hook.list_instances( # pylint: disable=no-value-for-parameter location=TEST_LOCATION, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) 
mock_get_conn.return_value.list_instances.assert_called_once_with( parent=TEST_PARENT_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) @mock.patch( 'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id', new_callable=PropertyMock, return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST, ) @mock.patch( "airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn" ) def test_update_instance(self, mock_get_conn, mock_project_id): self.hook.update_instance( # pylint: disable=no-value-for-parameter update_mask=TEST_UPDATE_MASK_MEMCACHED, instance=cloud_memcache.Instance(name=TEST_NAME), location=TEST_LOCATION, instance_id=TEST_INSTANCE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_conn.return_value.update_instance.assert_called_once_with( update_mask=TEST_UPDATE_MASK_MEMCACHED, resource=cloud_memcache.Instance(name=TEST_NAME_DEFAULT_PROJECT_ID), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, )
apache-2.0
snoonetIRC/CloudBot
plugins/metars.py
3
1095
import requests from cloudbot import hook api_url_metar = "http://api.av-wx.com/metar/" api_url_taf = "http://api.av-wx.com/taf/" def lookup(text, url): station = text.split(' ')[0].upper() if len(station) != 4: return "please specify a valid station code see http://weather.rap.ucar.edu/surface/stations.txt for a list." request = requests.get(url + station) if request.status_code == 404: return "Station not found" request.raise_for_status() r = request.json()['reports'][0] out = r['name'] + ": " + r['raw_text'] return out @hook.command() def metar(text): """[ICAO station code] - returns the metars information for the specified station. A list of station codes can be found here: http://weather.rap.ucar.edu/surface/stations.txt""" return lookup(text, api_url_metar) @hook.command() def taf(text): """[ICAO station code] - returns the taf information for the specified station. A list of station codes can be found here: http://weather.rap.ucar.edu/surface/stations.txt""" return lookup(text, api_url_taf)
gpl-3.0
z1gm4/desarrollo_web_udp
env/lib/python2.7/site-packages/django/contrib/syndication/views.py
137
8793
from __future__ import unicode_literals from calendar import timegm from django.conf import settings from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.http import Http404, HttpResponse from django.template import TemplateDoesNotExist, loader from django.utils import feedgenerator, six from django.utils.encoding import force_text, iri_to_uri, smart_text from django.utils.html import escape from django.utils.http import http_date from django.utils.timezone import get_default_timezone, is_naive, make_aware def add_domain(domain, url, secure=False): protocol = 'https' if secure else 'http' if url.startswith('//'): # Support network-path reference (see #16753) - RSS requires a protocol url = '%s:%s' % (protocol, url) elif not url.startswith(('http://', 'https://', 'mailto:')): url = iri_to_uri('%s://%s%s' % (protocol, domain, url)) return url class FeedDoesNotExist(ObjectDoesNotExist): pass class Feed(object): feed_type = feedgenerator.DefaultFeed title_template = None description_template = None def __call__(self, request, *args, **kwargs): try: obj = self.get_object(request, *args, **kwargs) except ObjectDoesNotExist: raise Http404('Feed object does not exist.') feedgen = self.get_feed(obj, request) response = HttpResponse(content_type=feedgen.content_type) if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'): # if item_pubdate or item_updateddate is defined for the feed, set # header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED response['Last-Modified'] = http_date( timegm(feedgen.latest_post_date().utctimetuple())) feedgen.write(response, 'utf-8') return response def item_title(self, item): # Titles should be double escaped by default (see #6533) return escape(force_text(item)) def item_description(self, item): return force_text(item) def item_link(self, item): try: return item.get_absolute_url() except AttributeError: raise 
ImproperlyConfigured( 'Give your %s class a get_absolute_url() method, or define an ' 'item_link() method in your Feed class.' % item.__class__.__name__ ) def item_enclosures(self, item): enc_url = self.__get_dynamic_attr('item_enclosure_url', item) if enc_url: enc = feedgenerator.Enclosure( url=smart_text(enc_url), length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)), mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item)), ) return [enc] return [] def __get_dynamic_attr(self, attname, obj, default=None): try: attr = getattr(self, attname) except AttributeError: return default if callable(attr): # Check co_argcount rather than try/excepting the function and # catching the TypeError, because something inside the function # may raise the TypeError. This technique is more accurate. try: code = six.get_function_code(attr) except AttributeError: code = six.get_function_code(attr.__call__) if code.co_argcount == 2: # one argument is 'self' return attr(obj) else: return attr() return attr def feed_extra_kwargs(self, obj): """ Returns an extra keyword arguments dictionary that is used when initializing the feed generator. """ return {} def item_extra_kwargs(self, item): """ Returns an extra keyword arguments dictionary that is used with the `add_item` call of the feed generator. """ return {} def get_object(self, request, *args, **kwargs): return None def get_context_data(self, **kwargs): """ Returns a dictionary to use as extra context if either ``self.description_template`` or ``self.item_template`` are used. Default implementation preserves the old behavior of using {'obj': item, 'site': current_site} as the context. """ return {'obj': kwargs.get('item'), 'site': kwargs.get('site')} def get_feed(self, obj, request): """ Returns a feedgenerator.DefaultFeed object, fully populated, for this feed. Raises FeedDoesNotExist for invalid parameters. 
""" current_site = get_current_site(request) link = self.__get_dynamic_attr('link', obj) link = add_domain(current_site.domain, link, request.is_secure()) feed = self.feed_type( title=self.__get_dynamic_attr('title', obj), subtitle=self.__get_dynamic_attr('subtitle', obj), link=link, description=self.__get_dynamic_attr('description', obj), language=settings.LANGUAGE_CODE, feed_url=add_domain( current_site.domain, self.__get_dynamic_attr('feed_url', obj) or request.path, request.is_secure(), ), author_name=self.__get_dynamic_attr('author_name', obj), author_link=self.__get_dynamic_attr('author_link', obj), author_email=self.__get_dynamic_attr('author_email', obj), categories=self.__get_dynamic_attr('categories', obj), feed_copyright=self.__get_dynamic_attr('feed_copyright', obj), feed_guid=self.__get_dynamic_attr('feed_guid', obj), ttl=self.__get_dynamic_attr('ttl', obj), **self.feed_extra_kwargs(obj) ) title_tmp = None if self.title_template is not None: try: title_tmp = loader.get_template(self.title_template) except TemplateDoesNotExist: pass description_tmp = None if self.description_template is not None: try: description_tmp = loader.get_template(self.description_template) except TemplateDoesNotExist: pass for item in self.__get_dynamic_attr('items', obj): context = self.get_context_data(item=item, site=current_site, obj=obj, request=request) if title_tmp is not None: title = title_tmp.render(context, request) else: title = self.__get_dynamic_attr('item_title', item) if description_tmp is not None: description = description_tmp.render(context, request) else: description = self.__get_dynamic_attr('item_description', item) link = add_domain( current_site.domain, self.__get_dynamic_attr('item_link', item), request.is_secure(), ) enclosures = self.__get_dynamic_attr('item_enclosures', item) author_name = self.__get_dynamic_attr('item_author_name', item) if author_name is not None: author_email = self.__get_dynamic_attr('item_author_email', item) author_link = 
self.__get_dynamic_attr('item_author_link', item) else: author_email = author_link = None tz = get_default_timezone() pubdate = self.__get_dynamic_attr('item_pubdate', item) if pubdate and is_naive(pubdate): pubdate = make_aware(pubdate, tz) updateddate = self.__get_dynamic_attr('item_updateddate', item) if updateddate and is_naive(updateddate): updateddate = make_aware(updateddate, tz) feed.add_item( title=title, link=link, description=description, unique_id=self.__get_dynamic_attr('item_guid', item, link), unique_id_is_permalink=self.__get_dynamic_attr( 'item_guid_is_permalink', item), enclosures=enclosures, pubdate=pubdate, updateddate=updateddate, author_name=author_name, author_email=author_email, author_link=author_link, categories=self.__get_dynamic_attr('item_categories', item), item_copyright=self.__get_dynamic_attr('item_copyright', item), **self.item_extra_kwargs(item) ) return feed
gpl-3.0
Justin-Yuan/Image2Music-Generator
library/jython2.5.3/Lib/encodings/iso8859_4.py
593
13632
""" Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-4', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 
0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' 
# 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' 
# 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u0138' # 0xA2 -> LATIN SMALL LETTER KRA u'\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA u'\xa4' # 0xA4 -> CURRENCY SIGN u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE u'\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA u'\xa7' # 0xA7 -> SECTION SIGN u'\xa8' # 0xA8 -> DIAERESIS u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON u'\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON u'\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA u'\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE u'\xad' # 0xAD -> SOFT HYPHEN u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON u'\xaf' # 0xAF -> MACRON u'\xb0' # 0xB0 -> DEGREE SIGN u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK u'\u02db' # 0xB2 -> OGONEK u'\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA u'\xb4' # 0xB4 -> ACUTE ACCENT u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE u'\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA u'\u02c7' # 0xB7 -> CARON u'\xb8' # 0xB8 -> CEDILLA u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON u'\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON u'\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA u'\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE u'\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I 
WITH OGONEK u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON u'\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE u'\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE u'\xed' # 0xED -> 
LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON u'\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE u'\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON u'\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-2.0
jackkiej/SickRage
lib/imdb/parser/sql/objectadapter.py
62
7978
""" parser.sql.objectadapter module (imdb.parser.sql package). This module adapts the SQLObject ORM to the internal mechanism. Copyright 2008-2010 Davide Alberani <da@erlug.linux.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ import sys import logging from sqlobject import * from sqlobject.sqlbuilder import ISNULL, ISNOTNULL, AND, OR, IN, CONTAINSSTRING from dbschema import * _object_logger = logging.getLogger('imdbpy.parser.sql.object') # Maps our placeholders to SQLAlchemy's column types. MAP_COLS = { INTCOL: IntCol, UNICODECOL: UnicodeCol, STRINGCOL: StringCol } # Exception raised when Table.get(id) returns no value. NotFoundError = SQLObjectNotFound # class method to be added to the SQLObject class. def addIndexes(cls, ifNotExists=True): """Create all required indexes.""" for col in cls._imdbpySchema.cols: if col.index: idxName = col.index colToIdx = col.name if col.indexLen: colToIdx = {'column': col.name, 'length': col.indexLen} if idxName in [i.name for i in cls.sqlmeta.indexes]: # Check if the index is already present. 
continue idx = DatabaseIndex(colToIdx, name=idxName) cls.sqlmeta.addIndex(idx) try: cls.createIndexes(ifNotExists) except dberrors.OperationalError, e: _object_logger.warn('Skipping creation of the %s.%s index: %s' % (cls.sqlmeta.table, col.name, e)) addIndexes = classmethod(addIndexes) # Global repository for "fake" tables with Foreign Keys - need to # prevent troubles if addForeignKeys is called more than one time. FAKE_TABLES_REPOSITORY = {} def _buildFakeFKTable(cls, fakeTableName): """Return a "fake" table, with foreign keys where needed.""" countCols = 0 attrs = {} for col in cls._imdbpySchema.cols: countCols += 1 if col.name == 'id': continue if not col.foreignKey: # A non-foreign key column - add it as usual. attrs[col.name] = MAP_COLS[col.kind](**col.params) continue # XXX: Foreign Keys pointing to TableName.ColName not yet supported. thisColName = col.name if thisColName.endswith('ID'): thisColName = thisColName[:-2] fks = col.foreignKey.split('.', 1) foreignTableName = fks[0] if len(fks) == 2: foreignColName = fks[1] else: foreignColName = 'id' # Unused... #fkName = 'fk_%s_%s_%d' % (foreignTableName, foreignColName, # countCols) # Create a Foreign Key column, with the correct references. fk = ForeignKey(foreignTableName, name=thisColName, default=None) attrs[thisColName] = fk # Build a _NEW_ SQLObject subclass, with foreign keys, if needed. newcls = type(fakeTableName, (SQLObject,), attrs) return newcls def addForeignKeys(cls, mapTables, ifNotExists=True): """Create all required foreign keys.""" # Do not even try, if there are no FK, in this table. if not filter(None, [col.foreignKey for col in cls._imdbpySchema.cols]): return fakeTableName = 'myfaketable%s' % cls.sqlmeta.table if fakeTableName in FAKE_TABLES_REPOSITORY: newcls = FAKE_TABLES_REPOSITORY[fakeTableName] else: newcls = _buildFakeFKTable(cls, fakeTableName) FAKE_TABLES_REPOSITORY[fakeTableName] = newcls # Connect the class with foreign keys. 
newcls.setConnection(cls._connection) for col in cls._imdbpySchema.cols: if col.name == 'id': continue if not col.foreignKey: continue # Get the SQL that _WOULD BE_ run, if we had to create # this "fake" table. fkQuery = newcls._connection.createReferenceConstraint(newcls, newcls.sqlmeta.columns[col.name]) if not fkQuery: # Probably the db doesn't support foreign keys (SQLite). continue # Remove "myfaketable" to get references to _real_ tables. fkQuery = fkQuery.replace('myfaketable', '') # Execute the query. newcls._connection.query(fkQuery) # Disconnect it. newcls._connection.close() addForeignKeys = classmethod(addForeignKeys) # Module-level "cache" for SQLObject classes, to prevent # "class TheClass is already in the registry" errors, when # two or more connections to the database are made. # XXX: is this the best way to act? TABLES_REPOSITORY = {} def getDBTables(uri=None): """Return a list of classes to be used to access the database through the SQLObject ORM. The connection uri is optional, and can be used to tailor the db schema to specific needs.""" DB_TABLES = [] for table in DB_SCHEMA: if table.name in TABLES_REPOSITORY: DB_TABLES.append(TABLES_REPOSITORY[table.name]) continue attrs = {'_imdbpyName': table.name, '_imdbpySchema': table, 'addIndexes': addIndexes, 'addForeignKeys': addForeignKeys} for col in table.cols: if col.name == 'id': continue attrs[col.name] = MAP_COLS[col.kind](**col.params) # Create a subclass of SQLObject. # XXX: use a metaclass? I can't see any advantage. 
cls = type(table.name, (SQLObject,), attrs) DB_TABLES.append(cls) TABLES_REPOSITORY[table.name] = cls return DB_TABLES def toUTF8(s): """For some strange reason, sometimes SQLObject wants utf8 strings instead of unicode.""" return s.encode('utf_8') def setConnection(uri, tables, encoding='utf8', debug=False): """Set connection for every table.""" kw = {} # FIXME: it's absolutely unclear what we should do to correctly # support unicode in MySQL; with some versions of SQLObject, # it seems that setting use_unicode=1 is the _wrong_ thing to do. _uriLower = uri.lower() if _uriLower.startswith('mysql'): kw['use_unicode'] = 1 #kw['sqlobject_encoding'] = encoding kw['charset'] = encoding # On some server configurations, we will need to explictly enable # loading data from local files kw['local_infile'] = 1 conn = connectionForURI(uri, **kw) conn.debug = debug # XXX: doesn't work and a work-around was put in imdbpy2sql.py; # is there any way to modify the text_factory parameter of # a SQLite connection? #if uri.startswith('sqlite'): # major = sys.version_info[0] # minor = sys.version_info[1] # if major > 2 or (major == 2 and minor > 5): # sqliteConn = conn.getConnection() # sqliteConn.text_factory = str for table in tables: table.setConnection(conn) #table.sqlmeta.cacheValues = False # FIXME: is it safe to set table._cacheValue to False? Looks like # we can't retrieve correct values after an update (I think # it's never needed, but...) Anyway, these are set to False # for performance reason at insert time (see imdbpy2sql.py). table._cacheValue = False # Required by imdbpy2sql.py. conn.paramstyle = conn.module.paramstyle return conn
gpl-3.0
lucasplus/MABDI
mabdi/FilterDepthImageToSurface.py
1
7799
import vtk from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase from vtk.util import numpy_support from vtk.numpy_interface import dataset_adapter as dsa from Utilities import DebugTimeVTKFilter import numpy as np from scipy import ndimage from timeit import default_timer as timer import logging class FilterDepthImageToSurface(VTKPythonAlgorithmBase): """ vtkAlgorithm with input of vtkImageData and output of vtkPolyData This filter first defines a connectivity on the depth image that is like a checkerboard but with two triangles in each square. It then throws away all points farther than the param_farplane_threshold and all points with a large difference between neighbors (controlled with param_convolution_theshold) Input: Depth image Output: Mesh created by projecting depth image """ def __init__(self, param_farplane_threshold=1.0, param_convolution_threshold=0.01): """ Algorithm setup and define parameters. :param param_farplane_threshold: default=1.0 Values on the depth image range from 0.0-1.0. Points with depth values greater than param_farplane_threshold will be thrown away. :param param_convolution_threshold: default=0.01 Convolution is used to determine pixel neighbors with a large difference. If there is one, the point will be thrown away. This threshold controls sensitivity. 
""" VTKPythonAlgorithmBase.__init__(self, nInputPorts=1, inputType='vtkImageData', nOutputPorts=1, outputType='vtkPolyData') self._param_farplane_threshold = param_farplane_threshold self.param_convolution_theshold = param_convolution_threshold self._sizex = [] self._sizey = [] self._viewport = [] self._display_pts = [] self._viewport_pts = [] self._world_pts = [] self._points = vtk.vtkPoints() self._polys = vtk.vtkCellArray() self._polydata = vtk.vtkPolyData() self._polydata.SetPoints(self._points) self._polydata.SetPolys(self._polys) self._extract = vtk.vtkExtractPolyDataGeometry() DebugTimeVTKFilter(self._extract) self._extract.SetInputData(self._polydata) planefunc = vtk.vtkPlane() planefunc.SetNormal(0.0, -1.0, 0.0) planefunc.SetOrigin(0.0, -1.0, 0.0) self._extract.SetImplicitFunction(planefunc) def RequestData(self, request, inInfo, outInfo): logging.info('') start = timer() # input (vtkImageData) inp = vtk.vtkImageData.GetData(inInfo[0]) # if the vtkImageData size has changed or this is the first time # save new size info and initialize containers if (self._sizex, self._sizey, self._viewport) != (inp.sizex, inp.sizey, inp.viewport): (self._sizex, self._sizey) = (inp.sizex, inp.sizey) self._viewport = inp.viewport self._init_containers() # the incoming depth image di = numpy_support.vtk_to_numpy(inp.GetPointData().GetScalars())\ .reshape((self._sizey, self._sizex)) # add z values to viewport_pts based on incoming depth image self._viewport_pts[2, :] = di.reshape(-1) # project to world coordinates self._world_pts = np.dot(inp.tmat, self._viewport_pts) self._world_pts = self._world_pts / self._world_pts[3] """ Remove invalid points """ # index to pts outside sensor range (defined by vtkCamera clipping range) outside_range = ~(di < self._param_farplane_threshold) # find pixel neighbors with large differences in value # http://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html kh = np.array([[1, -1], [0, 0]]) edges_h = abs(ndimage.convolve(di, kh, 
mode='nearest', origin=-1)) > self.param_convolution_theshold kv = np.array([[1, 0], [-1, 0]]) edges_v = abs(ndimage.convolve(di, kv, mode='nearest', origin=-1)) > self.param_convolution_theshold # combine all the points found to be invalid # and set them to a value underneath the "floor of the environment" # http://stackoverflow.com/a/20528566/4068274 invalid_index = np.logical_or.reduce((outside_range.reshape(-1), edges_h.reshape(-1), edges_v.reshape(-1))) self._world_pts[0:3, invalid_index] = np.array([[0.0], [-2.0], [0.0]]) """ Update and set filter output """ # update vtkPoints vtkarray = dsa.numpyTovtkDataArray(self._world_pts[0:3, :].T) self._points.SetData(vtkarray) # update output (vtkPolyData) out = vtk.vtkPolyData.GetData(outInfo) self._extract.Update() logging.info('Number of triangles: {}'.format(self._extract.GetOutput().GetNumberOfCells())) out.ShallowCopy(self._extract.GetOutput()) end = timer() logging.info('Execution time {:.4f} seconds'.format(end - start)) return 1 def _init_containers(self): logging.info('Initializing arrays for projection calculation.') tstart = timer() # helper variables (width, height) (w, h) = (self._sizex, self._sizey) """ display points (list of all pixel coordinates) """ self._display_pts = np.ones((2, w * h)) self._display_pts[0, :], self._display_pts[1, :] = \ zip(*[(j, i) for i in np.arange(h) for j in np.arange(w)]) """ viewport points """ # https://github.com/Kitware/VTK/blob/52d45496877b00852a08a5b9819d109c2fd9bfab/Rendering/Core/vtkCoordinate.h#L26 self._viewport_pts = np.ones((4, self._display_pts.shape[1])) self._viewport_pts[0, :] = 2.0 * (self._display_pts[0, :] - w * self._viewport[0]) / \ (w * (self._viewport[2] - self._viewport[0])) - 1.0 self._viewport_pts[1, :] = 2.0 * (self._display_pts[1, :] - h * self._viewport[1]) / \ (h * (self._viewport[3] - self._viewport[1])) - 1.0 """ new world points (just initializing the container) """ self._world_pts = np.ones(self._viewport_pts.shape) """ cells (list of 
triangles created by connecting neighbors in depth image space ) """ # connectivity on the depth image is almost like a checkerboard pattern # except with two triangles in every checkerboard square nt = (2*w)*(h-1) # number of triangles cells = np.zeros((3, nt), dtype=np.int) i = 0 while i < (nt/2): if ((i+1) % w) != 0: # if on the side of the image skip cells[:, 2*i] = (i, i+1, w+i) cells[:, 2*i+1] = (i+1, w+i+1, w+i) i += 1 # remove columns with zeros (the ones we skipped in the while loop) index = np.where(cells.any(axis=0))[0] # all columns that are non zero cells = cells[:, index] # turn our connectivity list into a vtk object (vtkCellArray) for tpt in cells.T: self._polys.InsertNextCell(3) self._polys.InsertCellPoint(tpt[0]) self._polys.InsertCellPoint(tpt[1]) self._polys.InsertCellPoint(tpt[2]) self._polydata.SetPolys(self._polys) # time me tend = timer() logging.info('Initializing arrays for projection calculation {:.4f} seconds'.format(tend - tstart))
bsd-3-clause