repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/io_directx_bel/bel/fs.py | 3 | 2579 | # v0.1
import bpy
from os import path as os_path, listdir as os_listdir
from bpy import path as bpy_path
# cross platform paths (since ms conform to / path ;) )
# maybe add utf8 replace to old ascii blender builtin
# // can be omitted for relative
def clean(path) :
    """Normalize a path: unify separators to '/' and mark bare names
    as Blender-relative by prefixing '//'."""
    normalized = path.strip().replace('\\','/')
    return normalized if ('/') in normalized else '//' + normalized
## test for existence of a file or a dir
def exist(path) :
    """True when *path* names an existing file or directory
    (absolute or Blender-relative)."""
    return isfile(path) or isdir(path)
## test for existence of a file
def isfile(path) :
    """True if *path* is an existing file, trying the path as given
    first and then resolved as a Blender-relative ('//') path."""
    for candidate in (path, bpy_path.abspath(path)):
        if os_path.isfile(candidate):
            return True
    return False
## test for existence of a dir
def isdir(path) :
    """True if *path* is an existing directory, trying the path as given
    first and then resolved as a Blender-relative ('//') path."""
    for candidate in (path, bpy_path.abspath(path)):
        if os_path.isdir(candidate):
            return True
    return False
## returns a list of every absolute filepath
# to each file within the 'ext' extensions
# from a folder and its subfolders
# warning, in windows filename are returned in lowercase.
def scanDir(path,ext='all') :
    """Recursively collect file paths under *path*.

    :param path: folder to scan ('/'-separated, per this module's convention).
    :param ext: 'all' to accept every file, or an extension string / list of
        extension strings (without the dot) to filter by.
    :return: list of '/'-joined file paths for every matching file in *path*
        and its subfolders.

    Warning: on Windows, listed filenames may come back lowercase.
    """
    if ext != 'all' and not isinstance(ext, list):
        ext = [ext]  # accept a single extension given as a plain string
    files = []
    for item in os_listdir(path):
        # hoist the join: the original rebuilt this string up to three times
        full = path + '/' + item  # keep '/' joins, the file-wide convention
        if os_path.isfile(full):
            if ext == 'all' or item.split('.')[-1] in ext:
                files.append(full)
        elif os_path.isdir(full):
            files.extend(scanDir(full, ext))
    return files
def saveOptions(op,operator_name, tokens, filename='last_run'):
    """Write the operator option *tokens* out as a Blender preset script
    under the user scripts directory:
    presets/operator/<operator_name>/<filename>.py

    :param op: the operator instance (unused here; kept for interface
        compatibility with callers).
    :param operator_name: name of the operator the preset belongs to.
    :param tokens: mapping of property name -> value to persist.
    :param filename: preset file name without the '.py' suffix.
    """
    target_path = os_path.join("presets", "operator", operator_name)
    target_path = bpy.utils.user_resource('SCRIPTS',target_path,create=True)
    if target_path:
        filepath = os_path.join(target_path, filename) + ".py"
        # properties inherited from the Operator base class (rna_type, ...)
        # belong to every operator and must not be written to the preset
        properties_blacklist = bpy.types.Operator.bl_rna.properties.keys()
        # 'with' guarantees the file is closed even if a write raises
        with open(filepath, 'w') as file_preset:
            file_preset.write("import bpy\nop = bpy.context.active_operator\n\n")
            for key, value in tokens.items() :
                if key not in properties_blacklist :
                    # convert thin wrapped sequences to simple lists so repr()
                    # emits a plain literal; scalars are left untouched
                    try:
                        value = value[:]
                    except Exception:
                        pass
                    file_preset.write("op.%s = %r\n" % (key, value))
| gpl-3.0 |
dmilith/SublimeText3-dmilith | Packages/pygments/all/pygments/lexers/_mapping.py | 12 | 45823 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Every time
you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml',), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs',), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
}
if __name__ == '__main__':  # pragma: no cover
    import sys
    import os
    import importlib

    # Regenerate the LEXERS mapping above: walk the pygments.lexers
    # package, import every public submodule, and collect the metadata
    # (module, name, aliases, filenames, mimetypes) of each lexer that
    # the submodule exports via ``__all__``.
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                # ``root`` looks like '.' or './subpkg'; strip the leading
                # dot and normalize the OS path separator so the dotted
                # module path is also valid on Windows (where os.sep is
                # '\\' and the old '/'-only replace silently failed).
                pkg_suffix = root[1:].replace(os.sep, '.').replace('/', '.')
                module_name = 'pygments.lexers%s.%s' % (pkg_suffix,
                                                        filename[:-3])
                print(module_name)
                # import_module returns the leaf module directly, replacing
                # the old ``__import__(name, None, None, [''])`` hack.
                module = importlib.import_module(module_name)
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # sort them to make the diff minimal
    found_lexers.sort()
    # extract useful sourcecode from this file
    with open(__file__) as fp:
        content = fp.read()
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    with open(__file__, 'w') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
# license: mit
# Source: paolodedios/tensorflow -- tensorflow/compiler/mlir/tfr/examples/customization/ops_defs.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how the composition overrides the behavior of an existing op."""
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-function-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import app
from tensorflow.compiler.mlir.tfr.python import composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_array_ops as array_ops
from tensorflow.python.platform import flags
# ``Composite`` registers a Python function as the composition (the
# replacement implementation) of an existing TensorFlow op.
Composite = composite.Composite
# Command-line flags controlling what gets generated and where.
FLAGS = flags.FLAGS
# --output: destination path for the generated artifact.
flags.DEFINE_string(
    'output', None,
    'Path to write the genereated register op file and MLIR file.')
# --gen_register_op: True emits the C++ op-registration file,
# False emits the TFR MLIR definition instead (see main()).
flags.DEFINE_bool('gen_register_op', True,
                  'Generate register op cc file or tfr mlir file.')
# The original kernel is defined in 'tensorflow/python/framework/ops_test.py'
# and prints out the current graph def version.
@Composite('TestAttr')
def _override_test_attr_op():
  # Replace the 'TestAttr' kernel with a composition that simply returns
  # the float32 constant 100.0 (no graph-version printing).
  ret = array_ops.Const(value=100.0, dtype=dtypes.float32)
  return ret
def main(_):
  """Emit either the C++ op registration or the TFR MLIR for this module.

  The artifact is selected by --gen_register_op and written to --output,
  whose file extension must match the requested kind.
  """
  this_module = sys.modules[__name__]
  if FLAGS.gen_register_op:
    assert FLAGS.output.endswith('.cc')
    generated_code = gen_register_op(this_module, '_override_')
  else:
    assert FLAGS.output.endswith('.mlir')
    generated_code = tfr_gen_from_module(this_module, '_override_')
  target_dir = os.path.dirname(FLAGS.output)
  if not os.path.exists(target_dir):
    os.makedirs(target_dir)
  with open(FLAGS.output, 'w') as output_file:
    output_file.write(generated_code)
if __name__ == '__main__':
  app.run(main=main)
# license: apache-2.0
# Source: cuboxi/android_external_chromium_org -- build/linux/unbundle/replace_gyp_files.py
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces gyp files in tree with files from here that
make the build use system libraries.
"""
import optparse
import os.path
import shutil
import sys
# Maps a gyp define (passed as -Dflag=1) to the in-tree gyp file that
# should be swapped for the system-library variant next to this script.
REPLACEMENTS = {
  'use_system_expat': 'third_party/expat/expat.gyp',
  'use_system_ffmpeg': 'third_party/ffmpeg/ffmpeg.gyp',
  'use_system_flac': 'third_party/flac/flac.gyp',
  'use_system_harfbuzz': 'third_party/harfbuzz-ng/harfbuzz.gyp',
  'use_system_icu': 'third_party/icu/icu.gyp',
  'use_system_jsoncpp': 'third_party/jsoncpp/jsoncpp.gyp',
  'use_system_libevent': 'third_party/libevent/libevent.gyp',
  'use_system_libjpeg': 'third_party/libjpeg/libjpeg.gyp',
  'use_system_libpng': 'third_party/libpng/libpng.gyp',
  'use_system_libusb': 'third_party/libusb/libusb.gyp',
  'use_system_libvpx': 'third_party/libvpx/libvpx.gyp',
  'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
  'use_system_libxml': 'third_party/libxml/libxml.gyp',
  'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
  'use_system_openssl': 'third_party/openssl/openssl.gyp',
  'use_system_opus': 'third_party/opus/opus.gyp',
  'use_system_re2': 'third_party/re2/re2.gyp',
  'use_system_snappy': 'third_party/snappy/snappy.gyp',
  'use_system_speex': 'third_party/speex/speex.gyp',
  'use_system_sqlite': 'third_party/sqlite/sqlite.gyp',
  'use_system_v8': 'v8/tools/gyp/v8.gyp',
  'use_system_zlib': 'third_party/zlib/zlib.gyp',
}


def DoMain(argv):
  """Replace (or restore, with --undo) gyp files selected by -D defines.

  For every REPLACEMENTS flag passed as ``-Dflag=1`` the in-tree gyp file
  is backed up to ``<path>.orig`` and replaced with the system-library
  variant shipped next to this script.  ``--undo`` reverses the swap.
  Returns 0 on success.
  """
  my_dirname = os.path.dirname(__file__)
  source_tree_root = os.path.abspath(
      os.path.join(my_dirname, '..', '..', '..'))
  parser = optparse.OptionParser()
  # Accept arguments in gyp command-line syntax, so that the caller can re-use
  # command-line for this script and gyp.
  parser.add_option('-D', dest='defines', action='append')
  parser.add_option('--undo', action='store_true')
  options, args = parser.parse_args(argv)
  # optparse leaves 'defines' as None when no -D option was passed, in
  # which case the membership test below raised TypeError; normalize to
  # an empty list instead.
  defines = options.defines or []
  for flag, path in REPLACEMENTS.items():
    if '%s=1' % flag not in defines:
      continue
    if options.undo:
      # Restore original file, and also remove the backup.
      # This is meant to restore the source tree to its original state.
      os.rename(os.path.join(source_tree_root, path + '.orig'),
                os.path.join(source_tree_root, path))
    else:
      # Create a backup copy for --undo.
      shutil.copyfile(os.path.join(source_tree_root, path),
                      os.path.join(source_tree_root, path + '.orig'))
      # Copy the gyp file from directory of this script to target path.
      shutil.copyfile(os.path.join(my_dirname, os.path.basename(path)),
                      os.path.join(source_tree_root, path))
  return 0


if __name__ == '__main__':
  sys.exit(DoMain(sys.argv))
# license: bsd-3-clause
# Source: staranjeet/fjord -- vendor/packages/click/click/decorators.py
import sys
import inspect
from functools import update_wrapper
from ._compat import iteritems
from .utils import echo
def pass_context(f):
    """Flag a callback so the invocation machinery hands it the current
    context object as its first argument.
    """
    setattr(f, '__click_pass_context__', True)
    return f
def pass_obj(f):
    """Like :func:`pass_context`, but forwards only :attr:`Context.obj`
    instead of the context itself.  Useful when that object carries the
    state of a nested system.
    """
    @pass_context
    def wrapper(*args, **kwargs):
        ctx, remaining = args[0], args[1:]
        return ctx.invoke(f, ctx.obj, *remaining, **kwargs)
    return update_wrapper(wrapper, f)
def make_pass_decorator(object_type, ensure=False):
    """Build a decorator similar to :func:`pass_obj`, except that instead
    of the current context's object it passes the innermost context object
    of type ``object_type``.

    The generated decorator behaves roughly like::

        from functools import update_wrapper

        def decorator(f):
            @pass_context
            def new_func(ctx, *args, **kwargs):
                obj = ctx.find_object(object_type)
                return ctx.invoke(f, obj, *args, **kwargs)
            return update_wrapper(new_func, f)
        return decorator

    :param object_type: the type of the object to pass.
    :param ensure: if set to `True`, a new object will be created and
                   remembered on the context if it's not there yet.
    """
    def decorator(f):
        @pass_context
        def wrapper(*args, **kwargs):
            ctx = args[0]
            # ensure_object creates-and-caches; find_object only searches.
            lookup = ctx.ensure_object if ensure else ctx.find_object
            obj = lookup(object_type)
            if obj is None:
                raise RuntimeError('Managed to invoke callback without a '
                                   'context object of type %r existing'
                                   % object_type.__name__)
            return ctx.invoke(f, obj, *args[1:], **kwargs)
        return update_wrapper(wrapper, f)
    return decorator
def _make_command(f, name, attrs, cls):
    """Instantiate command class *cls* from callback *f*, gathering the
    parameters that decorators stashed on ``f.__click_params__``.
    """
    if isinstance(f, Command):
        raise TypeError('Attempted to convert a callback into a '
                        'command twice.')
    # Parameter decorators append bottom-up, so restore source order.
    try:
        params = f.__click_params__
        params.reverse()
        del f.__click_params__
    except AttributeError:
        params = []
    help = attrs.get('help')
    if help is not None:
        help = inspect.cleandoc(help)
    else:
        # Fall back to the callback's docstring.
        help = inspect.getdoc(f)
        if isinstance(help, bytes):
            help = help.decode('utf-8')
    attrs['help'] = help
    command_name = name or f.__name__.lower()
    return cls(name=command_name, callback=f, params=params, **attrs)
def command(name=None, cls=None, **attrs):
    """Turn the decorated function into a :class:`Command`, attaching all
    decorated :func:`option`\s and :func:`argument`\s as its parameters.

    The command name defaults to the lowercased function name; pass a
    different name as the first argument to override it.  All keyword
    arguments are forwarded to the underlying command class.  The result
    is a :class:`Command` instance usable as a command line utility or
    attachable to a command :class:`Group`.

    :param name: the name of the command. This defaults to the function
                 name.
    :param cls: the command class to instantiate. This defaults to
                :class:`Command`.
    """
    command_cls = Command if cls is None else cls
    def decorator(f):
        return _make_command(f, name, attrs, command_cls)
    return decorator
def group(name=None, **attrs):
    """Shortcut for :func:`command` with the ``cls`` parameter defaulting
    to :class:`Group`.
    """
    if 'cls' not in attrs:
        attrs['cls'] = Group
    return command(name, **attrs)
def _param_memo(f, param):
    """Record *param* on *f*: directly on a Command's params, otherwise on
    the ``__click_params__`` scratch list consumed by :func:`_make_command`.
    """
    if isinstance(f, Command):
        f.params.append(param)
        return
    try:
        memo = f.__click_params__
    except AttributeError:
        memo = f.__click_params__ = []
    memo.append(param)
def argument(*param_decls, **attrs):
    """Attach an :class:`Argument` to the command.  Positional arguments
    become the parameter declarations; keyword arguments are forwarded
    unchanged.  Equivalent to constructing the instance manually and
    appending it to :attr:`Command.params`.
    """
    def decorator(f):
        arg = Argument(param_decls, **attrs)
        _param_memo(f, arg)
        return f
    return decorator
def option(*param_decls, **attrs):
    """Attach an :class:`Option` to the command.  Positional arguments
    become the parameter declarations; keyword arguments are forwarded
    unchanged.  Equivalent to constructing the instance manually and
    appending it to :attr:`Command.params`.
    """
    def decorator(f):
        # Normalize indented help text the same way docstrings are.
        if 'help' in attrs:
            attrs['help'] = inspect.cleandoc(attrs['help'])
        opt = Option(param_decls, **attrs)
        _param_memo(f, opt)
        return f
    return decorator
def confirmation_option(*param_decls, **attrs):
    """Add a confirmation prompt that can be bypassed with ``--yes``.

    Equivalent to decorating a function with :func:`option` using::

        def callback(ctx, param, value):
            if not value:
                ctx.abort()

        @click.command()
        @click.option('--yes', is_flag=True, callback=callback,
                      expose_value=False, prompt='Do you want to continue?')
        def dropdb():
            pass
    """
    def decorator(f):
        def abort_unless_confirmed(ctx, param, value):
            if not value:
                ctx.abort()
        for key, default in (('is_flag', True),
                             ('callback', abort_unless_confirmed),
                             ('expose_value', False),
                             ('prompt', 'Do you want to continue?'),
                             ('help', 'Confirm the action without prompting.')):
            attrs.setdefault(key, default)
        decls = param_decls or ('--yes',)
        return option(*decls, **attrs)(f)
    return decorator


def password_option(*param_decls, **attrs):
    """Add a hidden, double-checked password prompt.

    Equivalent to decorating a function with :func:`option` using::

        @click.command()
        @click.option('--password', prompt=True, confirmation_prompt=True,
                      hide_input=True)
        def changeadmin(password):
            pass
    """
    def decorator(f):
        for key, default in (('prompt', True),
                             ('confirmation_prompt', True),
                             ('hide_input', True)):
            attrs.setdefault(key, default)
        decls = param_decls or ('--password',)
        return option(*decls, **attrs)(f)
    return decorator
def version_option(version=None, *param_decls, **attrs):
    """Adds a ``--version`` option which immediately ends the program
    printing out the version number.  This is implemented as an eager
    option that prints the version and exits the program in the callback.

    :param version: the version number to show.  If not provided Click
                    attempts an auto discovery via setuptools.
    :param prog_name: the name of the program (defaults to autodetection)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        # Capture the *caller's* module name now; it is matched against
        # installed console_scripts entry points later to auto-discover
        # the version.  sys._getframe(1) is the caller's frame.
        module = sys._getframe(1).f_globals.get('__name__')
    def decorator(f):
        prog_name = attrs.pop('prog_name', None)
        message = attrs.pop('message', '%(prog)s, version %(version)s')
        def callback(ctx, param, value):
            # Only act when the flag was actually passed; during resilient
            # parsing (e.g. completion) exiting would be wrong.
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name
            if prog is None:
                prog = ctx.find_root().info_name
            ver = version
            if ver is None:
                # Auto-discovery: scan installed distributions for a
                # console script whose module matches the caller's module.
                try:
                    import pkg_resources
                except ImportError:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get('console_scripts') or {}
                        for script_name, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError('Could not determine version')
            echo(message % {
                'prog': prog,
                'version': ver,
            })
            ctx.exit()
        # Eager flag: handled before other parameters, no value exposed.
        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('is_eager', True)
        attrs.setdefault('help', 'Show the version and exit.')
        attrs['callback'] = callback
        return option(*(param_decls or ('--version',)), **attrs)(f)
    return decorator
def help_option(*param_decls, **attrs):
    """Add a ``--help`` option which immediately prints the help page and
    ends the program.  Usually unnecessary, as all commands get one by
    default unless suppressed.

    Like :func:`version_option`, this is an eager option whose callback
    prints and exits.  All arguments are forwarded to :func:`option`.
    """
    def decorator(f):
        def show_help(ctx, param, value):
            if not value or ctx.resilient_parsing:
                return
            echo(ctx.get_help())
            ctx.exit()
        for key, default in (('is_flag', True),
                             ('expose_value', False),
                             ('help', 'Show this message and exit.'),
                             ('is_eager', True)):
            attrs.setdefault(key, default)
        attrs['callback'] = show_help
        return option(*(param_decls or ('--help',)), **attrs)(f)
    return decorator
# Circular dependencies between core and decorators
from .core import Command, Group, Argument, Option
# license: bsd-3-clause
# Source: nicproulx/mne-python -- mne/io/write.py
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from gzip import GzipFile
import os.path as op
import re
import time
import uuid
import numpy as np
from scipy import linalg
from .constants import FIFF
from ..utils import logger
from ..externals.jdcal import jcal2jd
from ..externals.six import string_types, b
def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
    """Write one FIF tag (header + payload) to an open fif file.

    Parameters
    ----------
    fid : file-like
        Destination opened for binary writing.
    data : scalar | ndarray
        Payload, serialized after conversion to ``dtype``.
    kind : int
        Tag kind identifier.
    data_size : int
        Byte size of one element; multiplied by ``data.size`` for arrays.
    FIFFT_TYPE : int
        FIFF type constant describing the payload encoding.
    dtype : str
        Big-endian numpy dtype string used to serialize ``data``.
    """
    if isinstance(data, np.ndarray):
        data_size *= data.size
    # XXX for string types the data size is used as
    # computed in ``write_string``.
    # tostring() was deprecated in numpy 1.19 and removed in 1.23;
    # tobytes() is the byte-identical replacement (numpy >= 1.9).
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_TYPE, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(data, dtype=dtype).tobytes())
def _get_split_size(split_size):
    """Convert human-readable bytes to machine-readable bytes.

    Parameters
    ----------
    split_size : int | str
        Either a plain byte count, or a string such as '500MB' or '2GB'.

    Returns
    -------
    split_size : int
        The size in bytes; must not exceed 2 GB.

    Raises
    ------
    ValueError
        If a string lacks an 'MB'/'GB' suffix or the size exceeds 2 GB.
    """
    if isinstance(split_size, string_types):
        exp = dict(MB=20, GB=30).get(split_size[-2:], None)
        if exp is None:
            # NB: trailing space added so the message reads correctly
            # across the implicit string concatenation.
            raise ValueError('split_size has to end with either '
                             '"MB" or "GB"')
        split_size = int(float(split_size[:-2]) * 2 ** exp)
    if split_size > 2147483648:
        raise ValueError('split_size cannot be larger than 2GB')
    return split_size
def write_int(fid, kind, data):
    """Write a 32-bit integer tag to a fif file."""
    _write(fid, np.array(data, dtype='>i4').T, kind, 4,
           FIFF.FIFFT_INT, '>i4')


def write_double(fid, kind, data):
    """Write a double-precision floating point tag to a fif file."""
    _write(fid, np.array(data, dtype='>f8').T, kind, 8,
           FIFF.FIFFT_DOUBLE, '>f8')


def write_float(fid, kind, data):
    """Write a single-precision floating point tag to a fif file."""
    _write(fid, np.array(data, dtype='>f4').T, kind, 4,
           FIFF.FIFFT_FLOAT, '>f4')


def write_dau_pack16(fid, kind, data):
    """Write a dau_pack16 tag to a fif file."""
    _write(fid, np.array(data, dtype='>i2').T, kind, 2,
           FIFF.FIFFT_DAU_PACK16, '>i2')


def write_complex64(fid, kind, data):
    """Write a 64 bit complex floating point tag to a fif file."""
    _write(fid, np.array(data, dtype='>c8').T, kind, 8,
           FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
def write_complex128(fid, kind, data):
    """Write a 128 bit complex floating point tag to a fif file."""
    data_size = 16
    data = np.array(data, dtype='>c16').T
    # Tag the 16-byte complex payload as COMPLEX_DOUBLE; the previous
    # COMPLEX_FLOAT constant described an 8-byte encoding and would make
    # readers misparse the data.
    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
def write_julian(fid, kind, data):
    """Write a Julian-formatted date (year, month, day triple) to a FIF file."""
    assert len(data) == 3
    # jcal2jd returns two parts whose sum is the Julian day number.
    julian_day = np.sum(jcal2jd(*data))
    _write(fid, np.array(julian_day, dtype='>i4'), kind, 4,
           FIFF.FIFFT_JULIAN, '>i4')
def write_string(fid, kind, data):
    """Write a string tag."""
    encoded = data.encode('utf-8')  # unicode-safe on both Py2 and Py3
    n_bytes = len(encoded)  # size of the *encoded* payload, not len(data)
    if n_bytes > 0:
        # '>a' is the py2/3-compatible write dtype used historically here.
        _write(fid, encoded, kind, n_bytes, FIFF.FIFFT_STRING, '>a')


def write_name_list(fid, kind, data):
    """Write a colon-separated list of names.

    Parameters
    ----------
    data : list of strings
    """
    joined = ':'.join(data)
    write_string(fid, kind, joined)
def write_float_matrix(fid, kind, mat):
    """Write a single-precision floating-point matrix tag.

    The payload is the raw matrix data followed by its dimensions
    (shape reversed, then ndim), per the FIFF matrix encoding.
    """
    FIFFT_MATRIX = 1 << 30
    FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
    # 4 bytes per element plus one int32 per dimension plus ndim itself.
    data_size = 4 * mat.size + 4 * (mat.ndim + 1)
    # tostring() was removed in numpy 1.23; tobytes() is byte-identical.
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat, dtype='>f4').tobytes())
    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def write_double_matrix(fid, kind, mat):
    """Write a double-precision floating-point matrix tag."""
    FIFFT_MATRIX = 1 << 30
    matrix_type = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX
    # Payload: 8 bytes per element plus one i4 per dimension plus ndim.
    data_size = 8 * mat.size + 4 * (mat.ndim + 1)
    # Tag header: kind, type, size, next-pointer -- all big-endian i4.
    for header_value in (kind, matrix_type, data_size, FIFF.FIFFV_NEXT_SEQ):
        fid.write(np.array(header_value, dtype='>i4').tostring())
    fid.write(np.array(mat, dtype='>f8').tostring())
    # Dimensions, fastest-varying first, terminated by the dimension count.
    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def write_int_matrix(fid, kind, mat):
    """Write integer 32 matrix tag.

    The layout written here is fixed to two dimensions
    (dims = [ncol, nrow, 2]). Non-2-D input is rejected up front: the
    previous behavior raised IndexError (1-D) only *after* the tag header
    had been written, or silently wrote wrong metadata (>2-D), leaving a
    corrupt file behind.

    Raises
    ------
    ValueError
        If ``mat`` is not two-dimensional.
    """
    if mat.ndim != 2:
        raise ValueError('Only 2-D integer matrices are supported, got %d '
                         'dimension(s)' % mat.ndim)
    FIFFT_MATRIX = 1 << 30
    FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX
    # Payload: 4 bytes per element plus three i4 dimension fields.
    data_size = 4 * mat.size + 4 * 3
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat, dtype='>i4').tostring())
    # Dimensions are stored fastest-varying first, then the dim count (2).
    dims = np.empty(3, dtype=np.int32)
    dims[0] = mat.shape[1]
    dims[1] = mat.shape[0]
    dims[2] = 2
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def get_machid():
    """Get (mostly) unique machine ID.

    Returns
    -------
    ids : array (length 2, int32)
        The machine identifier used in MNE.
    """
    # NOTE(review): uuid.getnode() can fall back to a random 48-bit value
    # when no hardware address is found, so the ID is only "mostly" stable.
    mac = b('%012x' % uuid.getnode())  # byte conversion for Py3
    mac = re.findall(b'..', mac)  # split string
    mac += [b'00', b'00']  # add two more fields
    # Convert to integer in reverse-order (for some reason)
    from codecs import encode
    # hex_codec doubles each two-char field into its hex spelling, giving
    # 16 bytes; fromstring then reads the first 8 as two native-order i4s.
    mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
    ids = np.flipud(np.fromstring(mac, np.int32, count=2))
    return ids
def get_new_file_id():
    """Create a new file ID tag."""
    # Split the current time into whole seconds and microseconds.
    whole, frac = divmod(time.time(), 1.)
    return {'machid': get_machid(), 'version': FIFF.FIFFC_VERSION,
            'secs': int(whole), 'usecs': int(frac * 1e6)}
def write_id(fid, kind, id_=None):
    """Write fiff id.

    When ``id_`` is None a fresh measurement id is generated.
    """
    if id_ is None:
        id_ = _generate_meas_id()
    data_size = 5 * 4  # The id comprises five integers
    # Tag header: kind, type, size, next-pointer -- all big-endian i4.
    for header_value in (kind, FIFF.FIFFT_ID_STRUCT, data_size,
                         FIFF.FIFFV_NEXT_SEQ):
        fid.write(np.array(header_value, dtype='>i4').tostring())
    # Collect the bits together for one write: version, machine id words,
    # then the timestamp.
    payload = np.array([id_['version'],
                        id_['machid'][0], id_['machid'][1],
                        id_['secs'], id_['usecs']], dtype='>i4')
    fid.write(payload.tostring())
def start_block(fid, kind):
    """Write a FIFF_BLOCK_START tag."""
    # Blocks are delimited by paired BLOCK_START / BLOCK_END int tags whose
    # value is the block kind (e.g. FIFFB_ISOTRAK).
    write_int(fid, FIFF.FIFF_BLOCK_START, kind)
def end_block(fid, kind):
    """Write a FIFF_BLOCK_END tag."""
    # Must mirror a prior start_block() with the same kind.
    write_int(fid, FIFF.FIFF_BLOCK_END, kind)
def start_file(fname, id_=None):
    """Open a fif file for writing and writes the compulsory header tags.

    Parameters
    ----------
    fname : string | fid
        The name of the file to open. It is recommended
        that the name ends with .fif or .fif.gz. Can also be an
        already opened file.
    id_ : dict | None
        ID to use for the FIFF_FILE_ID.
    """
    if isinstance(fname, string_types):
        if op.splitext(fname)[1].lower() == '.gz':
            logger.debug('Writing using gzip')
            # defaults to compression level 9, which is barely smaller but much
            # slower. 2 offers a good compromise.
            fid = GzipFile(fname, "wb", compresslevel=2)
        else:
            logger.debug('Writing using normal I/O')
            fid = open(fname, "wb")
    else:
        logger.debug('Writing using %s I/O' % type(fname))
        fid = fname
        # NOTE(review): rewinds without truncating -- assumes a passed-in
        # fid is fresh/empty; confirm callers never reuse a longer file.
        fid.seek(0)
    # Write the compulsory items
    write_id(fid, FIFF.FIFF_FILE_ID, id_)
    write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
    write_int(fid, FIFF.FIFF_FREE_LIST, -1)
    return fid
def check_fiff_length(fid, close=True):
    """Ensure our file hasn't grown too large to work properly.

    FIFF uses signed 32-bit byte offsets, so a file past 2 ** 31 bytes is
    unaddressable. Optionally closes ``fid`` before raising.
    """
    two_gb = 2147483648  # 2 ** 31
    if fid.tell() <= two_gb:
        return
    if close:
        fid.close()
    raise IOError('FIFF file exceeded 2GB limit, please split file or '
                  'save to a different format')
def end_file(fid):
    """Write the closing tags to a fif file and closes the file."""
    # A NOP/void tag with a zero-size body and next == NONE terminates the
    # tag stream.
    closing_tag = (FIFF.FIFF_NOP, FIFF.FIFFT_VOID, 0, FIFF.FIFFV_NEXT_NONE)
    for value in closing_tag:
        fid.write(np.array(value, dtype='>i4').tostring())
    check_fiff_length(fid)
    fid.close()
def write_coord_trans(fid, trans):
    """Write a coordinate transformation structure."""
    # 2 frame ids (i4) + forward and inverse 4x3 transforms (12 f4 each).
    data_size = 4 * 2 * 12 + 4 * 2
    for header_value in (FIFF.FIFF_COORD_TRANS,
                         FIFF.FIFFT_COORD_TRANS_STRUCT,
                         data_size, FIFF.FIFFV_NEXT_SEQ):
        fid.write(np.array(header_value, dtype='>i4').tostring())
    fid.write(np.array(trans['from'], dtype='>i4').tostring())
    fid.write(np.array(trans['to'], dtype='>i4').tostring())
    # The transform and then its inverse, each as rotation followed by
    # translation.
    for matrix in (trans['trans'], linalg.inv(trans['trans'])):
        fid.write(np.array(matrix[:3, :3], dtype='>f4').tostring())
        fid.write(np.array(matrix[:3, 3], dtype='>f4').tostring())
def write_ch_info(fid, ch):
    """Write a channel information record to a fif file."""
    # 13 i4/f4 scalar+loc words, 7 more fields, 16-byte name.
    data_size = 4 * 13 + 4 * 7 + 16
    for header_value in (FIFF.FIFF_CH_INFO, FIFF.FIFFT_CH_INFO_STRUCT,
                         data_size, FIFF.FIFFV_NEXT_SEQ):
        fid.write(np.array(header_value, dtype='>i4').tostring())
    # fiffChInfoRec body, written field by field in its fixed order;
    # 'loc' contributes 12 position values.
    for field, dtype in (('scanno', '>i4'), ('logno', '>i4'),
                         ('kind', '>i4'), ('range', '>f4'), ('cal', '>f4'),
                         ('coil_type', '>i4'), ('loc', '>f4'),
                         ('unit', '>i4'), ('unit_mul', '>i4')):
        fid.write(np.array(ch[field], dtype=dtype).tostring())
    # Channel name: truncated to 15 chars (empty stays empty)...
    ch_name = ch['ch_name'][:15]
    fid.write(np.array(ch_name, dtype='>c').tostring())
    # ...then NUL-padded to a fixed 16 bytes.
    if len(ch_name) < 16:
        fid.write(b('\0') * (16 - len(ch_name)))
def write_dig_points(fid, dig, block=False, coord_frame=None):
    """Write a set of digitizer data points into a fif file.

    If ``block`` is True the points are wrapped in a FIFFB_ISOTRAK block;
    when ``coord_frame`` is given it is written first as
    FIFF_MNE_COORD_FRAME. A ``dig`` of None writes nothing at all.
    """
    if dig is not None:
        data_size = 5 * 4  # kind (i4) + ident (i4) + 3 coordinates (f4)
        if block:
            start_block(fid, FIFF.FIFFB_ISOTRAK)
        if coord_frame is not None:
            write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
        for d in dig:
            # Tag header for each point.
            fid.write(np.array(FIFF.FIFF_DIG_POINT, '>i4').tostring())
            fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, '>i4').tostring())
            fid.write(np.array(data_size, dtype='>i4').tostring())
            fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, '>i4').tostring())
            # Start writing fiffDigPointRec
            fid.write(np.array(d['kind'], '>i4').tostring())
            fid.write(np.array(d['ident'], '>i4').tostring())
            fid.write(np.array(d['r'][:3], '>f4').tostring())
        if block:
            end_block(fid, FIFF.FIFFB_ISOTRAK)
def write_float_sparse_rcs(fid, kind, mat):
    """Write a single-precision floating-point sparse (RCS) matrix tag.

    ``mat`` is assumed to be CSR (rcs = row-compressed sparse):
    ``data``/``indices`` hold nnz entries and ``indptr`` has nrow + 1
    entries, which is what the size computation below relies on -- TODO
    confirm callers never pass CSC.
    """
    # 0x4020 << 16 == the matrix bit (1 << 30) plus the sparse-RCS coding
    # bits (0x0020 in bits 16-23) of the tag type.
    FIFFT_MATRIX = 16416 << 16
    FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
    nnzm = mat.nnz
    nrow = mat.shape[0]
    # data (f4) + indices (i4) + indptr (i4) + 4 dims (i4).
    data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4
    fid.write(np.array(kind, dtype='>i4').tostring())
    fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tostring())
    fid.write(np.array(data_size, dtype='>i4').tostring())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
    fid.write(np.array(mat.data, dtype='>f4').tostring())
    fid.write(np.array(mat.indices, dtype='>i4').tostring())
    fid.write(np.array(mat.indptr, dtype='>i4').tostring())
    # Dimension record: nnz, nrow, ncol, then the dim count (2).
    dims = [nnzm, mat.shape[0], mat.shape[1], 2]
    fid.write(np.array(dims, dtype='>i4').tostring())
    check_fiff_length(fid)
def _generate_meas_id():
    """Generate a new meas_id dict."""
    secs, usecs = _date_now()
    return dict(version=FIFF.FIFFC_VERSION, machid=get_machid(),
                secs=secs, usecs=usecs)
def _date_now():
"""Get date in secs, usecs."""
now = time.time()
# Get date in secs/usecs (as in `fill_measurement_info` in
# mne/forward/forward.py)
date_arr = np.array([np.floor(now), 1e6 * (now - np.floor(now))],
dtype='int32')
return date_arr
| bsd-3-clause |
keedio/keedio-stacks | KEEDIO/1.3/services/CASSANDRA/package/scripts/params.py | 1 | 2618 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import multiprocessing
# Host CPU count, exposed so templates can size Cassandra by core count.
cpu_count = multiprocessing.cpu_count()
config = Script.get_config()
# Cluster / host identity.
cluster_name = config['clusterName']
hostname = config['hostname']
# Security settings from the shared cluster environment.
security_enabled = config['configurations']['cluster-env']['security_enabled']
realm = config['configurations']['cluster-env']['kerberos_domain']
# Comma-joined list of all Cassandra hosts (empty string when none).
cassandra_hosts = ",".join([str(elem) for elem in default('/clusterHostInfo/cassandra_hosts',[])])
tokens = default('/configurations/cassandra/tokens',256)
# The data path config value may itself be a comma-separated list.
cassandra_data_path = list(str(config['configurations']['cassandra']['cassandra_data_path']).split(","))
cassandra_commit_log = config['configurations']['cassandra']['cassandra_commit_log']
# Network ports and RPC thread limit.
storage_port = config['configurations']['cassandra']['storage_port']
native_transport_port = config['configurations']['cassandra']['native_transport_port']
rpc_port = config['configurations']['cassandra']['rpc_port']
rpc_max_threads = config['configurations']['cassandra']['rpc_max_threads']
# Topology (snitch / rack / datacenter) settings.
endpoint_snitch = config['configurations']['cassandra']['endpoint_snitch']
rack = config['configurations']['cassandra']['rack']
datacenter = config['configurations']['cassandra']['datacenter']
cassandra_user = default('/configurations/cassandra-env/cassandra_user','cassandra')
# Kerberos-related settings, currently disabled:
#cassandra_principal_name = default('/configurations/cassandra-env/cassandra_principal_name',None)
#cassandra_keytab_file = default('/configurations/cassandra-env/cassandra_keytab',None)
#cassandra_spnego_principal_name = default('/configurations/cassandra-env/cassandra_principal_spnego',None)
#hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name',None)
#hdfs_user_keytab = default('/configurations/hadoop-env/hdfs_user_keytab',None)
#kerberos_cache_file = default('/configurations/cluster-env/kerberos_cache_file','/tmp/ccache_keytab')
| apache-2.0 |
jfpla/odoo | addons/sale_service/models/sale_service.py | 58 | 7877 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class procurement_order(osv.osv):
    """Procurement extension: service products flagged for it are fulfilled
    by creating a project task instead of a stock move / purchase."""
    _name = "procurement.order"
    _inherit = "procurement.order"
    _columns = {
        # Task generated for this procurement; not duplicated on copy.
        'task_id': fields.many2one('project.task', 'Task', copy=False),
    }
    def _is_procurement_task(self, cr, uid, procurement, context=None):
        """Return True when this procurement must be fulfilled by a task."""
        return procurement.product_id.type == 'service' and procurement.product_id.auto_create_task or False
    def _assign(self, cr, uid, procurement, context=None):
        """Consider task-backed procurements assignable even without a rule."""
        res = super(procurement_order, self)._assign(cr, uid, procurement, context=context)
        if not res:
            #if there isn't any specific procurement.rule defined for the product, we may want to create a task
            if self._is_procurement_task(cr, uid, procurement, context=context):
                return True
        return res
    def _run(self, cr, uid, procurement, context=None):
        """Fulfil task-backed procurements by creating their task (once)."""
        if self._is_procurement_task(cr, uid, procurement, context=context) and not procurement.task_id:
            #create a task for the procurement
            return self._create_service_task(cr, uid, procurement, context=context)
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)
    def _check(self, cr, uid, procurement, context=None):
        """A task-backed procurement is done when its task stage is closed."""
        if self._is_procurement_task(cr, uid, procurement, context=context):
            return procurement.task_id and procurement.task_id.stage_id.closed or False
        return super(procurement_order, self)._check(cr, uid, procurement, context=context)
    def _convert_qty_company_hours(self, cr, uid, procurement, context=None):
        """Convert the procured quantity into the company time UoM (hours)."""
        product_uom = self.pool.get('product.uom')
        company_time_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id
        # Only convert when the UoMs belong to the same category (both time).
        if procurement.product_uom.id != company_time_uom_id.id and procurement.product_uom.category_id.id == company_time_uom_id.category_id.id:
            planned_hours = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, company_time_uom_id.id)
        else:
            planned_hours = procurement.product_qty
        return planned_hours
    def _get_project(self, cr, uid, procurement, context=None):
        """Pick the project: product default, else the sale order's project."""
        project_project = self.pool.get('project.project')
        project = procurement.product_id.project_id
        if not project and procurement.sale_line_id:
            # find the project corresponding to the analytic account of the sales order
            account = procurement.sale_line_id.order_id.project_id
            project_ids = project_project.search(cr, uid, [('analytic_account_id', '=', account.id)])
            projects = project_project.browse(cr, uid, project_ids, context=context)
            project = projects and projects[0] or False
        return project
    def _create_service_task(self, cr, uid, procurement, context=None):
        """Create the task fulfilling ``procurement`` and link it back."""
        project_task = self.pool.get('project.task')
        project = self._get_project(cr, uid, procurement, context=context)
        planned_hours = self._convert_qty_company_hours(cr, uid, procurement, context=context)
        task_id = project_task.create(cr, uid, {
            'name': '%s:%s' % (procurement.origin or '', procurement.product_id.name),
            'date_deadline': procurement.date_planned,
            'planned_hours': planned_hours,
            'remaining_hours': planned_hours,
            # Prefer the sale order customer; fall back to the destination partner.
            'partner_id': procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id.id or procurement.partner_dest_id.id,
            'user_id': procurement.product_id.product_manager.id,
            'procurement_id': procurement.id,
            'description': procurement.name + '\n',
            'project_id': project and project.id or False,
            'company_id': procurement.company_id.id,
        },context=context)
        self.write(cr, uid, [procurement.id], {'task_id': task_id}, context=context)
        self.project_task_create_note(cr, uid, [procurement.id], context=context)
        return task_id
    def project_task_create_note(self, cr, uid, ids, context=None):
        """Log a 'Task created' message on the procurement and its sale order."""
        for procurement in self.browse(cr, uid, ids, context=context):
            body = _("Task created")
            self.message_post(cr, uid, [procurement.id], body=body, context=context)
            if procurement.sale_line_id and procurement.sale_line_id.order_id:
                procurement.sale_line_id.order_id.message_post(body=body)
class ProjectTaskStageMrp(osv.Model):
    """ Override project.task.type model to add a 'closed' boolean field allowing
    to know that tasks in this stage are considered as closed. Indeed since
    OpenERP 8.0 status is not present on tasks anymore, only stage_id. """
    _name = 'project.task.type'
    _inherit = 'project.task.type'
    _columns = {
        # Consulted by procurement_order._check to decide fulfilment.
        'closed': fields.boolean('Close', help="Tasks in this stage are considered as closed."),
    }
    _defaults = {
        # New stages are open by default.
        'closed': False,
    }
class project_task(osv.osv):
    """Task extension linking tasks back to the procurement that spawned them."""
    _name = "project.task"
    _inherit = "project.task"
    _columns = {
        # Source procurement; the link is cleared if the procurement is deleted.
        'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null'),
        # Stored related field so tasks can be searched/grouped by SO line.
        'sale_line_id': fields.related('procurement_id', 'sale_line_id', type='many2one', relation='sale.order.line', store=True, string='Sales Order Line'),
    }
    def _validate_subflows(self, cr, uid, ids, context=None):
        """Re-check (as superuser) the procurement behind each given task."""
        proc_obj = self.pool.get("procurement.order")
        for task in self.browse(cr, uid, ids, context=context):
            if task.procurement_id:
                proc_obj.check(cr, SUPERUSER_ID, [task.procurement_id.id], context=context)
    def write(self, cr, uid, ids, values, context=None):
        """ When closing tasks, validate subflows. """
        res = super(project_task, self).write(cr, uid, ids, values, context=context)
        # Only a stage change can close a task, so re-check only then.
        if values.get('stage_id'):
            stage = self.pool.get('project.task.type').browse(cr, uid, values.get('stage_id'), context=context)
            if stage.closed:
                self._validate_subflows(cr, uid, ids, context=context)
        return res
class product_template(osv.osv):
    """Product options controlling automatic task creation on sale."""
    _inherit = "product.template"
    _columns = {
        # Default project for auto-created tasks; link cleared on deletion.
        'project_id': fields.many2one('project.project', 'Project', ondelete='set null',),
        'auto_create_task': fields.boolean('Create Task Automatically', help="Tick this option if you want to create a task automatically each time this product is sold"),
    }
class product_product(osv.osv):
    _inherit = "product.product"
    def need_procurement(self, cr, uid, ids, context=None):
        """A service product set to auto-create tasks always needs procurement."""
        records = self.browse(cr, uid, ids, context=context)
        if any(rec.type == 'service' and rec.auto_create_task for rec in records):
            return True
        return super(product_product, self).need_procurement(cr, uid, ids, context=context)
| agpl-3.0 |
jmchilton/pulsar | pulsar/core.py | 1 | 5500 | """
"""
import os
from tempfile import tempdir
from pulsar.manager_factory import build_managers
from pulsar.cache import Cache
from pulsar.tools import ToolBox
from pulsar.tools.authorization import get_authorizer
from pulsar import messaging
from galaxy.objectstore import build_object_store_from_config
from galaxy.tools.deps import DependencyManager
from galaxy.jobs.metrics import JobMetrics
from galaxy.util.bunch import Bunch
from logging import getLogger
log = getLogger(__name__)
# Defaults used when the corresponding keys are absent from the app config.
DEFAULT_PRIVATE_TOKEN = None
DEFAULT_FILES_DIRECTORY = "files"
DEFAULT_STAGING_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "staging")
DEFAULT_PERSISTENCE_DIRECTORY = os.path.join(DEFAULT_FILES_DIRECTORY, "persisted_data")
# Bug fix: the two concatenated sentences previously ran together as
# "...white-list.Ensure..." -- keep a separating space between them.
NOT_WHITELIST_WARNING = "Starting the Pulsar without a toolbox to white-list. " + \
    "Ensure this application is protected by firewall or a configured private token."
class PulsarApp(object):
    """Top-level Pulsar application object.

    Builds every subsystem from the supplied keyword configuration, in a
    fixed order: staging dir, private token, persistence dir, toolbox,
    object store, dependency manager, job metrics, managers, file cache,
    message-queue binding, then job recovery.
    """
    def __init__(self, **conf):
        # NOTE(review): `conf` is collected via **kwargs so it is always a
        # dict here; this None check is dead code.
        if conf is None:
            conf = {}
        self.__setup_staging_directory(conf.get("staging_directory", DEFAULT_STAGING_DIRECTORY))
        self.__setup_private_token(conf.get("private_token", DEFAULT_PRIVATE_TOKEN))
        self.__setup_persistence_directory(conf.get("persistence_directory", None))
        self.__setup_tool_config(conf)
        self.__setup_object_store(conf)
        self.__setup_dependency_manager(conf)
        self.__setup_job_metrics(conf)
        self.__setup_managers(conf)
        self.__setup_file_cache(conf)
        self.__setup_bind_to_message_queue(conf)
        self.__recover_jobs()
        self.ensure_cleanup = conf.get("ensure_cleanup", False)
    def shutdown(self, timeout=None):
        """Stop all managers (best effort) and deactivate the MQ binding."""
        for manager in self.managers.values():
            # A failing manager must not prevent the others from stopping.
            try:
                manager.shutdown(timeout)
            except Exception:
                pass
        if self.__queue_state:
            self.__queue_state.deactivate()
            if self.ensure_cleanup:
                self.__queue_state.join(timeout)
    def __setup_bind_to_message_queue(self, conf):
        # Binding is optional; without a URL the app runs web-only.
        message_queue_url = conf.get("message_queue_url", None)
        queue_state = None
        if message_queue_url:
            queue_state = messaging.bind_app(self, message_queue_url, conf)
        self.__queue_state = queue_state
    def __setup_tool_config(self, conf):
        """
        Setups toolbox object and authorization mechanism based
        on supplied toolbox_path.
        """
        tool_config_files = conf.get("tool_config_files", None)
        if not tool_config_files:
            # For compatibity with Galaxy, allow tool_config_file
            # option name.
            tool_config_files = conf.get("tool_config_file", None)
        toolbox = None
        if tool_config_files:
            toolbox = ToolBox(tool_config_files)
        else:
            log.info(NOT_WHITELIST_WARNING)
        self.toolbox = toolbox
        self.authorizer = get_authorizer(toolbox)
    def __setup_staging_directory(self, staging_directory):
        self.staging_directory = os.path.abspath(staging_directory)
    def __setup_managers(self, conf):
        self.managers = build_managers(self, conf)
    def __recover_jobs(self):
        # Re-attach to jobs that were active before the last shutdown.
        for manager in self.managers.values():
            manager.recover_active_jobs()
    def __setup_private_token(self, private_token):
        self.private_token = private_token
        if private_token:
            log.info("Securing Pulsar web app with private key, please verify you are using HTTPS so key cannot be obtained by monitoring traffic.")
    def __setup_persistence_directory(self, persistence_directory):
        # "__none__" is the explicit opt-out sentinel for persistence.
        persistence_directory = persistence_directory or DEFAULT_PERSISTENCE_DIRECTORY
        if persistence_directory == "__none__":
            persistence_directory = None
        self.persistence_directory = persistence_directory
    def __setup_file_cache(self, conf):
        file_cache_dir = conf.get('file_cache_dir', None)
        self.file_cache = Cache(file_cache_dir) if file_cache_dir else None
    def __setup_object_store(self, conf):
        # The object store is optional; skipped without a config file.
        if "object_store_config_file" not in conf:
            self.object_store = None
            return
        object_store_config = Bunch(
            object_store_config_file=conf['object_store_config_file'],
            file_path=conf.get("object_store_file_path", None),
            object_store_check_old_style=False,
            job_working_directory=conf.get("object_store_job_working_directory", None),
            # NOTE(review): `tempdir` (imported from tempfile) is typically
            # None until tempfile.gettempdir() has been called -- confirm
            # the object store tolerates a None new_file_path.
            new_file_path=conf.get("object_store_new_file_path", tempdir),
            umask=int(conf.get("object_store_umask", "0000")),
        )
        self.object_store = build_object_store_from_config(object_store_config)
    def __setup_dependency_manager(self, conf):
        dependencies_dir = conf.get("tool_dependency_dir", "dependencies")
        resolvers_config_file = conf.get("dependency_resolvers_config_file", "dependency_resolvers_conf.xml")
        self.dependency_manager = DependencyManager(dependencies_dir, resolvers_config_file)
    def __setup_job_metrics(self, conf):
        job_metrics_config_file = conf.get("job_metrics_config_file", "job_metrics_conf.xml")
        self.job_metrics = JobMetrics(job_metrics_config_file)
    @property
    def only_manager(self):
        # convience method for tests, etc... where when we know there
        # is only one manager.
        # NOTE(review): `.values()[0]` indexes a list -- Py2 only.
        assert len(self.managers) == 1
        return self.managers.values()[0]
| apache-2.0 |
dcosentino/edx-platform | cms/djangoapps/contentstore/views/tests/test_item.py | 7 | 92144 | """Tests for items views."""
import json
from datetime import datetime, timedelta
import ddt
from mock import patch, Mock, PropertyMock
from pytz import UTC
from webob import Response
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from contentstore.utils import reverse_usage_url, reverse_course_url
from contentstore.views.component import (
component_handler, get_component_templates
)
from contentstore.views.item import (
create_xblock_info, ALWAYS, VisibilityState, _xblock_type_and_display_name, add_container_page_publishing_info
)
from contentstore.tests.utils import CourseTestCase
from student.tests.factories import UserFactory
from xmodule.capa_module import CapaDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, LibraryFactory, check_mongo_calls
from xmodule.x_module import STUDIO_VIEW, STUDENT_VIEW
from xblock.exceptions import NoSuchHandlerError
from xblock_django.user_service import DjangoXBlockUserService
from opaque_keys.edx.keys import UsageKey, CourseKey
from opaque_keys.edx.locations import Location
from xmodule.partitions.partitions import Group, UserPartition
class ItemTest(CourseTestCase):
    """ Base test class for create, save, and delete """
    def setUp(self):
        super(ItemTest, self).setUp()
        # Convenience aliases for the course built by CourseTestCase.
        self.course_key = self.course.id
        self.usage_key = self.course.location
    def get_item_from_modulestore(self, usage_key, verify_is_draft=False):
        """
        Get the item referenced by the UsageKey from the modulestore
        """
        item = self.store.get_item(usage_key)
        if verify_is_draft:
            self.assertTrue(getattr(item, 'is_draft', False))
        return item
    def response_usage_key(self, response):
        """
        Get the UsageKey from the response payload and verify that the status_code was 200.
        :param response:
        """
        parsed = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        key = UsageKey.from_string(parsed['locator'])
        # Locators without a run are mapped into the full course key.
        if key.course_key.run is None:
            key = key.map_into_course(CourseKey.from_string(parsed['courseKey']))
        return key
    def create_xblock(self, parent_usage_key=None, display_name=None, category=None, boilerplate=None):
        """POST to the xblock handler to create a block under the given parent
        (defaults to the course root)."""
        data = {
            'parent_locator': unicode(self.usage_key) if parent_usage_key is None else unicode(parent_usage_key),
            'category': category
        }
        if display_name is not None:
            data['display_name'] = display_name
        if boilerplate is not None:
            data['boilerplate'] = boilerplate
        return self.client.ajax_post(reverse('contentstore.views.xblock_handler'), json.dumps(data))
    def _create_vertical(self, parent_usage_key=None):
        """
        Creates a vertical, returning its UsageKey.
        """
        resp = self.create_xblock(category='vertical', parent_usage_key=parent_usage_key)
        self.assertEqual(resp.status_code, 200)
        return self.response_usage_key(resp)
@ddt.ddt
class GetItemTest(ItemTest):
"""Tests for '/xblock' GET url."""
def _get_preview(self, usage_key, data=None):
""" Makes a request to xblock preview handler """
preview_url = reverse_usage_url("xblock_view_handler", usage_key, {'view_name': 'container_preview'})
data = data if data else {}
resp = self.client.get(preview_url, data, HTTP_ACCEPT='application/json')
return resp
def _get_container_preview(self, usage_key, data=None):
"""
Returns the HTML and resources required for the xblock at the specified UsageKey
"""
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content)
html = resp_content['html']
self.assertTrue(html)
resources = resp_content['resources']
self.assertIsNotNone(resources)
return html, resources
def _get_container_preview_with_error(self, usage_key, expected_code, data=None, content_contains=None):
""" Make request and asserts on response code and response contents """
resp = self._get_preview(usage_key, data)
self.assertEqual(resp.status_code, expected_code)
if content_contains:
self.assertIn(content_contains, resp.content)
return resp
    @ddt.data(
        (1, 16, 14, 15, 11),
        (2, 16, 14, 15, 11),
        (3, 16, 14, 15, 11),
    )
    @ddt.unpack
    def test_get_query_count(self, branching_factor, chapter_queries, section_queries, unit_queries, problem_queries):
        """Mongo query counts for fetching each block type are pinned and do
        not grow with the course branching factor."""
        self.populate_course(branching_factor)
        # Retrieve it
        with check_mongo_calls(chapter_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['chapter'][-1]))
        with check_mongo_calls(section_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['sequential'][-1]))
        with check_mongo_calls(unit_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['vertical'][-1]))
        with check_mongo_calls(problem_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['problem'][-1]))
    @ddt.data(
        (1, 26),
        (2, 28),
        (3, 30),
    )
    @ddt.unpack
    def test_container_get_query_count(self, branching_factor, unit_queries,):
        """Container page query count grows by 2 per branching-factor step."""
        self.populate_course(branching_factor)
        with check_mongo_calls(unit_queries):
            self.client.get(reverse_usage_url('xblock_container_handler', self.populated_usage_keys['vertical'][-1]))
    def test_get_vertical(self):
        """A newly created vertical can be fetched via the xblock handler."""
        # Add a vertical
        resp = self.create_xblock(category='vertical')
        usage_key = self.response_usage_key(resp)
        # Retrieve it
        resp = self.client.get(reverse_usage_url('xblock_handler', usage_key))
        self.assertEqual(resp.status_code, 200)
    def test_get_empty_container_fragment(self):
        """An empty vertical renders the Studio message wrapper but no plain
        xblock wrapper, while keeping the standard header/article markup."""
        root_usage_key = self._create_vertical()
        html, __ = self._get_container_preview(root_usage_key)
        # XBlock messages are added by the Studio wrapper.
        self.assertIn('wrapper-xblock-message', html)
        # Make sure that "wrapper-xblock" does not appear by itself (without -message at end).
        self.assertNotRegexpMatches(html, r'wrapper-xblock[^-]+')
        # Verify that the header and article tags are still added
        self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
        self.assertIn('<article class="xblock-render">', html)
    def test_get_container_fragment(self):
        """A vertical nested under a vertical renders both the nesting and
        element level Studio wrappers."""
        root_usage_key = self._create_vertical()
        # Add a problem beneath a child vertical
        child_vertical_usage_key = self._create_vertical(parent_usage_key=root_usage_key)
        resp = self.create_xblock(parent_usage_key=child_vertical_usage_key, category='problem', boilerplate='multiplechoice.yaml')
        self.assertEqual(resp.status_code, 200)
        # Get the preview HTML
        html, __ = self._get_container_preview(root_usage_key)
        # Verify that the Studio nesting wrapper has been added
        self.assertIn('level-nesting', html)
        self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
        self.assertIn('<article class="xblock-render">', html)
        # Verify that the Studio element wrapper has been added
        self.assertIn('level-element', html)
    def test_get_container_nested_container_fragment(self):
        """
        Test the case of the container page containing a link to another container page.
        """
        # Add a wrapper with child beneath a child vertical
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(parent_usage_key=root_usage_key, category="wrapper")
        self.assertEqual(resp.status_code, 200)
        wrapper_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='problem', boilerplate='multiplechoice.yaml')
        self.assertEqual(resp.status_code, 200)
        # Get the preview HTML and verify the View -> link is present.
        html, __ = self._get_container_preview(root_usage_key)
        self.assertIn('wrapper-xblock', html)
        self.assertRegexpMatches(
            html,
            # The instance of the wrapper class will have an auto-generated ID. Allow any
            # characters after wrapper.
            r'"/container/{}" class="action-button">\s*<span class="action-button-text">View</span>'.format(
                wrapper_usage_key
            )
        )
    def test_split_test(self):
        """
        Test that a split_test module renders all of its children in Studio.
        """
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
        split_test_usage_key = self.response_usage_key(resp)
        # Two distinct HTML children so both can be looked for in the render.
        resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html', boilerplate='announcement.yaml')
        self.assertEqual(resp.status_code, 200)
        resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html', boilerplate='zooming_image.yaml')
        self.assertEqual(resp.status_code, 200)
        html, __ = self._get_container_preview(split_test_usage_key)
        self.assertIn('Announcement', html)
        self.assertIn('Zooming', html)
def test_split_test_edited(self):
    """
    Test that rename of a group changes display name of child vertical.
    """
    # Define a single user partition (id 0) on the course so the split_test
    # can reference it.
    self.course.user_partitions = [UserPartition(
        0, 'first_partition', 'First Partition',
        [Group("0", 'alpha'), Group("1", 'beta')]
    )]
    self.store.update_item(self.course, self.user.id)
    root_usage_key = self._create_vertical()
    resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
    split_test_usage_key = self.response_usage_key(resp)
    # Point the split_test at partition 0; the preview should then show the
    # group names as the child display names.
    self.client.ajax_post(
        reverse_usage_url("xblock_handler", split_test_usage_key),
        data={'metadata': {'user_partition_id': str(0)}}
    )
    html, __ = self._get_container_preview(split_test_usage_key)
    self.assertIn('alpha', html)
    self.assertIn('beta', html)
    # Rename groups in group configuration
    GROUP_CONFIGURATION_JSON = {
        u'id': 0,
        u'name': u'first_partition',
        u'scheme': u'random',
        u'description': u'First Partition',
        u'version': UserPartition.VERSION,
        u'groups': [
            {u'id': 0, u'name': u'New_NAME_A', u'version': 1},
            {u'id': 1, u'name': u'New_NAME_B', u'version': 1},
        ],
    }
    response = self.client.put(
        reverse_course_url('group_configurations_detail_handler', self.course.id, kwargs={'group_configuration_id': 0}),
        data=json.dumps(GROUP_CONFIGURATION_JSON),
        content_type="application/json",
        HTTP_ACCEPT="application/json",
        HTTP_X_REQUESTED_WITH="XMLHttpRequest",
    )
    self.assertEqual(response.status_code, 201)
    # The preview must now reflect the renamed groups: old names gone,
    # new names present.
    html, __ = self._get_container_preview(split_test_usage_key)
    self.assertNotIn('alpha', html)
    self.assertNotIn('beta', html)
    self.assertIn('New_NAME_A', html)
    self.assertIn('New_NAME_B', html)
def test_valid_paging(self):
    """
    Tests that valid paging is passed along to underlying block
    """
    with patch('contentstore.views.item.get_preview_fragment') as patched_get_preview_fragment:
        # Stub the fragment so only the arguments passed to it matter.
        fragment = Mock()
        type(fragment).content = PropertyMock(return_value="Some content")
        type(fragment).resources = PropertyMock(return_value=[])
        patched_get_preview_fragment.return_value = fragment

        root_usage_key = self._create_vertical()
        self._get_container_preview(
            root_usage_key,
            {'enable_paging': 'true', 'page_number': 0, 'page_size': 2}
        )

        # The paging parameters must reach the preview fragment's context.
        _, _, context = patched_get_preview_fragment.call_args[0]
        self.assertIn('paging', context)
        self.assertEqual({'page_number': 0, 'page_size': 2}, context['paging'])
@ddt.data([1, 'invalid'], ['invalid', 2])
@ddt.unpack
def test_invalid_paging(self, page_number, page_size):
    """
    Tests that invalid paging parameters are rejected with a 400 error.

    Either the page_number or the page_size being non-numeric should cause
    the request to fail with "Couldn't parse paging parameters".
    """
    root_usage_key = self._create_vertical()
    self._get_container_preview_with_error(
        root_usage_key,
        400,
        data={'enable_paging': 'true', 'page_number': page_number, 'page_size': page_size},
        content_contains="Couldn't parse paging parameters"
    )
class DeleteItem(ItemTest):
    """Tests for '/xblock' DELETE url."""

    def test_delete_static_page(self):
        # Add static tab
        tab_usage_key = self.response_usage_key(self.create_xblock(category='static_tab'))
        # Now delete it. There was a bug that the delete was failing (static tabs do not exist in draft modulestore).
        delete_resp = self.client.delete(reverse_usage_url('xblock_handler', tab_usage_key))
        self.assertEqual(delete_resp.status_code, 204)
class TestCreateItem(ItemTest):
    """
    Test the create_item handler thoroughly
    """
    def test_create_nicely(self):
        """
        Try the straightforward use cases
        """
        # create a chapter
        display_name = 'Nicely created'
        resp = self.create_xblock(display_name=display_name, category='chapter')
        # get the new item and check its category and display_name
        chap_usage_key = self.response_usage_key(resp)
        new_obj = self.get_item_from_modulestore(chap_usage_key)
        self.assertEqual(new_obj.scope_ids.block_type, 'chapter')
        self.assertEqual(new_obj.display_name, display_name)
        self.assertEqual(new_obj.location.org, self.course.location.org)
        self.assertEqual(new_obj.location.course, self.course.location.course)
        # get the course and ensure it now points to this one
        course = self.get_item_from_modulestore(self.usage_key)
        self.assertIn(chap_usage_key, course.children)
        # use default display name
        resp = self.create_xblock(parent_usage_key=chap_usage_key, category='vertical')
        vert_usage_key = self.response_usage_key(resp)
        # create problem w/ boilerplate
        template_id = 'multiplechoice.yaml'
        resp = self.create_xblock(
            parent_usage_key=vert_usage_key,
            category='problem',
            boilerplate=template_id
        )
        prob_usage_key = self.response_usage_key(resp)
        problem = self.get_item_from_modulestore(prob_usage_key, verify_is_draft=True)
        # check against the template
        template = CapaDescriptor.get_template(template_id)
        self.assertEqual(problem.data, template['data'])
        self.assertEqual(problem.display_name, template['metadata']['display_name'])
        self.assertEqual(problem.markdown, template['metadata']['markdown'])

    def test_create_item_negative(self):
        """
        Negative tests for create_item
        """
        # non-existent boilerplate: creates a default
        resp = self.create_xblock(category='problem', boilerplate='nosuchboilerplate.yaml')
        self.assertEqual(resp.status_code, 200)

    def test_create_with_future_date(self):
        """A block created in a course with a future start date inherits that start date."""
        self.assertEqual(self.course.start, datetime(2030, 1, 1, tzinfo=UTC))
        resp = self.create_xblock(category='chapter')
        usage_key = self.response_usage_key(resp)
        obj = self.get_item_from_modulestore(usage_key)
        self.assertEqual(obj.start, datetime(2030, 1, 1, tzinfo=UTC))

    def test_static_tabs_initialization(self):
        """
        Test that static tab display names are not being initialized as None.
        """
        # Add a new static tab with no explicit name
        resp = self.create_xblock(category='static_tab')
        usage_key = self.response_usage_key(resp)
        # Check that its name is not None.
        # (assertEqual rather than the deprecated assertEquals alias.)
        new_tab = self.get_item_from_modulestore(usage_key)
        self.assertEqual(new_tab.display_name, 'Empty')
class TestDuplicateItem(ItemTest):
    """
    Test the duplicate method.
    """
    def setUp(self):
        """ Creates the test course structure and a few components to 'duplicate'. """
        super(TestDuplicateItem, self).setUp()
        # Create a parent chapter (for testing children of children).
        resp = self.create_xblock(parent_usage_key=self.usage_key, category='chapter')
        self.chapter_usage_key = self.response_usage_key(resp)
        # create a sequential containing a problem and an html component
        resp = self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential')
        self.seq_usage_key = self.response_usage_key(resp)
        # create problem and an html component
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='problem', boilerplate='multiplechoice.yaml')
        self.problem_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='html')
        self.html_usage_key = self.response_usage_key(resp)
        # Create a second sequential just (testing children of children)
        # NOTE(review): category 'sequential2' looks like it may be a typo for
        # 'sequential' — confirm against the xblock category registry.
        self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential2')

    def test_duplicate_equality(self):
        """
        Tests that a duplicated xblock is identical to the original,
        except for location and display name.
        """
        def duplicate_and_verify(source_usage_key, parent_usage_key):
            """ Duplicates the source, parenting to supplied parent. Then does equality check. """
            usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
            self.assertTrue(
                check_equality(source_usage_key, usage_key, parent_usage_key),
                "Duplicated item differs from original"
            )

        def check_equality(source_usage_key, duplicate_usage_key, parent_usage_key=None):
            """
            Gets source and duplicated items from the modulestore using supplied usage keys.
            Then verifies that they represent equivalent items (modulo parents and other
            known things that may differ).
            """
            original_item = self.get_item_from_modulestore(source_usage_key)
            duplicated_item = self.get_item_from_modulestore(duplicate_usage_key)
            self.assertNotEqual(
                unicode(original_item.location),
                unicode(duplicated_item.location),
                "Location of duplicate should be different from original"
            )
            # Parent will only be equal for root of duplicated structure, in the case
            # where an item is duplicated in-place.
            if parent_usage_key and unicode(original_item.parent) == unicode(parent_usage_key):
                self.assertEqual(
                    unicode(parent_usage_key), unicode(duplicated_item.parent),
                    "Parent of duplicate should equal parent of source for root xblock when duplicated in-place"
                )
            else:
                self.assertNotEqual(
                    unicode(original_item.parent), unicode(duplicated_item.parent),
                    "Parent duplicate should be different from source"
                )
            # Set the location, display name, and parent to be the same so we can make sure the rest of the
            # duplicate is equal.
            duplicated_item.location = original_item.location
            duplicated_item.display_name = original_item.display_name
            duplicated_item.parent = original_item.parent
            # Children will also be duplicated, so for the purposes of testing equality, we will set
            # the children to the original after recursively checking the children.
            if original_item.has_children:
                self.assertEqual(
                    len(original_item.children),
                    len(duplicated_item.children),
                    "Duplicated item differs in number of children"
                )
                for i in xrange(len(original_item.children)):
                    if not check_equality(original_item.children[i], duplicated_item.children[i]):
                        return False
                duplicated_item.children = original_item.children
            return original_item == duplicated_item

        # Exercise leaf components, a container, and a container-of-containers.
        duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)
        duplicate_and_verify(self.html_usage_key, self.seq_usage_key)
        duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)
        duplicate_and_verify(self.chapter_usage_key, self.usage_key)

    def test_ordering(self):
        """
        Tests that a duplicated xblock appears immediately after its source
        (if duplicate and source share the same parent), else at the
        end of the children of the parent.
        """
        def verify_order(source_usage_key, parent_usage_key, source_position=None):
            # Duplicate source into parent and check the duplicate's position
            # among the parent's children. source_position=None means the
            # source is NOT a child of parent, so the duplicate goes last.
            usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
            parent = self.get_item_from_modulestore(parent_usage_key)
            children = parent.children
            if source_position is None:
                self.assertFalse(source_usage_key in children, 'source item not expected in children array')
                self.assertEqual(
                    children[len(children) - 1],
                    usage_key,
                    "duplicated item not at end"
                )
            else:
                self.assertEqual(
                    children[source_position],
                    source_usage_key,
                    "source item at wrong position"
                )
                self.assertEqual(
                    children[source_position + 1],
                    usage_key,
                    "duplicated item not ordered after source item"
                )

        verify_order(self.problem_usage_key, self.seq_usage_key, 0)
        # 2 because duplicate of problem should be located before.
        verify_order(self.html_usage_key, self.seq_usage_key, 2)
        verify_order(self.seq_usage_key, self.chapter_usage_key, 0)
        # Test duplicating something into a location that is not the parent of the original item.
        # Duplicated item should appear at the end.
        verify_order(self.html_usage_key, self.usage_key)

    def test_display_name(self):
        """
        Tests the expected display name for the duplicated xblock.
        """
        def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):
            # Duplicate and assert the resulting display_name; returns the
            # duplicate's usage key so dupes-of-dupes can be chained.
            usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)
            duplicated_item = self.get_item_from_modulestore(usage_key)
            self.assertEqual(duplicated_item.display_name, expected_name)
            return usage_key

        # Display name comes from template.
        dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, "Duplicate of 'Multiple Choice'")
        # Test dupe of dupe.
        verify_name(dupe_usage_key, self.seq_usage_key, "Duplicate of 'Duplicate of 'Multiple Choice''")
        # Uses default display_name of 'Text' from HTML component.
        verify_name(self.html_usage_key, self.seq_usage_key, "Duplicate of 'Text'")
        # The sequence does not have a display_name set, so category is shown.
        verify_name(self.seq_usage_key, self.chapter_usage_key, "Duplicate of sequential")
        # Now send a custom display name for the duplicate.
        verify_name(self.seq_usage_key, self.chapter_usage_key, "customized name", display_name="customized name")

    def _duplicate_item(self, parent_usage_key, source_usage_key, display_name=None):
        """
        POSTs a duplicate request to the xblock handler and returns the new
        item's usage key.
        """
        data = {
            'parent_locator': unicode(parent_usage_key),
            'duplicate_source_locator': unicode(source_usage_key)
        }
        if display_name is not None:
            data['display_name'] = display_name

        resp = self.client.ajax_post(reverse('contentstore.views.xblock_handler'), json.dumps(data))
        return self.response_usage_key(resp)
class TestEditItem(ItemTest):
    """
    Test xblock update.
    """
    def setUp(self):
        """ Creates the test course structure and a couple problems to 'edit'. """
        super(TestEditItem, self).setUp()
        # create a chapter
        display_name = 'chapter created'
        resp = self.create_xblock(display_name=display_name, category='chapter')
        chap_usage_key = self.response_usage_key(resp)

        # create 2 sequentials
        resp = self.create_xblock(parent_usage_key=chap_usage_key, category='sequential')
        self.seq_usage_key = self.response_usage_key(resp)
        self.seq_update_url = reverse_usage_url("xblock_handler", self.seq_usage_key)

        resp = self.create_xblock(parent_usage_key=chap_usage_key, category='sequential')
        self.seq2_usage_key = self.response_usage_key(resp)
        self.seq2_update_url = reverse_usage_url("xblock_handler", self.seq2_usage_key)

        # create problem w/ boilerplate
        template_id = 'multiplechoice.yaml'
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='problem', boilerplate=template_id)
        self.problem_usage_key = self.response_usage_key(resp)
        self.problem_update_url = reverse_usage_url("xblock_handler", self.problem_usage_key)

        self.course_update_url = reverse_usage_url("xblock_handler", self.usage_key)

    def test_delete_field(self):
        """
        Sending None for a metadata field removes the explicit setting
        (rerandomize falls back to 'never').
        """
        self.client.ajax_post(
            self.problem_update_url,
            data={'metadata': {'rerandomize': 'onreset'}}
        )
        problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(problem.rerandomize, 'onreset')
        self.client.ajax_post(
            self.problem_update_url,
            data={'metadata': {'rerandomize': None}}
        )
        problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(problem.rerandomize, 'never')

    def test_null_field(self):
        """
        Sending null in for a field 'deletes' it
        """
        problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertIsNotNone(problem.markdown)
        self.client.ajax_post(
            self.problem_update_url,
            data={'nullout': ['markdown']}
        )
        problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertIsNone(problem.markdown)

    def test_date_fields(self):
        """
        Test setting due & start dates on sequential
        """
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertIsNone(sequential.due)
        self.client.ajax_post(
            self.seq_update_url,
            data={'metadata': {'due': '2010-11-22T04:00Z'}}
        )
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
        # Setting start must not clobber the previously-set due date.
        self.client.ajax_post(
            self.seq_update_url,
            data={'metadata': {'start': '2010-09-12T14:00Z'}}
        )
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
        self.assertEqual(sequential.start, datetime(2010, 9, 12, 14, 0, tzinfo=UTC))

    def test_delete_child(self):
        """
        Test deleting a child.
        """
        # Create 2 children of main course.
        resp_1 = self.create_xblock(display_name='child 1', category='chapter')
        resp_2 = self.create_xblock(display_name='child 2', category='chapter')
        chapter1_usage_key = self.response_usage_key(resp_1)
        chapter2_usage_key = self.response_usage_key(resp_2)
        course = self.get_item_from_modulestore(self.usage_key)
        self.assertIn(chapter1_usage_key, course.children)
        self.assertIn(chapter2_usage_key, course.children)

        # Remove one child from the course.
        resp = self.client.delete(reverse_usage_url("xblock_handler", chapter1_usage_key))
        self.assertEqual(resp.status_code, 204)

        # Verify that the child is removed.
        course = self.get_item_from_modulestore(self.usage_key)
        self.assertNotIn(chapter1_usage_key, course.children)
        self.assertIn(chapter2_usage_key, course.children)

    def test_reorder_children(self):
        """
        Test reordering children that can be in the draft store.
        """
        # Create 2 child units and re-order them. There was a bug about @draft getting added
        # to the IDs.
        unit_1_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
        unit_2_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
        unit1_usage_key = self.response_usage_key(unit_1_resp)
        unit2_usage_key = self.response_usage_key(unit_2_resp)

        # The sequential already has a child defined in the setUp (a problem).
        # Children must be on the sequential to reproduce the original bug,
        # as it is important that the parent (sequential) NOT be in the draft store.
        children = self.get_item_from_modulestore(self.seq_usage_key).children
        self.assertEqual(unit1_usage_key, children[1])
        self.assertEqual(unit2_usage_key, children[2])

        # Swap the two units by posting the full new children ordering.
        resp = self.client.ajax_post(
            self.seq_update_url,
            data={'children': [unicode(self.problem_usage_key), unicode(unit2_usage_key), unicode(unit1_usage_key)]}
        )
        self.assertEqual(resp.status_code, 200)
        children = self.get_item_from_modulestore(self.seq_usage_key).children
        self.assertEqual(self.problem_usage_key, children[0])
        self.assertEqual(unit1_usage_key, children[2])
        self.assertEqual(unit2_usage_key, children[1])

    def test_move_parented_child(self):
        """
        Test moving a child from one Section to another
        """
        unit_1_key = self.response_usage_key(
            self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical', display_name='unit 1')
        )
        unit_2_key = self.response_usage_key(
            self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
        )

        # move unit 1 from sequential1 to sequential2
        resp = self.client.ajax_post(
            self.seq2_update_url,
            data={'children': [unicode(unit_1_key), unicode(unit_2_key)]}
        )
        self.assertEqual(resp.status_code, 200)

        # verify children
        self.assertListEqual(
            self.get_item_from_modulestore(self.seq2_usage_key).children,
            [unit_1_key, unit_2_key],
        )
        self.assertListEqual(
            self.get_item_from_modulestore(self.seq_usage_key).children,
            [self.problem_usage_key],  # problem child created in setUp
        )

    def test_move_orphaned_child_error(self):
        """
        Test moving an orphan returns an error
        """
        # Create an item directly in the modulestore so it has no parent.
        unit_1_key = self.store.create_item(self.user.id, self.course_key, 'vertical', 'unit1').location

        # adding orphaned unit 1 should return an error
        resp = self.client.ajax_post(
            self.seq2_update_url,
            data={'children': [unicode(unit_1_key)]}
        )
        self.assertEqual(resp.status_code, 400)
        self.assertIn("Invalid data, possibly caused by concurrent authors", resp.content)

        # verify children
        self.assertListEqual(
            self.get_item_from_modulestore(self.seq2_usage_key).children,
            []
        )

    def test_move_child_creates_orphan_error(self):
        """
        Test creating an orphan returns an error
        """
        unit_1_key = self.response_usage_key(
            self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 1')
        )
        unit_2_key = self.response_usage_key(
            self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
        )

        # remove unit 2 should return an error
        resp = self.client.ajax_post(
            self.seq2_update_url,
            data={'children': [unicode(unit_1_key)]}
        )
        self.assertEqual(resp.status_code, 400)
        self.assertIn("Invalid data, possibly caused by concurrent authors", resp.content)

        # verify children: the rejected update must leave both units in place
        self.assertListEqual(
            self.get_item_from_modulestore(self.seq2_usage_key).children,
            [unit_1_key, unit_2_key]
        )

    def _is_location_published(self, location):
        """
        Returns whether or not the item with given location has a published version.
        """
        return modulestore().has_item(location, revision=ModuleStoreEnum.RevisionOption.published_only)

    def _verify_published_with_no_draft(self, location):
        """
        Verifies the item with given location has a published version and no draft (unpublished changes).
        """
        self.assertTrue(self._is_location_published(location))
        self.assertFalse(modulestore().has_changes(modulestore().get_item(location)))

    def _verify_published_with_draft(self, location):
        """
        Verifies the item with given location has a published version and also a draft version (unpublished changes).
        """
        self.assertTrue(self._is_location_published(location))
        self.assertTrue(modulestore().has_changes(modulestore().get_item(location)))

    def test_make_public(self):
        """ Test making a private problem public (publishing it). """
        # When the problem is first created, it is only in draft (because of its category).
        self.assertFalse(self._is_location_published(self.problem_usage_key))
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )
        self._verify_published_with_no_draft(self.problem_usage_key)

    def test_make_draft(self):
        """ Test creating a draft version of a public problem. """
        self._make_draft_content_different_from_published()

    def test_revert_to_published(self):
        """ Test reverting draft content to published """
        self._make_draft_content_different_from_published()
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'discard_changes'}
        )
        self._verify_published_with_no_draft(self.problem_usage_key)
        # Discarding must restore the published state (no due date was ever published).
        published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)
        self.assertIsNone(published.due)

    def test_republish(self):
        """ Test republishing an item. """
        new_display_name = 'New Display Name'

        # When the problem is first created, it is only in draft (because of its category).
        self.assertFalse(self._is_location_published(self.problem_usage_key))

        # Republishing when only in draft will update the draft but not cause a public item to be created.
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'publish': 'republish',
                'metadata': {
                    'display_name': new_display_name
                }
            }
        )
        self.assertFalse(self._is_location_published(self.problem_usage_key))
        draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(draft.display_name, new_display_name)

        # Publish the item
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )

        # Now republishing should update the published version
        new_display_name_2 = 'New Display Name 2'
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'publish': 'republish',
                'metadata': {
                    'display_name': new_display_name_2
                }
            }
        )
        self._verify_published_with_no_draft(self.problem_usage_key)
        published = modulestore().get_item(
            self.problem_usage_key,
            revision=ModuleStoreEnum.RevisionOption.published_only
        )
        self.assertEqual(published.display_name, new_display_name_2)

    def test_direct_only_categories_not_republished(self):
        """Verify that republish is ignored for items in DIRECT_ONLY_CATEGORIES"""
        # Create a vertical child with published and unpublished versions.
        # If the parent sequential is not re-published, then the child problem should also not be re-published.
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='vertical', category='vertical')
        vertical_usage_key = self.response_usage_key(resp)
        vertical_update_url = reverse_usage_url('xblock_handler', vertical_usage_key)
        self.client.ajax_post(vertical_update_url, data={'publish': 'make_public'})
        self.client.ajax_post(vertical_update_url, data={'metadata': {'display_name': 'New Display Name'}})

        self._verify_published_with_draft(self.seq_usage_key)
        self.client.ajax_post(self.seq_update_url, data={'publish': 'republish'})
        # Still has a draft: the republish of the sequential did not propagate.
        self._verify_published_with_draft(self.seq_usage_key)

    def _make_draft_content_different_from_published(self):
        """
        Helper method to create different draft and published versions of a problem.
        """
        # Make problem public.
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )
        self._verify_published_with_no_draft(self.problem_usage_key)
        published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)

        # Update the draft version and check that published is different.
        self.client.ajax_post(
            self.problem_update_url,
            data={'metadata': {'due': '2077-10-10T04:00Z'}}
        )
        updated_draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(updated_draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
        self.assertIsNone(published.due)
        # Fetch the published version again to make sure the due date is still unset.
        published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
        self.assertIsNone(published.due)

    def test_make_public_with_update(self):
        """ Update a problem and make it public at the same time. """
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'metadata': {'due': '2077-10-10T04:00Z'},
                'publish': 'make_public'
            }
        )
        published = self.get_item_from_modulestore(self.problem_usage_key)
        self.assertEqual(published.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))

    def test_published_and_draft_contents_with_update(self):
        """ Create a draft and publish it then modify the draft and check that published content is not modified """

        # Make problem public.
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )
        self._verify_published_with_no_draft(self.problem_usage_key)
        published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)

        # Now make a draft
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'id': unicode(self.problem_usage_key),
                'metadata': {},
                'data': "<p>Problem content draft.</p>"
            }
        )

        # Both published and draft content should be different
        draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertNotEqual(draft.data, published.data)

        # Get problem by 'xblock_handler'
        view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDENT_VIEW})
        resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
        self.assertEqual(resp.status_code, 200)

        # Activate the editing view
        view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDIO_VIEW})
        resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
        self.assertEqual(resp.status_code, 200)

        # Both published and draft content should still be different
        # (i.e. merely rendering the views must not publish the draft)
        draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertNotEqual(draft.data, published.data)
        # Fetch the published version again to make sure the data is correct.
        published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
        self.assertNotEqual(draft.data, published.data)

    def test_publish_states_of_nested_xblocks(self):
        """ Test publishing of a unit page containing a nested xblock """

        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='Test Unit', category='vertical')
        unit_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=unit_usage_key, category='wrapper')
        wrapper_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='html')
        html_usage_key = self.response_usage_key(resp)

        # The unit and its children should be private initially
        unit_update_url = reverse_usage_url('xblock_handler', unit_usage_key)
        self.assertFalse(self._is_location_published(unit_usage_key))
        self.assertFalse(self._is_location_published(html_usage_key))

        # Make the unit public and verify that the problem is also made public
        resp = self.client.ajax_post(
            unit_update_url,
            data={'publish': 'make_public'}
        )
        self.assertEqual(resp.status_code, 200)
        self._verify_published_with_no_draft(unit_usage_key)
        self._verify_published_with_no_draft(html_usage_key)

        # Make a draft for the unit and verify that the problem also has a draft
        resp = self.client.ajax_post(
            unit_update_url,
            data={
                'id': unicode(unit_usage_key),
                'metadata': {},
            }
        )
        self.assertEqual(resp.status_code, 200)
        self._verify_published_with_draft(unit_usage_key)
        self._verify_published_with_draft(html_usage_key)

    def test_field_value_errors(self):
        """
        Test that if the user's input causes a ValueError on an XBlock field,
        we provide a friendly error message back to the user.
        """
        response = self.create_xblock(parent_usage_key=self.seq_usage_key, category='video')
        video_usage_key = self.response_usage_key(response)
        update_url = reverse_usage_url('xblock_handler', video_usage_key)

        response = self.client.ajax_post(
            update_url,
            data={
                'id': unicode(video_usage_key),
                'metadata': {
                    'saved_video_position': "Not a valid relative time",
                },
            }
        )
        self.assertEqual(response.status_code, 400)
        parsed = json.loads(response.content)
        self.assertIn("error", parsed)
        self.assertIn("Incorrect RelativeTime value", parsed["error"])  # See xmodule/fields.py
class TestEditSplitModule(ItemTest):
"""
Tests around editing instances of the split_test module.
"""
def setUp(self):
    """
    Creates a course with two group configurations (2 and 3 groups) and a
    split_test block beneath a fresh vertical to exercise the edit flows.
    """
    super(TestEditSplitModule, self).setUp()
    self.course.user_partitions = [
        UserPartition(
            0, 'first_partition', 'First Partition',
            [Group("0", 'alpha'), Group("1", 'beta')]
        ),
        UserPartition(
            1, 'second_partition', 'Second Partition',
            [Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]
        )
    ]
    self.store.update_item(self.course, self.user.id)
    root_usage_key = self._create_vertical()
    resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
    self.split_test_usage_key = self.response_usage_key(resp)
    self.split_test_update_url = reverse_usage_url("xblock_handler", self.split_test_usage_key)
    # A plain authenticated request object for code paths that need one.
    self.request_factory = RequestFactory()
    self.request = self.request_factory.get('/dummy-url')
    self.request.user = self.user
def _update_partition_id(self, partition_id):
    """
    Helper method that sets the user_partition_id to the supplied value.

    The updated split_test instance is returned.
    """
    self.client.ajax_post(
        self.split_test_update_url,
        # Even though user_partition_id is Scope.content, it will get saved by the Studio editor as
        # metadata. The code in item.py will update the field correctly, even though it is not the
        # expected scope.
        data={'metadata': {'user_partition_id': str(partition_id)}}
    )

    # Verify the partition_id was saved.
    split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
    self.assertEqual(partition_id, split_test.user_partition_id)
    return split_test
def _assert_children(self, expected_number):
    """
    Verifies the number of children of the split_test instance.

    Returns the (draft) split_test item for further inspection.
    """
    split_test_item = self.get_item_from_modulestore(self.split_test_usage_key, True)
    child_count = len(split_test_item.children)
    self.assertEqual(expected_number, child_count)
    return split_test_item
def test_create_groups(self):
    """
    Test that verticals are created for the configuration groups when
    a split_test module is edited.
    """
    split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
    # Initially, no user_partition_id is set, and the split_test has no children.
    self.assertEqual(-1, split_test.user_partition_id)
    self.assertEqual(0, len(split_test.children))

    # Set the user_partition_id to 0.
    split_test = self._update_partition_id(0)

    # Verify that child verticals have been set to match the groups
    self.assertEqual(2, len(split_test.children))
    vertical_0 = self.get_item_from_modulestore(split_test.children[0], verify_is_draft=True)
    vertical_1 = self.get_item_from_modulestore(split_test.children[1], verify_is_draft=True)
    self.assertEqual("vertical", vertical_0.category)
    self.assertEqual("vertical", vertical_1.category)
    self.assertEqual("Group ID 0", vertical_0.display_name)
    self.assertEqual("Group ID 1", vertical_1.display_name)

    # Verify that the group_id_to_child mapping is correct.
    # (Keys are the group ids as strings.)
    self.assertEqual(2, len(split_test.group_id_to_child))
    self.assertEqual(vertical_0.location, split_test.group_id_to_child['0'])
    self.assertEqual(vertical_1.location, split_test.group_id_to_child['1'])
    def test_change_user_partition_id(self):
        """
        Test what happens when the user_partition_id is changed to a different
        group configuration.
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(0)
        self.assertEqual(2, len(split_test.children))
        initial_vertical_0_location = split_test.children[0]
        initial_vertical_1_location = split_test.children[1]
        # Set to second group configuration
        split_test = self._update_partition_id(1)
        # We don't remove existing children.
        self.assertEqual(5, len(split_test.children))
        self.assertEqual(initial_vertical_0_location, split_test.children[0])
        self.assertEqual(initial_vertical_1_location, split_test.children[1])
        # The second configuration's verticals are appended after the original two.
        vertical_0 = self.get_item_from_modulestore(split_test.children[2], verify_is_draft=True)
        vertical_1 = self.get_item_from_modulestore(split_test.children[3], verify_is_draft=True)
        vertical_2 = self.get_item_from_modulestore(split_test.children[4], verify_is_draft=True)
        # Verify that the group_id_to_child mapping is correct.
        self.assertEqual(3, len(split_test.group_id_to_child))
        self.assertEqual(vertical_0.location, split_test.group_id_to_child['0'])
        self.assertEqual(vertical_1.location, split_test.group_id_to_child['1'])
        self.assertEqual(vertical_2.location, split_test.group_id_to_child['2'])
        # The mapping now points at the new verticals, not the original ones.
        self.assertNotEqual(initial_vertical_0_location, vertical_0.location)
        self.assertNotEqual(initial_vertical_1_location, vertical_1.location)
def test_change_same_user_partition_id(self):
"""
Test that nothing happens when the user_partition_id is set to the same value twice.
"""
# Set to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
initial_group_id_to_child = split_test.group_id_to_child
# Set again to first group configuration.
split_test = self._update_partition_id(0)
self.assertEqual(2, len(split_test.children))
self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
    def test_change_non_existent_user_partition_id(self):
        """
        Test that nothing happens when the user_partition_id is set to a value that doesn't exist.
        The user_partition_id will be updated, but children and group_id_to_child map will not change.
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(0)
        self.assertEqual(2, len(split_test.children))
        initial_group_id_to_child = split_test.group_id_to_child
        # Set to a group configuration that doesn't exist.
        split_test = self._update_partition_id(-50)
        # Children and mapping are untouched by the bogus id.
        self.assertEqual(2, len(split_test.children))
        self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
    def test_add_groups(self):
        """
        Test the "fix up behavior" when groups are missing (after a group is added to a group configuration).
        This test actually belongs over in common, but it relies on a mutable modulestore.
        TODO: move tests that can go over to common after the mixed modulestore work is done. # pylint: disable=fixme
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(0)
        # Add a group to the first group configuration.
        split_test.user_partitions = [
            UserPartition(
                0, 'first_partition', 'First Partition',
                [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'pie')]
            )
        ]
        self.store.update_item(split_test, self.user.id)
        # group_id_to_child and children have not changed yet.
        split_test = self._assert_children(2)
        group_id_to_child = split_test.group_id_to_child.copy()
        self.assertEqual(2, len(group_id_to_child))
        # Test environment and Studio use different module systems
        # (CachingDescriptorSystem is used in tests, PreviewModuleSystem in Studio).
        # CachingDescriptorSystem doesn't have user service, that's needed for
        # SplitTestModule. So, in this line of code we add this service manually.
        split_test.runtime._services['user'] = DjangoXBlockUserService(self.user)  # pylint: disable=protected-access
        # Call add_missing_groups method to add the missing group.
        split_test.add_missing_groups(self.request)
        split_test = self._assert_children(3)
        self.assertNotEqual(group_id_to_child, split_test.group_id_to_child)
        group_id_to_child = split_test.group_id_to_child
        # The new group's vertical is appended as the third child.
        self.assertEqual(split_test.children[2], group_id_to_child["2"])
        # Call add_missing_groups again -- it should be a no-op.
        split_test.add_missing_groups(self.request)
        split_test = self._assert_children(3)
        self.assertEqual(group_id_to_child, split_test.group_id_to_child)
@ddt.ddt
class TestComponentHandler(TestCase):
    """
    Tests for the ``component_handler`` view, with the modulestore mocked out.
    """
    def setUp(self):
        # Fix: call the base-class setUp so TestCase initialization runs.
        super(TestComponentHandler, self).setUp()
        self.request_factory = RequestFactory()

        patcher = patch('contentstore.views.component.modulestore')
        self.modulestore = patcher.start()
        self.addCleanup(patcher.stop)

        # component_handler calls modulestore.get_item to get the descriptor of the requested xBlock.
        # Here, we mock the return value of modulestore.get_item so it can be used to mock the handler
        # of the xBlock descriptor.
        self.descriptor = self.modulestore.return_value.get_item.return_value

        self.usage_key_string = unicode(
            Location('dummy_org', 'dummy_course', 'dummy_run', 'dummy_category', 'dummy_name')
        )

        self.user = UserFactory()

        self.request = self.request_factory.get('/dummy-url')
        self.request.user = self.user

    def test_invalid_handler(self):
        """
        An unknown handler name must surface as a 404.
        """
        self.descriptor.handle.side_effect = NoSuchHandlerError

        with self.assertRaises(Http404):
            component_handler(self.request, self.usage_key_string, 'invalid_handler')

    @ddt.data('GET', 'POST', 'PUT', 'DELETE')
    def test_request_method(self, method):
        """
        The HTTP method of the incoming request must be passed through to the handler.
        """
        def check_handler(handler, request, suffix):
            # Use the non-deprecated assertEqual (assertEquals is a legacy alias).
            self.assertEqual(request.method, method)
            return Response()

        self.descriptor.handle = check_handler

        # Have to use the right method to create the request to get the HTTP method that we want
        req_factory_method = getattr(self.request_factory, method.lower())
        request = req_factory_method('/dummy-url')
        request.user = self.user

        component_handler(request, self.usage_key_string, 'dummy_handler')

    @ddt.data(200, 404, 500)
    def test_response_code(self, status_code):
        """
        The status code returned by the handler must be propagated to the caller.
        """
        def create_response(handler, request, suffix):
            return Response(status_code=status_code)

        self.descriptor.handle = create_response

        self.assertEqual(
            component_handler(self.request, self.usage_key_string, 'dummy_handler').status_code,
            status_code
        )
class TestComponentTemplates(CourseTestCase):
    """
    Unit tests for the generation of the component templates for a course.
    """
    def setUp(self):
        """
        Compute the default set of component templates for the test course.
        """
        super(TestComponentTemplates, self).setUp()
        self.templates = get_component_templates(self.course)

    def get_templates_of_type(self, template_type):
        """
        Returns the templates for the specified type, or None if none is found.
        """
        template_dict = next((template for template in self.templates if template.get('type') == template_type), None)
        return template_dict.get('templates') if template_dict else None

    def get_template(self, templates, display_name):
        """
        Returns the template which has the specified display name.
        """
        return next((template for template in templates if template.get('display_name') == display_name), None)

    def test_basic_components(self):
        """
        Test the handling of the basic component templates.
        """
        self.assertIsNotNone(self.get_templates_of_type('discussion'))
        self.assertIsNotNone(self.get_templates_of_type('html'))
        self.assertIsNotNone(self.get_templates_of_type('problem'))
        self.assertIsNotNone(self.get_templates_of_type('video'))
        self.assertIsNone(self.get_templates_of_type('advanced'))

    def test_advanced_components(self):
        """
        Test the handling of advanced component templates.
        """
        self.course.advanced_modules.append('word_cloud')
        self.templates = get_component_templates(self.course)
        advanced_templates = self.get_templates_of_type('advanced')
        self.assertEqual(len(advanced_templates), 1)
        # Fix: local was misspelled 'world_cloud_template'; it holds the word_cloud template.
        word_cloud_template = advanced_templates[0]
        self.assertEqual(word_cloud_template.get('category'), 'word_cloud')
        self.assertEqual(word_cloud_template.get('display_name'), u'Word cloud')
        self.assertIsNone(word_cloud_template.get('boilerplate_name', None))

        # Verify that non-advanced components are not added twice
        self.course.advanced_modules.append('video')
        self.course.advanced_modules.append('openassessment')
        self.templates = get_component_templates(self.course)
        advanced_templates = self.get_templates_of_type('advanced')
        self.assertEqual(len(advanced_templates), 1)
        only_template = advanced_templates[0]
        self.assertNotEqual(only_template.get('category'), 'video')
        self.assertNotEqual(only_template.get('category'), 'openassessment')

    def test_advanced_components_without_display_name(self):
        """
        Test that advanced components without display names display their category instead.
        """
        self.course.advanced_modules.append('graphical_slider_tool')
        self.templates = get_component_templates(self.course)
        template = self.get_templates_of_type('advanced')[0]
        self.assertEqual(template.get('display_name'), 'graphical_slider_tool')

    def test_advanced_problems(self):
        """
        Test the handling of advanced problem templates.
        """
        problem_templates = self.get_templates_of_type('problem')
        ora_template = self.get_template(problem_templates, u'Peer Assessment')
        self.assertIsNotNone(ora_template)
        self.assertEqual(ora_template.get('category'), 'openassessment')
        self.assertIsNone(ora_template.get('boilerplate_name', None))

    @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"])
    def test_ora1_no_advance_component_button(self):
        """
        Test that there will be no `Advanced` button on unit page if `combinedopenended` and `peergrading` are
        deprecated provided that there are only 'combinedopenended', 'peergrading' modules in `Advanced Module List`
        """
        self.course.advanced_modules.extend(['combinedopenended', 'peergrading'])
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        self.assertNotIn('Advanced', button_names)

    @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', ["combinedopenended", "peergrading"])
    def test_cannot_create_ora1_problems(self):
        """
        Test that we can't create ORA1 problems if `combinedopenended` and `peergrading` are deprecated
        """
        self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading'])
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        self.assertIn('Advanced', button_names)
        self.assertEqual(len(templates[0]['templates']), 1)
        template_display_names = [template['display_name'] for template in templates[0]['templates']]
        self.assertEqual(template_display_names, ['Annotation'])

    @patch('django.conf.settings.DEPRECATED_ADVANCED_COMPONENT_TYPES', [])
    def test_create_ora1_problems(self):
        """
        Test that we can create ORA1 problems if `combinedopenended` and `peergrading` are not deprecated
        """
        self.course.advanced_modules.extend(['annotatable', 'combinedopenended', 'peergrading'])
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        self.assertIn('Advanced', button_names)
        self.assertEqual(len(templates[0]['templates']), 3)
        template_display_names = [template['display_name'] for template in templates[0]['templates']]
        self.assertEqual(template_display_names, ['Annotation', 'Open Response Assessment', 'Peer Grading Interface'])
class TestXBlockInfo(ItemTest):
    """
    Unit tests for XBlock's outline handling.
    """
    def setUp(self):
        """
        Build a small course tree: chapter -> sequential -> vertical -> video.
        """
        super(TestXBlockInfo, self).setUp()
        user_id = self.user.id
        self.chapter = ItemFactory.create(
            parent_location=self.course.location, category='chapter', display_name="Week 1", user_id=user_id
        )
        self.sequential = ItemFactory.create(
            parent_location=self.chapter.location, category='sequential', display_name="Lesson 1", user_id=user_id
        )
        self.vertical = ItemFactory.create(
            parent_location=self.sequential.location, category='vertical', display_name='Unit 1', user_id=user_id
        )
        self.video = ItemFactory.create(
            parent_location=self.vertical.location, category='video', display_name='My Video', user_id=user_id
        )
    def test_json_responses(self):
        """
        Verify the JSON payload returned by the xblock outline handler for the course.
        """
        outline_url = reverse_usage_url('xblock_outline_handler', self.usage_key)
        resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)
        self.validate_course_xblock_info(json_response, course_outline=True)
    def test_chapter_xblock_info(self):
        """
        Verify the xblock info created for the test chapter.
        """
        chapter = modulestore().get_item(self.chapter.location)
        xblock_info = create_xblock_info(
            chapter,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        self.validate_chapter_xblock_info(xblock_info)
    def test_sequential_xblock_info(self):
        """
        Verify the xblock info created for the test sequential.
        """
        sequential = modulestore().get_item(self.sequential.location)
        xblock_info = create_xblock_info(
            sequential,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        self.validate_sequential_xblock_info(xblock_info)
    def test_vertical_xblock_info(self):
        """
        Verify the xblock info (with ancestors and publishing info) for the test vertical.
        """
        vertical = modulestore().get_item(self.vertical.location)
        xblock_info = create_xblock_info(
            vertical,
            include_child_info=True,
            include_children_predicate=ALWAYS,
            include_ancestor_info=True
        )
        add_container_page_publishing_info(vertical, xblock_info)
        self.validate_vertical_xblock_info(xblock_info)
    def test_component_xblock_info(self):
        """
        Verify the xblock info created for the leaf video component.
        """
        video = modulestore().get_item(self.video.location)
        xblock_info = create_xblock_info(
            video,
            include_child_info=True,
            include_children_predicate=ALWAYS
        )
        self.validate_component_xblock_info(xblock_info)
    def validate_course_xblock_info(self, xblock_info, has_child_info=True, course_outline=False):
        """
        Validate that the xblock info is correct for the test course.
        """
        self.assertEqual(xblock_info['category'], 'course')
        self.assertEqual(xblock_info['id'], unicode(self.course.location))
        self.assertEqual(xblock_info['display_name'], self.course.display_name)
        self.assertTrue(xblock_info['published'])
        # Finally, validate the entire response for consistency
        self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info, course_outline=course_outline)
    def validate_chapter_xblock_info(self, xblock_info, has_child_info=True):
        """
        Validate that the xblock info is correct for the test chapter.
        """
        self.assertEqual(xblock_info['category'], 'chapter')
        self.assertEqual(xblock_info['id'], unicode(self.chapter.location))
        self.assertEqual(xblock_info['display_name'], 'Week 1')
        self.assertTrue(xblock_info['published'])
        self.assertIsNone(xblock_info.get('edited_by', None))
        self.assertEqual(xblock_info['course_graders'], '["Homework", "Lab", "Midterm Exam", "Final Exam"]')
        self.assertEqual(xblock_info['start'], '2030-01-01T00:00:00Z')
        self.assertEqual(xblock_info['graded'], False)
        self.assertEqual(xblock_info['due'], None)
        self.assertEqual(xblock_info['format'], None)
        # Finally, validate the entire response for consistency
        self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
    def validate_sequential_xblock_info(self, xblock_info, has_child_info=True):
        """
        Validate that the xblock info is correct for the test sequential.
        """
        self.assertEqual(xblock_info['category'], 'sequential')
        self.assertEqual(xblock_info['id'], unicode(self.sequential.location))
        self.assertEqual(xblock_info['display_name'], 'Lesson 1')
        self.assertTrue(xblock_info['published'])
        self.assertIsNone(xblock_info.get('edited_by', None))
        # Finally, validate the entire response for consistency
        self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
    def validate_vertical_xblock_info(self, xblock_info):
        """
        Validate that the xblock info is correct for the test vertical.
        """
        self.assertEqual(xblock_info['category'], 'vertical')
        self.assertEqual(xblock_info['id'], unicode(self.vertical.location))
        self.assertEqual(xblock_info['display_name'], 'Unit 1')
        self.assertTrue(xblock_info['published'])
        self.assertEqual(xblock_info['edited_by'], 'testuser')
        # Validate that the correct ancestor info has been included
        ancestor_info = xblock_info.get('ancestor_info', None)
        self.assertIsNotNone(ancestor_info)
        ancestors = ancestor_info['ancestors']
        # Ancestors are ordered nearest-first: sequential, chapter, course.
        self.assertEqual(len(ancestors), 3)
        self.validate_sequential_xblock_info(ancestors[0], has_child_info=True)
        self.validate_chapter_xblock_info(ancestors[1], has_child_info=False)
        self.validate_course_xblock_info(ancestors[2], has_child_info=False)
        # Finally, validate the entire response for consistency
        self.validate_xblock_info_consistency(xblock_info, has_child_info=True, has_ancestor_info=True)
    def validate_component_xblock_info(self, xblock_info):
        """
        Validate that the xblock info is correct for the test component.
        """
        self.assertEqual(xblock_info['category'], 'video')
        self.assertEqual(xblock_info['id'], unicode(self.video.location))
        self.assertEqual(xblock_info['display_name'], 'My Video')
        self.assertTrue(xblock_info['published'])
        self.assertIsNone(xblock_info.get('edited_by', None))
        # Finally, validate the entire response for consistency
        self.validate_xblock_info_consistency(xblock_info)
    def validate_xblock_info_consistency(self, xblock_info, has_ancestor_info=False, has_child_info=False,
                                         course_outline=False):
        """
        Validate that the xblock info is internally consistent.
        """
        self.assertIsNotNone(xblock_info['display_name'])
        self.assertIsNotNone(xblock_info['id'])
        self.assertIsNotNone(xblock_info['category'])
        self.assertTrue(xblock_info['published'])
        if has_ancestor_info:
            self.assertIsNotNone(xblock_info.get('ancestor_info', None))
            ancestors = xblock_info['ancestor_info']['ancestors']
            for ancestor in xblock_info['ancestor_info']['ancestors']:
                self.validate_xblock_info_consistency(
                    ancestor,
                    has_child_info=(ancestor == ancestors[0]),  # Only the direct ancestor includes children
                    course_outline=course_outline
                )
        else:
            self.assertIsNone(xblock_info.get('ancestor_info', None))
        if has_child_info:
            self.assertIsNotNone(xblock_info.get('child_info', None))
            if xblock_info['child_info'].get('children', None):
                # Recurse into each child; a child carries child info only if present.
                for child_response in xblock_info['child_info']['children']:
                    self.validate_xblock_info_consistency(
                        child_response,
                        has_child_info=(not child_response.get('child_info', None) is None),
                        course_outline=course_outline
                    )
        else:
            self.assertIsNone(xblock_info.get('child_info', None))
class TestLibraryXBlockInfo(ModuleStoreTestCase):
    """
    Unit tests for XBlock Info for XBlocks in a content library
    """
    def setUp(self):
        """
        Create a library holding a top-level html block and a vertical with an html child.
        """
        super(TestLibraryXBlockInfo, self).setUp()
        user_id = self.user.id
        self.library = LibraryFactory.create()
        self.top_level_html = ItemFactory.create(
            parent_location=self.library.location, category='html', user_id=user_id, publish_item=False
        )
        self.vertical = ItemFactory.create(
            parent_location=self.library.location, category='vertical', user_id=user_id, publish_item=False
        )
        self.child_html = ItemFactory.create(
            parent_location=self.vertical.location, category='html', display_name='Test HTML Child Block',
            user_id=user_id, publish_item=False
        )
    def test_lib_xblock_info(self):
        """
        Verify the xblock info for a top-level library block (no child info expected).
        """
        html_block = modulestore().get_item(self.top_level_html.location)
        xblock_info = create_xblock_info(html_block)
        self.validate_component_xblock_info(xblock_info, html_block)
        self.assertIsNone(xblock_info.get('child_info', None))
    def test_lib_child_xblock_info(self):
        """
        Verify the xblock info and ancestor chain for a block nested inside a library vertical.
        """
        html_block = modulestore().get_item(self.child_html.location)
        xblock_info = create_xblock_info(html_block, include_ancestor_info=True, include_child_info=True)
        self.validate_component_xblock_info(xblock_info, html_block)
        self.assertIsNone(xblock_info.get('child_info', None))
        # Ancestors are ordered nearest-first: the vertical, then the library itself.
        ancestors = xblock_info['ancestor_info']['ancestors']
        self.assertEqual(len(ancestors), 2)
        self.assertEqual(ancestors[0]['category'], 'vertical')
        self.assertEqual(ancestors[0]['id'], unicode(self.vertical.location))
        self.assertEqual(ancestors[1]['category'], 'library')
    def validate_component_xblock_info(self, xblock_info, original_block):
        """
        Validate that the xblock info is correct for the test component.
        """
        self.assertEqual(xblock_info['category'], original_block.category)
        self.assertEqual(xblock_info['id'], unicode(original_block.location))
        self.assertEqual(xblock_info['display_name'], original_block.display_name)
        # Library blocks have no publishing or grading info in their xblock info.
        self.assertIsNone(xblock_info.get('has_changes', None))
        self.assertIsNone(xblock_info.get('published', None))
        self.assertIsNone(xblock_info.get('published_on', None))
        self.assertIsNone(xblock_info.get('graders', None))
class TestLibraryXBlockCreation(ItemTest):
    """
    Tests the adding of XBlocks to Library
    """
    def test_add_xblock(self):
        """
        Verify we can add an XBlock to a Library.
        """
        lib = LibraryFactory.create()
        self.create_xblock(parent_usage_key=lib.location, display_name='Test', category="html")
        lib = self.store.get_library(lib.location.library_key)
        self.assertTrue(lib.children)
        xblock_locator = lib.children[0]
        self.assertEqual(self.store.get_item(xblock_locator).display_name, 'Test')
    def test_no_add_discussion(self):
        """
        Verify we cannot add a discussion module to a Library.
        """
        lib = LibraryFactory.create()
        response = self.create_xblock(parent_usage_key=lib.location, display_name='Test', category='discussion')
        self.assertEqual(response.status_code, 400)
        lib = self.store.get_library(lib.location.library_key)
        self.assertFalse(lib.children)
    def test_no_add_advanced(self):
        """
        Verify we cannot add an advanced module (here, 'lti') to a Library,
        even when it is listed in the library's advanced_modules.
        """
        lib = LibraryFactory.create()
        lib.advanced_modules = ['lti']
        lib.save()
        response = self.create_xblock(parent_usage_key=lib.location, display_name='Test', category='lti')
        self.assertEqual(response.status_code, 400)
        lib = self.store.get_library(lib.location.library_key)
        self.assertFalse(lib.children)
class TestXBlockPublishingInfo(ItemTest):
"""
Unit tests for XBlock's outline handling.
"""
FIRST_SUBSECTION_PATH = [0]
FIRST_UNIT_PATH = [0, 0]
SECOND_UNIT_PATH = [0, 1]
def _create_child(self, parent, category, display_name, publish_item=False, staff_only=False):
"""
Creates a child xblock for the given parent.
"""
child = ItemFactory.create(
parent_location=parent.location, category=category, display_name=display_name,
user_id=self.user.id, publish_item=publish_item
)
if staff_only:
self._enable_staff_only(child.location)
# In case the staff_only state was set, return the updated xblock.
return modulestore().get_item(child.location)
def _get_child_xblock_info(self, xblock_info, index):
"""
Returns the child xblock info at the specified index.
"""
children = xblock_info['child_info']['children']
self.assertTrue(len(children) > index)
return children[index]
def _get_xblock_info(self, location):
"""
Returns the xblock info for the specified location.
"""
return create_xblock_info(
modulestore().get_item(location),
include_child_info=True,
include_children_predicate=ALWAYS,
)
    def _get_xblock_outline_info(self, location):
        """
        Returns the xblock info for the specified location as needed for the course outline page.
        """
        return create_xblock_info(
            modulestore().get_item(location),
            include_child_info=True,
            include_children_predicate=ALWAYS,
            course_outline=True
        )
def _set_release_date(self, location, start):
"""
Sets the release date for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.start = start
self.store.update_item(xblock, self.user.id)
def _enable_staff_only(self, location):
"""
Enables staff only for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.visible_to_staff_only = True
self.store.update_item(xblock, self.user.id)
def _set_display_name(self, location, display_name):
"""
Sets the display name for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.display_name = display_name
self.store.update_item(xblock, self.user.id)
def _verify_xblock_info_state(self, xblock_info, xblock_info_field, expected_state, path=None, should_equal=True):
"""
Verify the state of an xblock_info field. If no path is provided then the root item will be verified.
If should_equal is True, assert that the current state matches the expected state, otherwise assert that they
do not match.
"""
if path:
direct_child_xblock_info = self._get_child_xblock_info(xblock_info, path[0])
remaining_path = path[1:] if len(path) > 1 else None
self._verify_xblock_info_state(direct_child_xblock_info, xblock_info_field, expected_state, remaining_path, should_equal)
else:
if should_equal:
self.assertEqual(xblock_info[xblock_info_field], expected_state)
else:
self.assertNotEqual(xblock_info[xblock_info_field], expected_state)
def _verify_has_staff_only_message(self, xblock_info, expected_state, path=None):
"""
Verify the staff_only_message field of xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'staff_only_message', expected_state, path)
def _verify_visibility_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the publish state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'visibility_state', expected_state, path, should_equal)
def _verify_explicit_staff_lock_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the explicit staff lock state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'has_explicit_staff_lock', expected_state, path, should_equal)
    def test_empty_chapter(self):
        """
        A newly created, empty chapter reports the 'unscheduled' visibility state.
        """
        empty_chapter = self._create_child(self.course, 'chapter', "Empty Chapter")
        xblock_info = self._get_xblock_info(empty_chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
    def test_empty_sequential(self):
        """
        A chapter containing only an empty sequential reports 'unscheduled' at both levels.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        self._create_child(chapter, 'sequential', "Empty Sequential")
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled, path=self.FIRST_SUBSECTION_PATH)
    def test_published_unit(self):
        """
        Tests the visibility state of a published unit with release date in the future.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Future release date: published content is expected to be 'ready', not yet 'live'.
        self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.ready)
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_UNIT_PATH)
        # The staff-only unit remains staff_only regardless of the release date.
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_released_unit(self):
        """
        Tests the visibility state of a published unit with release date in the past.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Past release date: published content is expected to be 'live'.
        self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.live)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
        # The staff-only unit remains staff_only regardless of the release date.
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_unpublished_changes(self):
        """
        Tests the visibility state of a published unit with draft (unpublished) changes.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        unit = self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Setting the display name creates a draft version of unit.
        self._set_display_name(unit.location, 'Updated Unit')
        xblock_info = self._get_xblock_info(chapter.location)
        # Draft changes on the unit surface as 'needs_attention' all the way up the tree.
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_partially_released_section(self):
        """
        A section containing one live (released) subsection and one ready
        (future-dated) subsection reports 'ready' at the section level.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        released_sequential = self._create_child(chapter, 'sequential', "Released Sequential")
        self._create_child(released_sequential, 'vertical', "Released Unit", publish_item=True)
        self._create_child(released_sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Release the chapter (and thus the first sequential) in the past.
        self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
        published_sequential = self._create_child(chapter, 'sequential', "Published Sequential")
        self._create_child(published_sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(published_sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # The second sequential is dated in the future, so it is only 'ready'.
        self._set_release_date(published_sequential.location, datetime.now(UTC) + timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        # Verify the state of the released sequential
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0])
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0, 0])
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0, 1])
        # Verify the state of the published sequential
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1])
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1, 0])
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1, 1])
        # Finally verify the state of the chapter
        self._verify_visibility_state(xblock_info, VisibilityState.ready)
    def test_staff_only_section(self):
        """
        Tests that an explicitly staff-locked section and all of its children are visible to staff only.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        vertical = self._create_child(sequential, 'vertical', "Unit")
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        # Only the chapter carries the explicit lock; its descendants inherit it implicitly.
        self._verify_explicit_staff_lock_state(xblock_info, True)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
        # The unit's publishing info identifies the chapter as the source of the lock.
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        self.assertEqual(_xblock_type_and_display_name(chapter), vertical_info["staff_lock_from"])
    def test_no_staff_only_section(self):
        """
        Tests that a section with a staff-locked subsection and a visible subsection is not staff locked itself.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        self._create_child(chapter, 'sequential', "Test Visible Sequential")
        self._create_child(chapter, 'sequential', "Test Staff Locked Sequential", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        # Chapter and the visible subsection are not staff-only; only the locked one is.
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0], should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1])
    def test_staff_only_subsection(self):
        """
        Tests that an explicitly staff-locked subsection and all of its children are visible to staff only.
        In this case the parent section is also visible to staff only because all of its children are staff only.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential", staff_only=True)
        vertical = self._create_child(sequential, 'vertical', "Unit")
        xblock_info = self._get_xblock_info(chapter.location)
        # The chapter's only child is locked, so the chapter is effectively staff-only too.
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        # The explicit lock lives on the subsection only.
        self._verify_explicit_staff_lock_state(xblock_info, False)
        self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        # The unit reports the subsection as the source of its staff lock.
        self.assertEqual(_xblock_type_and_display_name(sequential), vertical_info["staff_lock_from"])
    def test_no_staff_only_subsection(self):
        """
        Tests that a subsection with a staff-locked unit and a visible unit is not staff locked itself.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        # Child order matters: the first unit is visible, the second is locked.
        self._create_child(sequential, 'vertical', "Unit")
        self._create_child(sequential, 'vertical', "Locked Unit", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        # Note: path is passed positionally here (third argument).
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_SUBSECTION_PATH, should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_UNIT_PATH, should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.SECOND_UNIT_PATH)
    def test_staff_only_unit(self):
        """
        Tests that locking a lone unit makes the unit, its subsection, and its
        section all effectively staff-only, with the explicit lock on the unit.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        vertical = self._create_child(sequential, 'vertical', "Unit", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        # Every ancestor has only staff-only content, so all are staff-only.
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        # Only the unit itself carries the explicit lock.
        self._verify_explicit_staff_lock_state(xblock_info, False)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_UNIT_PATH)
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        # The unit reports itself as the source of its staff lock.
        self.assertEqual(_xblock_type_and_display_name(vertical), vertical_info["staff_lock_from"])
    def test_unscheduled_section_with_live_subsection(self):
        """
        Tests that a section with no release date needs attention even though
        its subsection has been released (and one unit published).
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Release the subsection in the past so it is live.
        self._set_release_date(sequential.location, datetime.now(UTC) - timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        # Unscheduled chapter with live content below it needs attention.
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_unreleased_section_with_live_subsection(self):
        """
        Tests that a section scheduled in the future needs attention when its
        subsection is already released (live) -- an inconsistent schedule.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Chapter releases tomorrow; subsection released yesterday.
        self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))
        self._set_release_date(sequential.location, datetime.now(UTC) - timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_locked_section_staff_only_message(self):
        """
        Tests that a locked section has a staff only message and its descendants do not.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Unit")
        # Note: uses the *outline* info, which carries the message flags.
        xblock_info = self._get_xblock_outline_info(chapter.location)
        self._verify_has_staff_only_message(xblock_info, True)
        # Descendants inherit the lock but should not repeat the message.
        self._verify_has_staff_only_message(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
        self._verify_has_staff_only_message(xblock_info, False, path=self.FIRST_UNIT_PATH)
    def test_locked_unit_staff_only_message(self):
        """
        Tests that a lone locked unit has a staff only message along with its ancestors.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Unit", staff_only=True)
        xblock_info = self._get_xblock_outline_info(chapter.location)
        # All content is staff-only, so the message shows at every level.
        self._verify_has_staff_only_message(xblock_info, True)
        self._verify_has_staff_only_message(xblock_info, True, path=self.FIRST_SUBSECTION_PATH)
        self._verify_has_staff_only_message(xblock_info, True, path=self.FIRST_UNIT_PATH)
| agpl-3.0 |
SCSSG/Odoo-SCS | addons/gamification/wizard/update_goal.py | 386 | 1848 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class goal_manual_wizard(osv.TransientModel):
    """Wizard to update a manual goal.

    Transient model: records are temporary and garbage-collected by the
    ORM; the wizard holds the goal being edited and the new current value.
    """
    _name = 'gamification.goal.wizard'
    _columns = {
        # The goal whose 'current' value is being manually updated.
        'goal_id': fields.many2one("gamification.goal", string='Goal', required=True),
        # The new current value entered by the user.
        'current': fields.float('Current'),
    }

    def action_update_current(self, cr, uid, ids, context=None):
        """Wizard action for updating the current value.

        Writes the new value onto the goal, clears its to_update flag,
        then asks the goal model to recompute its state. Returns an empty
        dict so the client simply closes the wizard.
        """
        goal_obj = self.pool.get('gamification.goal')
        for wiz in self.browse(cr, uid, ids, context=context):
            towrite = {
                'current': wiz.current,
                # NOTE(review): writing 'goal_id' onto the goal record itself
                # looks redundant (the goal is identified by the write ids) --
                # presumably vestigial; confirm before removing.
                'goal_id': wiz.goal_id.id,
                'to_update': False,
            }
            goal_obj.write(cr, uid, [wiz.goal_id.id], towrite, context=context)
            # Trigger recomputation of the goal's completeness/state.
            goal_obj.update(cr, uid, [wiz.goal_id.id], context=context)
        return {}
| agpl-3.0 |
ystk/debian-cracklib2 | python/cracklib.py | 2 | 5659 | #
# A Python binding for cracklib.
#
# Parts of this code are based on work Copyright (c) 2003 by Domenico
# Andreoli.
#
# Copyright (c) 2008, 2009 Jan Dittberner <jan@dittberner.info>
#
# This file is part of cracklib.
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Python extensions for the cracklib binding.
"""
import string
from _cracklib import FascistCheck
# ASCII letter tables (avoids the locale-dependent string.letters).
ASCII_UPPERCASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
ASCII_LOWERCASE = "abcdefghijklmnopqrstuvwxyz"

# Minimum edit distance for a new password to count as "different enough"
# from the old one (see similar()).
DIFF_OK = 5
# Base minimum password length before character-class credits are
# subtracted (see simple()).
MIN_LENGTH = 9
# Per-character-class credits: a value >= 0 is a credit subtracted from
# MIN_LENGTH; a negative value means "require at least that many
# characters of this class".
DIG_CREDIT = 1
UP_CREDIT = 1
LOW_CREDIT = 1
OTH_CREDIT = 1
def palindrome(sample):
    """Return 1 when *sample* reads the same forwards and backwards,
    0 otherwise. The empty string counts as a palindrome."""
    return 1 if sample == sample[::-1] else 0
def distdifferent(old, new, i, j):
"""Calculate how different two strings are in terms of the number
of character removals, additions, and changes needed to go from one
to the other."""
if i == 0 or len(old) <= i:
cval = 0
else:
cval = old[i - 1]
if j == 0 or len(new) <= i:
dval = 0
else:
dval = new[j - 1]
return cval != dval
def distcalculate(distances, old, new, i, j):
    """Recursively compute the edit distance between the first *i*
    characters of *old* and the first *j* characters of *new*,
    memoizing results in the *distances* table.

    The caller (distance()) pre-seeds row 0 and column 0 of the table;
    those seeded cells (any value != -1) double as the recursion base
    cases.
    """
    tmp = 0
    # Memo hit: this sub-distance was already computed (or pre-seeded).
    if distances[i][j] != -1:
        return distances[i][j]
    # Take the cheapest of substitution / insertion / deletion ...
    tmp = distcalculate(distances, old, new, i - 1, j - 1)
    tmp = min(tmp, distcalculate(distances, old, new, i , j - 1))
    tmp = min(tmp, distcalculate(distances, old, new, i - 1, j ))
    # ... and add this cell's character-difference cost.
    tmp = tmp + distdifferent(old, new, i, j)
    distances[i][j] = tmp
    return tmp
def distance(old, new):
    """Return the edit distance between *old* and *new*.

    Builds a (len(old)+1) x (len(new)+1) memo table, seeds the first
    row and column with the trivial prefix distances, and delegates the
    recursive fill to distcalculate().
    """
    oldlength = len(old)
    newlength = len(new)
    # -1 marks "not yet computed" for distcalculate()'s memoization.
    distances = [[-1 for _ in range(newlength + 1)]
                 for _ in range(oldlength + 1)]
    # Seed the trivial cases: distance from/to the empty prefix.
    for i in range(oldlength + 1):
        distances[i][0] = i
    for j in range(newlength + 1):
        distances[0][j] = j
    distances[0][0] = 0
    # The original also zeroed the entire table *after* computing the
    # result; the table is a local about to be discarded, so that loop
    # was dead code and has been removed.
    return distcalculate(distances, old, new, oldlength, newlength)
def similar(old, new):
    """Return 1 when *new* is considered too similar to *old*, else 0.

    A password is "different enough" when its edit distance from the
    old one reaches DIFF_OK, or when it is at least twice as long.
    """
    different_enough = (distance(old, new) >= DIFF_OK
                        or len(new) >= (len(old) * 2))
    return 0 if different_enough else 1
def simple(new):
    """Checks whether the given string is simple or not.

    Returns 1 when *new* is too simple (fails the length-with-credits
    test or a negative-credit minimum), 0 otherwise. See the module
    constants MIN_LENGTH and *_CREDIT for the tunable parameters.
    """
    # Tally each character class.
    digits = 0
    uppers = 0
    lowers = 0
    others = 0
    for character in new:
        if character in string.digits:
            digits = digits + 1
        elif character in ASCII_UPPERCASE:
            uppers = uppers + 1
        elif character in ASCII_LOWERCASE:
            lowers = lowers + 1
        else:
            others = others + 1

    # The scam was this - a password of only one character type
    # must be 8 letters long. Two types, 7, and so on.
    # This is now changed, the base size and the credits or defaults
    # see the docs on the module for info on these parameters, the
    # defaults cause the effect to be the same as before the change

    # Cap each class's contribution at its credit value.
    if DIG_CREDIT >= 0 and digits > DIG_CREDIT:
        digits = DIG_CREDIT

    if UP_CREDIT >= 0 and uppers > UP_CREDIT:
        uppers = UP_CREDIT

    if LOW_CREDIT >= 0 and lowers > LOW_CREDIT:
        lowers = LOW_CREDIT

    if OTH_CREDIT >= 0 and others > OTH_CREDIT:
        others = OTH_CREDIT

    # Subtract earned credits from the base length. A negative credit
    # instead imposes a hard minimum count for that class; failing the
    # minimum makes the password "simple" immediately.
    size = MIN_LENGTH

    if DIG_CREDIT >= 0:
        size = size - digits
    elif digits < (DIG_CREDIT * -1):
        return 1

    if UP_CREDIT >= 0:
        size = size - uppers
    elif uppers < (UP_CREDIT * -1):
        return 1

    if LOW_CREDIT >= 0:
        size = size - lowers
    elif lowers < (LOW_CREDIT * -1):
        return 1

    if OTH_CREDIT >= 0:
        size = size - others
    elif others < (OTH_CREDIT * -1):
        return 1

    # Finally compare against the credit-adjusted minimum length.
    if len(new) < size:
        return 1

    return 0
def VeryFascistCheck(new, old = None, dictpath = None):
    """Extends the FascistCheck function with other checks implemented
    in this module.

    Raises ValueError (Python 2 raise syntax -- this module targets
    Python 2) when *new* is unacceptable; returns *new* unchanged on
    success. When *old* is given, *new* is also compared against it.
    """
    if old != None:
        if new == old:
            raise ValueError, "is the same as the old one"

        # Case-insensitive copies for the similarity checks; old+old
        # catches any rotation of the old password as a substring.
        oldmono = old.lower()
        newmono = new.lower()
        wrapped = old + old

        if newmono == oldmono:
            raise ValueError, "case changes only"
        if wrapped.find(new) != -1:
            raise ValueError, "is rotated"
        if similar(oldmono, newmono):
            raise ValueError, "is too similar to the old one"

    # Dictionary / common-pattern checks via the C extension; it raises
    # ValueError itself on failure.
    if dictpath == None:
        FascistCheck(new)
    else:
        FascistCheck(new, dictpath)

    if palindrome(new):
        raise ValueError, "is a palindrome"
    if simple(new):
        raise ValueError, "is too simple"

    return new
| lgpl-2.1 |
caveman-dick/ansible | lib/ansible/modules/storage/netapp/na_cdot_lun.py | 34 | 12121 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_lun
short_description: Manage NetApp cDOT luns
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create, destroy, resize luns on NetApp cDOT.
options:
state:
description:
- Whether the specified lun should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the lun to manage.
required: true
flexvol_name:
description:
- The name of the FlexVol the lun should exist on.
- Required when C(state=present).
size:
description:
- The size of the lun in C(size_unit).
- Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
force_resize:
description:
- Forcibly reduce the size. This is required for reducing the size of the LUN to avoid accidentally reducing the LUN size.
default: false
force_remove:
description:
- If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
- If "false", destroying an online and mapped LUN will fail.
default: false
force_remove_fenced:
description:
- If "true", override checks that prevent a LUN from being destroyed while it is fenced.
- If "false", attempting to destroy a fenced LUN will fail.
- The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
default: false
vserver:
required: true
description:
- The name of the vserver to use.
'''
EXAMPLES = """
- name: Create LUN
na_cdot_lun:
state: present
name: ansibleLUN
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: mb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Resize Lun
na_cdot_lun:
state: present
name: ansibleLUN
force_resize: True
flexvol_name: ansibleVolume
vserver: ansibleVServer
size: 5
size_unit: gb
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
# Capability flag, resolved once at import time: True when the optional
# NetApp-Lib python package is importable.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTLUN(object):
    """Ansible module implementation: create, resize, or delete a LUN on
    a NetApp clustered Data ONTAP vserver via ZAPI calls."""

    def __init__(self):
        """Parse module parameters and open the ZAPI connection."""
        # Multipliers converting the user-supplied size to bytes.
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            size=dict(type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            force_resize=dict(default=False, type='bool'),
            force_remove=dict(default=False, type='bool'),
            force_remove_fenced=dict(default=False, type='bool'),
            flexvol_name=dict(type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # flexvol_name and size are only needed when creating/resizing.
            required_if=[
                ('state', 'present', ['flexvol_name', 'size'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.size_unit = p['size_unit']
        # Normalize the requested size to bytes (None when not given).
        if p['size'] is not None:
            self.size = p['size'] * self._size_unit_map[self.size_unit]
        else:
            self.size = None
        self.force_resize = p['force_resize']
        self.force_remove = p['force_remove']
        self.force_remove_fenced = p['force_remove_fenced']
        self.flexvol_name = p['flexvol_name']
        self.vserver = p['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_lun(self):
        """
        Return details about the LUN

        :return: Details about the lun
        :rtype: dict
        """
        luns = []
        tag = None
        # Page through lun-get-iter results until the server stops
        # returning a continuation tag.
        while True:
            lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
            if tag:
                lun_info.add_new_child('tag', tag, True)

            # Restrict results to this vserver/volume.
            query_details = netapp_utils.zapi.NaElement('lun-info')
            query_details.add_new_child('vserver', self.vserver)
            query_details.add_new_child('volume', self.flexvol_name)

            query = netapp_utils.zapi.NaElement('query')
            query.add_child_elem(query_details)

            lun_info.add_child_elem(query)

            result = self.server.invoke_successfully(lun_info, True)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                attr_list = result.get_child_by_name('attributes-list')
                luns.extend(attr_list.get_children())

            tag = result.get_child_content('next-tag')

            if tag is None:
                break

        # The LUNs have been extracted.
        # Find the specified lun and extract details.
        return_value = None
        for lun in luns:
            # The LUN name is the last path component of e.g. /vol/<vol>/<lun>.
            path = lun.get_child_content('path')
            _rest, _splitter, found_name = path.rpartition('/')

            if found_name == self.name:
                # NOTE(review): ZAPI child content is a string; 'size' is
                # returned as a string here -- see the comparison in apply().
                size = lun.get_child_content('size')

                # Find out if the lun is attached
                attached_to = None
                lun_id = None
                if lun.get_child_content('mapped') == 'true':
                    lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
                        'lun-map-list-info', **{'path': path})

                    result = self.server.invoke_successfully(
                        lun_map_list, enable_tunneling=True)

                    igroups = result.get_child_by_name('initiator-groups')
                    if igroups:
                        # If mapped to several igroups, the last one wins.
                        for igroup_info in igroups.get_children():
                            igroup = igroup_info.get_child_content(
                                'initiator-group-name')
                            attached_to = igroup
                            lun_id = igroup_info.get_child_content('lun-id')

                return_value = {
                    'name': found_name,
                    'size': size,
                    'attached_to': attached_to,
                    'lun_id': lun_id
                }
            else:
                continue

        return return_value

    def create_lun(self):
        """
        Create LUN with requested name and size
        """
        path = '/vol/%s/%s' % (self.flexvol_name, self.name)
        # NOTE(review): ostype is hard-coded to 'linux'.
        lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-create-by-size', **{'path': path,
                                     'size': str(self.size),
                                     'ostype': 'linux'})

        try:
            self.server.invoke_successfully(lun_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_lun(self):
        """
        Delete requested LUN
        """
        path = '/vol/%s/%s' % (self.flexvol_name, self.name)
        lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-destroy', **{'path': path,
                              'force': str(self.force_remove),
                              'destroy-fenced-lun':
                                  str(self.force_remove_fenced)})

        try:
            self.server.invoke_successfully(lun_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
                                  exception=traceback.format_exc())

    def resize_lun(self):
        """
        Resize requested LUN.

        :return: True if LUN was actually re-sized, false otherwise.
        :rtype: bool
        """
        path = '/vol/%s/%s' % (self.flexvol_name, self.name)
        lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
            'lun-resize', **{'path': path,
                             'size': str(self.size),
                             'force': str(self.force_resize)})
        try:
            self.server.invoke_successfully(lun_resize, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            if to_native(e.code) == "9042":
                # Error 9042 denotes the new LUN size being the same as the
                # old LUN size. This happens when there's barely any difference
                # in the two sizes. For example, from 8388608 bytes to
                # 8194304 bytes. This should go away if/when the default size
                # requested/reported to/from the controller is changed to a
                # larger unit (MB/GB/TB).
                return False
            else:
                self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
                                      exception=traceback.format_exc())

        return True

    def apply(self):
        """Reconcile the actual LUN state with the requested state and
        exit the module with a 'changed' flag."""
        property_changed = False
        # NOTE(review): never set to True anywhere in this class --
        # appears vestigial; confirm before relying on it.
        multiple_properties_changed = False
        size_changed = False
        lun_exists = False
        lun_detail = self.get_lun()

        if lun_detail:
            lun_exists = True
            current_size = lun_detail['size']

            if self.state == 'absent':
                property_changed = True

            elif self.state == 'present':
                # NOTE(review): current_size comes back from ZAPI as a
                # string while self.size is an int, so this presumably
                # always reports a change when the LUN exists; resize_lun()
                # compensates via the 9042 same-size error. Confirm.
                if not current_size == self.size:
                    size_changed = True
                    property_changed = True

        else:
            if self.state == 'present':
                property_changed = True

        if property_changed:
            if self.module.check_mode:
                # Check mode: report the pending change without acting.
                pass
            else:
                if self.state == 'present':
                    if not lun_exists:
                        self.create_lun()
                    else:
                        if size_changed:
                            # Ensure that size was actually changed. Please
                            # read notes in 'resize_lun' function for details.
                            size_changed = self.resize_lun()
                            if not size_changed and not \
                                    multiple_properties_changed:
                                property_changed = False

                elif self.state == 'absent':
                    self.delete_lun()

        changed = property_changed or size_changed
        # TODO: include other details about the lun (size, etc.)
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the LUN manager and reconcile state."""
    NetAppCDOTLUN().apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
MichaelNedzelsky/intellij-community | python/lib/Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Low-level codec object implemented in C (_codecs_iso2022); all classes
# below delegate to it.
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless one-shot encode/decode, bound directly from the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Stateful multibyte encoder; the mbc base class does all the work.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Stateful multibyte decoder; the mbc base class does all the work.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading wrapper around the multibyte decoder.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing wrapper around the multibyte encoder.
    codec = codec
def getregentry():
    """Return the CodecInfo record the codecs registry uses to look up
    the 'iso2022_jp_ext' encoding."""
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
googlei18n/glyphsLib | tests/parser_test.py | 1 | 6403 | # coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import os
from collections import OrderedDict
import unittest
import datetime
import glyphsLib
from glyphsLib.parser import Parser
from glyphsLib.classes import GSGlyph
# Minimal glyph record in Glyphs plist syntax, consumed by
# ParserGlyphTest.test_parse_glyphs below. Keep the text exact: the
# tests assert on the parsed values.
GLYPH_DATA = """\
(
{
glyphname="A";
color=5;
lastChange = "2017-04-30 13:57:04 +0000";
layers = ();
leftKerningGroup = A;
rightKerningGroup = A;
unicode = 0041;
}
)"""
class ParserTest(unittest.TestCase):
    """Unit tests for the low-level plist Parser (string/number/list/dict
    handling), independent of the GSGlyph object model."""

    def run_test(self, text, expected):
        # Parse *text* and compare against *expected* as an OrderedDict,
        # so key order is part of the assertion.
        parser = Parser()
        self.assertEqual(parser.parse(text), OrderedDict(expected))

    def test_parse(self):
        self.run_test(
            "{myval=1; mylist=(1,2,3);}", [("myval", 1), ("mylist", [1, 2, 3])]
        )

    def test_trim_value(self):
        # Escapes: \" quote, \077 octal, \U2019 unicode code point.
        self.run_test('{mystr="a\\"s\\077d\\U2019f";}', [("mystr", 'a"s?d’f')])
        self.run_test('{mystr="\\\\backslash";}', [("mystr", "\\backslash")])

    def test_trailing_content(self):
        with self.assertRaises(ValueError):
            self.run_test("{myval=1;}trailing", [("myval", "1")])

    def test_unexpected_content(self):
        with self.assertRaises(ValueError):
            self.run_test("{myval=@unexpected;}", [("myval", "@unexpected")])

    def test_with_utf8(self):
        # Raw UTF-8 bytes must decode cleanly.
        self.run_test(b'{mystr="Don\xe2\x80\x99t crash";}', [("mystr", "Don’t crash")])

    def test_parse_str_infinity(self):
        # These look like float keywords but must stay plain strings.
        self.run_test(b"{mystr = infinity;}", [("mystr", "infinity")])
        self.run_test(b"{mystr = Infinity;}", [("mystr", "Infinity")])
        self.run_test(b"{mystr = InFiNItY;}", [("mystr", "InFiNItY")])

    def test_parse_str_inf(self):
        self.run_test(b"{mystr = inf;}", [("mystr", "inf")])
        self.run_test(b"{mystr = Inf;}", [("mystr", "Inf")])

    def test_parse_multiple_unicodes(self):
        # unquoted comma-separated list of unicodes is not valid plist;
        # it used to be written by some old versions of Glyphs.app but
        # the current version always writes multiple unicodes within quotes.
        # Thus, we no longer support this in glyphsLib either.
        with self.assertRaises(ValueError):
            self.run_test(
                b"{unicode = 0000,0008,001D;}", [("unicode", "0000,0008,001D")]
            )
        # this is the correct form
        self.run_test(b'{unicode = "0000,0008,001D";}', [("unicode", "0000,0008,001D")])

    def test_parse_single_unicodes(self):
        # test both quoted and unquoted
        self.run_test(b'{unicode = "0008";}', [("unicode", "0008")])
        self.run_test(b"{unicode = ABCD;}", [("unicode", "ABCD")])

    def test_parse_str_nan(self):
        self.run_test(b"{mystr = nan;}", [("mystr", "nan")])
        self.run_test(b"{mystr = NaN;}", [("mystr", "NaN")])

    def test_dont_crash_on_string_that_looks_like_a_dict(self):
        # https://github.com/googlei18n/glyphsLib/issues/238
        self.run_test(b'{UUID0 = "{0.5, 0.5}";}', [("UUID0", "{0.5, 0.5}")])

    def test_parse_dict_in_dict(self):
        self.run_test(
            b'{outer = {inner = "turtles";};}',
            [("outer", OrderedDict([("inner", "turtles")]))],
        )

    def test_parse_hex_data(self):
        # <...> delimits hex-encoded binary data.
        self.run_test(b"{key = <48616c6c6f>;}", [("key", b"Hallo")])

    def test_parse_stringy_floats(self):
        # Quoted numbers stay strings.
        self.run_test(b'{noodleThickness = "106.0";}', [("noodleThickness", "106.0")])

    def test_parse_float_no_frac_as_int(self):
        # A float with a zero fraction is normalized to int.
        self.run_test(b"{noodleThickness = 106.0;}", [("noodleThickness", 106)])

    def test_parse_float_as_float(self):
        self.run_test(b"{noodleThickness = 106.1;}", [("noodleThickness", 106.1)])
class ParserGlyphTest(unittest.TestCase):
    """Tests for parsing plist text directly into GSGlyph objects and for
    int/float preservation when loading a .glyphs file."""

    def test_parse_empty_glyphs(self):
        # data = '({glyphname="A";})'
        data = "({})"
        parser = Parser(GSGlyph)
        result = parser.parse(data)
        self.assertEqual(len(result), 1)
        glyph = result[0]
        self.assertIsInstance(glyph, GSGlyph)
        # Attributes expected to default to None on an empty glyph.
        defaults_as_none = [
            "category",
            "color",
            "lastChange",
            "leftKerningGroup",
            "leftMetricsKey",
            "name",
            "note",
            "rightKerningGroup",
            "rightMetricsKey",
            "script",
            "subCategory",
            "unicode",
            "widthMetricsKey",
        ]
        for attr in defaults_as_none:
            self.assertIsNone(getattr(glyph, attr))
        # userData is always a (possibly empty) container, never None.
        self.assertIsNotNone(glyph.userData)
        defaults_as_true = ["export"]
        for attr in defaults_as_true:
            self.assertTrue(getattr(glyph, attr))

    def test_parse_glyphs(self):
        # Parses the GLYPH_DATA fixture defined above.
        data = GLYPH_DATA
        parser = Parser(GSGlyph)
        result = parser.parse(data)
        glyph = result[0]
        self.assertEqual(glyph.name, "A")
        self.assertEqual(glyph.color, 5)
        self.assertEqual(glyph.lastChange, datetime.datetime(2017, 4, 30, 13, 57, 4))
        self.assertEqual(glyph.leftKerningGroup, "A")
        self.assertEqual(glyph.rightKerningGroup, "A")
        self.assertEqual(glyph.unicode, "0041")

    def test_IntFloatCoordinates(self):
        # Loads a fixture font and checks that integer coordinates stay
        # ints while genuine floats stay floats.
        filename = os.path.join(os.path.dirname(__file__), "data/IntegerFloat.glyphs")
        with open(filename) as f:
            font = glyphsLib.load(f)

        # One (x-is-int, y-is-int) pair per node of the first path.
        int_points_expected = [
            (True, True),
            (False, True),
            (False, False),
            (True, True),
            (True, True),
            (True, True),
        ]
        assert isinstance(font.glyphs["a"].layers[0].width, int)
        assert [
            (isinstance(n.position.x, int), isinstance(n.position.y, int))
            for n in font.glyphs["a"].layers[0].paths[0].nodes
        ] == int_points_expected
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
shail2810/nova | nova/api/openstack/compute/schemas/console_output.py | 110 | 1393 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema validating the request body of the os-getConsoleOutput
# server action.
get_console_output = {
    'type': 'object',
    'properties': {
        'os-getConsoleOutput': {
            'type': 'object',
            'properties': {
                'length': {
                    # Accepts a JSON integer, an integer-formatted string,
                    # or null; 'pattern'/'minimum' each apply only to the
                    # matching type.
                    'type': ['integer', 'string', 'null'],
                    'pattern': '^-?[0-9]+$',
                    # NOTE: -1 means an unlimited length.
                    # TODO(cyeoh): None also means unlimited length
                    # and is supported for v2 backwards compatibility
                    # Should remove in the future with a microversion
                    'minimum': -1,
                },
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-getConsoleOutput'],
    'additionalProperties': False,
}
| apache-2.0 |
googyanas/Googy-Max2-Kernel | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler
# (autodict comes from the Perf-Trace-Util Core module imported above).
unhandled = autodict()
def trace_begin():
    # Called once by perf before the first event is handled.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; report anything unhandled.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint (perf generates the
    # argument list from the event's fields).
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)

    print_uncommon(context)

    # symbol_str() translates the numeric vector to its symbolic name.
    # Trailing comma: python2 print without its own newline.
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)

    print_uncommon(context)

    # flag_str() renders the GFP bitmask symbolically; trailing comma
    # suppresses python2 print's own newline.
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events with no dedicated handler. An unseen key in the
    # autodict yields an empty node, so += raises TypeError the first
    # time a given event name appears.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Common event prefix; trailing comma leaves the cursor on the same
    # line so the per-event handler can append its fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These common_* fields are fetched back from the perf context
    # object rather than passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    """Print a small table of event names seen without a handler."""
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
zding5/Microblog-Flask | flask/lib/python2.7/site-packages/babel/localtime/__init__.py | 153 | 1730 | # -*- coding: utf-8 -*-
"""
babel.localtime
~~~~~~~~~~~~~~~
Babel specific fork of tzlocal to determine the local timezone
of the system.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import pytz
import time
from datetime import timedelta, datetime
from datetime import tzinfo
from threading import RLock
if sys.platform == 'win32':
from babel.localtime._win32 import _get_localzone
else:
from babel.localtime._unix import _get_localzone
_cached_tz = None
_cache_lock = RLock()
# Offsets derived from the C library's timezone data, used by the
# _FallbackLocalTimezone implementation below.
# Standard (non-DST) UTC offset; time.timezone is seconds WEST of UTC,
# hence the negation.
STDOFFSET = timedelta(seconds = -time.timezone)
if time.daylight:
    # DST UTC offset, only meaningful when the platform defines one.
    DSTOFFSET = timedelta(seconds = -time.altzone)
else:
    DSTOFFSET = STDOFFSET
# The DST correction (usually one hour, zero when there is no DST).
DSTDIFF = DSTOFFSET - STDOFFSET
ZERO = timedelta(0)
class _FallbackLocalTimezone(tzinfo):
    """Last-resort local timezone built from the :mod:`time` module's
    ``timezone``/``altzone`` data; used below when the platform timezone
    cannot be determined.
    """
    def utcoffset(self, dt):
        # DST-adjusted offset when daylight saving applies to *dt*.
        return DSTOFFSET if self._isdst(dt) else STDOFFSET
    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO
    def tzname(self, dt):
        # bool indexes as 0/1 into the (std, dst) name pair.
        return time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # Round-trip the naive datetime through mktime()/localtime() so
        # the C library decides whether DST is in effect at that instant.
        fields = (dt.year, dt.month, dt.day,
                  dt.hour, dt.minute, dt.second,
                  dt.weekday(), 0, -1)
        local = time.localtime(time.mktime(fields))
        return local.tm_isdst > 0
def get_localzone():
    """Return the current underlying local timezone object.

    Most callers should prefer the :data:`LOCALTZ` singleton, which is
    computed once at import time, instead of calling this directly.
    """
    return _get_localzone()
# Compute the module-level singleton once at import time; fall back to
# the time-module-based implementation when the platform timezone
# cannot be resolved to a pytz zone.
try:
    LOCALTZ = get_localzone()
except pytz.UnknownTimeZoneError:
    LOCALTZ = _FallbackLocalTimezone()
| mit |
scorpionis/docklet | client/venv/lib/python3.5/site-packages/pip/_vendor/distlib/index.py | 571 | 20976 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
# Default endpoint and auth realm for the legacy PyPI API.
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """
    # MIME multipart boundary used by encode_request() when posting forms.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
    def __init__(self, url=None):
        """
        Initialise an instance.
        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        :raises DistlibException: if the URL is not a plain http(s) URL.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        self.rpc_proxy = None
        # Probe for an available GnuPG executable (preferring gpg2) so
        # that the optional signing/verification support can be used.
        with open(os.devnull, 'w') as sink:
            for s in ('gpg2', 'gpg'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    pass
    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)
    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)
    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.
        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)
    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        # Install a basic-auth handler so that send_request() authenticates.
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)
    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        # First ask the server to verify the metadata, then submit it.
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)
    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines of from a subprocess into a buffer.
        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()
    def get_sign_command(self, filename, signer, sign_password,
                         keystore=None):
        """
        Return a suitable command for signing a file.
        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        if sign_password is not None:
            # The passphrase is fed to gpg on stdin (fd 0) by run_command().
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        # The detached signature is written to a fresh temporary directory;
        # upload_file() removes the directory after reading the signature.
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf
    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.
        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr
    def sign_file(self, filename, signer, sign_password, keystore=None):
        """
        Sign a file.
        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The absolute pathname of the file where the signature is
                 stored.
        :raises DistlibException: if the sign command exits non-zero.
        """
        cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
                                              keystore)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file
    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source', keystore=None):
        """
        Upload a release file to the index.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                         distutils command which produced that file, e.g.
                         ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                # Best effort: continue unsigned rather than failing.
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password,
                                          keystore)
        with open(filename, 'rb') as f:
            file_data = f.read()
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            # NOTE(review): 'protcol_version' (sic) is the field name this
            # client sends; presumably the server expects the misspelling --
            # confirm against the upload API before renaming it.
            'protcol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                         sig_data))
            # Remove the temporary directory created by get_sign_command().
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)
    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.
        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        # The whole documentation tree is zipped in memory and posted.
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)
    def get_verify_command(self, signature_filename, data_filename,
                           keystore=None):
        """
        Return a suitable command for verifying a file.
        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd
    def verify_signature(self, signature_filename, data_filename,
                         keystore=None):
        """
        Verify a signature for a file.
        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: True if the signature was verified, else False.
        :raises DistlibException: if gpg is unavailable or the verify
                                  command fails outright.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename,
                                      keystore)
        rc, stdout, stderr = self.run_command(cmd)
        # gpg exits 0 for a good signature, 1 for a bad one; anything
        # else indicates the command itself failed.
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                                   'code %s' % rc)
        return rc == 0
    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).
        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.
        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                # A bare digest string is assumed to be an MD5 value.
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                # The headers mapping is case-insensitive, so the mixed
                # 'content-length'/'Content-Length' usage here is safe.
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()
        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)
    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.
        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)
    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.
        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuple.
        :return: A :class:`Request` ready for :meth:`send_request`.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]
            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))
        parts.extend((b'--' + boundary + b'--', b''))
        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)
    def search(self, terms, operator=None):
        # Search the index via its XML-RPC interface; a bare string is
        # treated as a search on the 'name' field.
        if isinstance(terms, string_types):
            terms = {'name': terms}
        if self.rpc_proxy is None:
            self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
        return self.rpc_proxy.search(terms, operator or 'and')
| bsd-3-clause |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/unittest/test/support.py | 37 | 3752 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class _BaseLoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super().startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super().stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super().addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super().addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super().addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super().addUnexpectedSuccess(*args)
class LegacyLoggingResult(_BaseLoggingResult):
    """
    A logging result emulating the legacy TestResult API: it records its
    method calls but has no usable ``addSubTest`` attribute.
    """
    @property
    def addSubTest(self):
        # Any access behaves exactly as if the attribute did not exist.
        raise AttributeError
class LoggingResult(_BaseLoggingResult):
    """
    A TestResult implementation which records its method calls, including
    subtest outcomes.
    """
    def addSubTest(self, test, subtest, err):
        # Log success/failure of the subtest, then defer to the base class.
        event = 'addSubTestSuccess' if err is None else 'addSubTestFailure'
        self._events.append(event)
        super().addSubTest(test, subtest, err)
class ResultWithNoStartTestRunStopTestRun(object):
    """An object honouring the TestResult API as it existed before the
    startTestRun/stopTestRun hooks were introduced."""
    def __init__(self):
        # Public bookkeeping attributes mirroring unittest.TestResult.
        self.testsRun = 0
        self.shouldStop = False
        self.failures = []
        self.errors = []
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
    # All hooks are deliberate no-ops: this result records nothing.
    def startTest(self, test):
        pass
    def stopTest(self, test):
        pass
    def addError(self, test):
        pass
    def addFailure(self, test):
        pass
    def addSuccess(self, test):
        pass
    def wasSuccessful(self):
        # Nothing is ever recorded, so a run is always "successful".
        return True
| gpl-3.0 |
mj10777/QGIS | tests/src/python/test_qgsreport.py | 45 | 50038 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsReport
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '29/12/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsProject,
QgsLayout,
QgsReport,
QgsReportSectionLayout,
QgsReportSectionFieldGroup,
QgsVectorLayer,
QgsField,
QgsFeature,
QgsReadWriteContext,
QgsUnitTypes)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
class TestQgsReport(unittest.TestCase):
def testGettersSetters(self):
p = QgsProject()
r = QgsReport(p)
self.assertEqual(r.layoutProject(), p)
self.assertEqual(r.project(), p)
r.setHeaderEnabled(True)
self.assertTrue(r.headerEnabled())
header = QgsLayout(p)
r.setHeader(header)
self.assertEqual(r.header(), header)
r.setFooterEnabled(True)
self.assertTrue(r.footerEnabled())
footer = QgsLayout(p)
r.setFooter(footer)
self.assertEqual(r.footer(), footer)
    def testchildSections(self):
        """Child-section management: empty state, invalid lookups/removals,
        and appending children with correct parenting and row indices."""
        p = QgsProject()
        r = QgsReport(p)
        self.assertEqual(r.childCount(), 0)
        self.assertEqual(r.childSections(), [])
        # out-of-range lookups return None rather than raising
        self.assertIsNone(r.childSection(-1))
        self.assertIsNone(r.childSection(1))
        self.assertIsNone(r.childSection(0))
        # try deleting non-existent childSections
        r.removeChildAt(-1)
        r.removeChildAt(0)
        r.removeChildAt(100)
        r.removeChild(None)
        # append child
        child1 = QgsReportSectionLayout()
        self.assertIsNone(child1.project())
        r.appendChild(child1)
        self.assertEqual(r.childCount(), 1)
        self.assertEqual(r.childSections(), [child1])
        self.assertEqual(r.childSection(0), child1)
        self.assertEqual(child1.parentSection(), r)
        self.assertEqual(child1.row(), 0)
        # appending adopts the child into the report's project
        self.assertEqual(child1.project(), p)
        child2 = QgsReportSectionLayout()
        r.appendChild(child2)
        self.assertEqual(r.childCount(), 2)
        self.assertEqual(r.childSections(), [child1, child2])
        self.assertEqual(r.childSection(1), child2)
        self.assertEqual(child2.parentSection(), r)
        self.assertEqual(child2.row(), 1)
    def testInsertChild(self):
        """insertChild() index handling and sibling re-rowing."""
        p = QgsProject()
        r = QgsReport(p)
        child1 = QgsReportSectionLayout()
        # an index past the end appends (asserted by the row checks below)
        r.insertChild(11, child1)
        self.assertEqual(r.childCount(), 1)
        self.assertEqual(r.childSections(), [child1])
        self.assertEqual(child1.parentSection(), r)
        self.assertEqual(child1.row(), 0)
        child2 = QgsReportSectionLayout()
        # a negative index inserts at the front (asserted by ordering below)
        r.insertChild(-1, child2)
        self.assertEqual(r.childCount(), 2)
        self.assertEqual(r.childSections(), [child2, child1])
        self.assertEqual(child2.parentSection(), r)
        self.assertEqual(child2.row(), 0)
        self.assertEqual(child1.row(), 1)
    def testRemoveChild(self):
        """removeChildAt()/removeChild() ignore invalid targets and
        unlink valid children."""
        p = QgsProject()
        r = QgsReport(p)
        child1 = QgsReportSectionLayout()
        r.appendChild(child1)
        child2 = QgsReportSectionLayout()
        r.appendChild(child2)
        # invalid removals must be safe no-ops (asserted just below)
        r.removeChildAt(-1)
        r.removeChildAt(100)
        r.removeChild(None)
        self.assertEqual(r.childCount(), 2)
        self.assertEqual(r.childSections(), [child1, child2])
        r.removeChildAt(1)
        self.assertEqual(r.childCount(), 1)
        self.assertEqual(r.childSections(), [child1])
        r.removeChild(child1)
        self.assertEqual(r.childCount(), 0)
        self.assertEqual(r.childSections(), [])
    def testClone(self):
        """clone() copies the children (with their header/footer flags)
        and re-parents the copies to the clone."""
        p = QgsProject()
        r = QgsReport(p)
        child1 = QgsReportSectionLayout()
        child1.setHeaderEnabled(True)
        r.appendChild(child1)
        child2 = QgsReportSectionLayout()
        child2.setFooterEnabled(True)
        r.appendChild(child2)
        cloned = r.clone()
        self.assertEqual(cloned.childCount(), 2)
        self.assertTrue(cloned.childSection(0).headerEnabled())
        self.assertFalse(cloned.childSection(0).footerEnabled())
        self.assertEqual(cloned.childSection(0).parentSection(), cloned)
        self.assertFalse(cloned.childSection(1).headerEnabled())
        self.assertTrue(cloned.childSection(1).footerEnabled())
        self.assertEqual(cloned.childSection(1).parentSection(), cloned)
def testReportSectionLayout(self):
r = QgsReportSectionLayout()
p = QgsProject()
body = QgsLayout(p)
r.setBody(body)
self.assertEqual(r.body(), body)
    def testIteration(self):
        """Walk the report iterator as sections are progressively added,
        checking the order in which layouts are visited and the sequential
        numbering produced by filePath()."""
        p = QgsProject()
        r = QgsReport(p)
        # empty report
        self.assertTrue(r.beginRender())
        self.assertFalse(r.next())
        # add a header
        r.setHeaderEnabled(True)
        report_header = QgsLayout(p)
        r.setHeader(report_header)
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertFalse(r.next())
        # add a footer
        r.setFooterEnabled(True)
        report_footer = QgsLayout(p)
        r.setFooter(report_footer)
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_footer)
        self.assertFalse(r.next())
        # add a child
        child1 = QgsReportSectionLayout()
        child1_body = QgsLayout(p)
        child1.setBody(child1_body)
        r.appendChild(child1)
        # child bodies are visited between the report header and footer
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_body)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_footer)
        self.assertFalse(r.next())
        # header and footer on child
        child1_header = QgsLayout(p)
        child1.setHeader(child1_header)
        child1.setHeaderEnabled(True)
        child1_footer = QgsLayout(p)
        child1.setFooter(child1_footer)
        child1.setFooterEnabled(True)
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_body)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_footer)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_footer)
        self.assertFalse(r.next())
        # add another child
        child2 = QgsReportSectionLayout()
        child2_header = QgsLayout(p)
        child2.setHeader(child2_header)
        child2.setHeaderEnabled(True)
        child2_footer = QgsLayout(p)
        child2.setFooter(child2_footer)
        child2.setFooterEnabled(True)
        r.appendChild(child2)
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_body)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_footer)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2_header)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2_footer)
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_footer)
        self.assertFalse(r.next())
        # add a child to child2
        child2a = QgsReportSectionLayout()
        child2a_header = QgsLayout(p)
        child2a.setHeader(child2a_header)
        child2a.setHeaderEnabled(True)
        child2a_footer = QgsLayout(p)
        child2a.setFooter(child2a_footer)
        child2a.setFooterEnabled(True)
        child2.appendChild(child2a)
        # nested sections are visited depth-first inside their parent;
        # filePath() numbers exported pages sequentially across the run
        self.assertTrue(r.beginRender())
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_header)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0001.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_header)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0002.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_body)
        # extension is accepted with or without the leading dot
        self.assertEqual(r.filePath('/tmp/myreport', '.png'), '/tmp/myreport_0003.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child1_footer)
        self.assertEqual(r.filePath('/tmp/myreport', 'jpg'), '/tmp/myreport_0004.jpg')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2_header)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0005.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2a_header)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0006.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2a_footer)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0007.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), child2_footer)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0008.png')
        self.assertTrue(r.next())
        self.assertEqual(r.layout(), report_footer)
        self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0009.png')
        self.assertFalse(r.next())
def testFieldGroup(self):
# create a layer
ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)&field=town:string(20)", "points", "memory")
attributes = [
['Australia', 'QLD', 'Brisbane'],
['Australia', 'QLD', 'Emerald'],
['NZ', 'state1', 'town1'],
['Australia', 'VIC', 'Melbourne'],
['NZ', 'state1', 'town2'],
['Australia', 'QLD', 'Beerburrum'],
['Australia', 'VIC', 'Geelong'],
['NZ', 'state2', 'town2'],
['PNG', 'state1', 'town1'],
['Australia', 'NSW', 'Sydney']
]
pr = ptLayer.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(3)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
f.setAttribute(2, a[2])
self.assertTrue(pr.addFeature(f))
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1_body = QgsLayout(p)
child1.setLayer(ptLayer)
child1.setBody(child1_body)
child1.setBodyEnabled(True)
child1.setField('country')
r.appendChild(child1)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# another group
# remove body from child1
child1.setBodyEnabled(False)
child2 = QgsReportSectionFieldGroup()
child2_body = QgsLayout(p)
child2.setLayer(ptLayer)
child2.setBody(child2_body)
child2.setBodyEnabled(True)
child2.setField('state')
child1.appendChild(child2)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# another group
# remove body from child1
child2.setBodyEnabled(False)
child3 = QgsReportSectionFieldGroup()
child3_body = QgsLayout(p)
child3.setLayer(ptLayer)
child3.setBody(child3_body)
child3.setBodyEnabled(True)
child3.setField('town')
child3.setSortAscending(False)
child2.appendChild(child3)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# add headers/footers
child3_header = QgsLayout(p)
child3.setHeader(child3_header)
child3.setHeaderEnabled(True)
child3_footer = QgsLayout(p)
child3.setFooter(child3_footer)
child3.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# header/footer for section2
child2_header = QgsLayout(p)
child2.setHeader(child2_header)
child2.setHeaderEnabled(True)
child2_footer = QgsLayout(p)
child2.setFooter(child2_footer)
child2.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertFalse(r.next())
# child 1 and report header/footer
child1_header = QgsLayout(p)
child1.setHeader(child1_header)
child1.setHeaderEnabled(True)
child1_footer = QgsLayout(p)
child1.setFooter(child1_footer)
child1.setFooterEnabled(True)
report_header = QgsLayout(p)
r.setHeader(report_header)
r.setHeaderEnabled(True)
report_footer = QgsLayout(p)
r.setFooter(report_footer)
r.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:1], ['Australia'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:1], ['PNG'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
def testFieldGroupSectionVisibility(self):
states = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)", "points", "memory")
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1.setLayer(states)
child1.setField('country')
child1_header = QgsLayout(p)
child1.setHeader(child1_header)
child1.setHeaderEnabled(True)
child1_footer = QgsLayout(p)
child1.setFooter(child1_footer)
child1.setFooterEnabled(True)
r.appendChild(child1)
# check that no header was rendered when no features are found
self.assertTrue(r.beginRender())
self.assertFalse(r.next())
child1.setHeaderVisibility(QgsReportSectionFieldGroup.AlwaysInclude)
child1.setFooterVisibility(QgsReportSectionFieldGroup.AlwaysInclude)
# check that the header is included when no features are found
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
def testFieldGroupMultiLayer(self):
# create a layer
states = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)", "points", "memory")
attributes = [
['Australia', 'QLD'],
['NZ', 'state1'],
['Australia', 'VIC'],
['NZ', 'state2'],
['PNG', 'state3'],
['Australia', 'NSW']
]
pr = states.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(2)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
self.assertTrue(pr.addFeature(f))
places = QgsVectorLayer("Point?crs=epsg:4326&field=state:string(20)&field=town:string(20)", "points", "memory")
attributes = [
['QLD', 'Brisbane'],
['QLD', 'Emerald'],
['state1', 'town1'],
['VIC', 'Melbourne'],
['state1', 'town2'],
['QLD', 'Beerburrum'],
['VIC', 'Geelong'],
['state3', 'town1']
]
pr = places.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(2)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
self.assertTrue(pr.addFeature(f))
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1_body = QgsLayout(p)
child1.setLayer(states)
child1.setBody(child1_body)
child1.setBodyEnabled(True)
child1.setField('country')
r.appendChild(child1)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertFalse(r.next())
# another group
# remove body from child1
child1.setBodyEnabled(False)
child2 = QgsReportSectionFieldGroup()
child2_body = QgsLayout(p)
child2.setLayer(states)
child2.setBody(child2_body)
child2.setBodyEnabled(True)
child2.setField('state')
child1.appendChild(child2)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertFalse(r.next())
# another group
child3 = QgsReportSectionFieldGroup()
child3_body = QgsLayout(p)
child3.setLayer(places)
child3.setBody(child3_body)
child3.setBodyEnabled(True)
child3.setField('town')
child3.setSortAscending(False)
child2.appendChild(child3)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertFalse(r.next())
# add headers/footers
child3_header = QgsLayout(p)
child3.setHeader(child3_header)
child3.setHeaderEnabled(True)
child3_footer = QgsLayout(p)
child3.setFooter(child3_footer)
child3.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertFalse(r.next())
def testReadWriteXml(self):
p = QgsProject()
ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)&field=town:string(20)", "points", "memory")
p.addMapLayer(ptLayer)
r = QgsReport(p)
r.setName('my report')
# add a header
r.setHeaderEnabled(True)
report_header = QgsLayout(p)
report_header.setUnits(QgsUnitTypes.LayoutInches)
r.setHeader(report_header)
# add a footer
r.setFooterEnabled(True)
report_footer = QgsLayout(p)
report_footer.setUnits(QgsUnitTypes.LayoutMeters)
r.setFooter(report_footer)
# add some subsections
child1 = QgsReportSectionLayout()
child1_body = QgsLayout(p)
child1_body.setUnits(QgsUnitTypes.LayoutPoints)
child1.setBody(child1_body)
child2 = QgsReportSectionLayout()
child2_body = QgsLayout(p)
child2_body.setUnits(QgsUnitTypes.LayoutPixels)
child2.setBody(child2_body)
child1.appendChild(child2)
child2a = QgsReportSectionFieldGroup()
child2a_body = QgsLayout(p)
child2a_body.setUnits(QgsUnitTypes.LayoutInches)
child2a.setBody(child2a_body)
child2a.setField('my field')
child2a.setLayer(ptLayer)
child1.appendChild(child2a)
r.appendChild(child1)
doc = QDomDocument("testdoc")
elem = r.writeLayoutXml(doc, QgsReadWriteContext())
r2 = QgsReport(p)
self.assertTrue(r2.readLayoutXml(elem, doc, QgsReadWriteContext()))
self.assertEqual(r2.name(), 'my report')
self.assertTrue(r2.headerEnabled())
self.assertEqual(r2.header().units(), QgsUnitTypes.LayoutInches)
self.assertTrue(r2.footerEnabled())
self.assertEqual(r2.footer().units(), QgsUnitTypes.LayoutMeters)
self.assertEqual(r2.childCount(), 1)
self.assertEqual(r2.childSection(0).body().units(), QgsUnitTypes.LayoutPoints)
self.assertEqual(r2.childSection(0).childCount(), 2)
self.assertEqual(r2.childSection(0).childSection(0).body().units(), QgsUnitTypes.LayoutPixels)
self.assertEqual(r2.childSection(0).childSection(1).body().units(), QgsUnitTypes.LayoutInches)
self.assertEqual(r2.childSection(0).childSection(1).field(), 'my field')
self.assertEqual(r2.childSection(0).childSection(1).layer(), ptLayer)
if __name__ == '__main__':
    # Allow the test suite to be executed directly from the command line.
    unittest.main()
| gpl-2.0 |
ewels/MultiQC_OSXApp | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset in turn."""

    # Candidate probers, in priority order: UTF-8 first, then the
    # Japanese, Chinese, Korean and Taiwanese encodings.
    _PROBER_CLASSES = (
        UTF8Prober,
        SJISProber,
        EUCJPProber,
        GB2312Prober,
        EUCKRProber,
        CP949Prober,
        Big5Prober,
        EUCTWProber,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Instantiate one prober per supported encoding.
        self._mProbers = [prober_class() for prober_class in self._PROBER_CLASSES]
        self.reset()
| mit |
tylertian/Openstack | openstack F/swift/swift/account/auditor.py | 3 | 5085 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from random import random
import swift.common.db
from swift.account import server as account_server
from swift.common.db import AccountBroker
from swift.common.utils import get_logger, audit_location_generator, \
TRUE_VALUES, dump_recon_cache
from swift.common.daemon import Daemon
from eventlet import Timeout
class AccountAuditor(Daemon):
    """Audit accounts.

    Walks every account database found under the configured devices,
    opens each one to verify that its info can be read, and periodically
    reports pass/fail statistics to the log and the recon cache.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='account-auditor')
        self.devices = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        # Seconds between the start of two audit passes in run_forever mode.
        self.interval = int(conf.get('interval', 1800))
        # Counters for the current reporting window (reset every hour).
        self.account_passes = 0
        self.account_failures = 0
        swift.common.db.DB_PREALLOCATION = \
            conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = os.path.join(self.recon_cache_path, "account.recon")

    def _one_audit_pass(self, reported):
        """Audit every account db once, reporting statistics hourly.

        :param reported: unix timestamp of the last statistics report
        :returns: unix timestamp of the most recent statistics report
        """
        all_locs = audit_location_generator(self.devices,
            account_server.DATADIR, mount_check=self.mount_check,
            logger=self.logger)
        for path, device, partition in all_locs:
            self.account_audit(path)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(_('Since %(time)s: Account audits: '
                    '%(passed)s passed audit, %(failed)s failed audit'),
                    {'time': time.ctime(reported),
                     'passed': self.account_passes,
                     'failed': self.account_failures})
                # NOTE: a stray second self.account_audit(path) call used
                # to live here, auditing the same db twice and
                # double-counting it in the pass/fail statistics.
                dump_recon_cache({'account_audits_since': reported,
                                  'account_audits_passed': self.account_passes,
                                  'account_audits_failed':
                                  self.account_failures},
                                 self.rcache, self.logger)
                reported = time.time()
                self.account_passes = 0
                self.account_failures = 0
        return reported

    def run_forever(self, *args, **kwargs):
        """Run the account audit until stopped."""
        reported = time.time()
        # Random initial sleep spreads auditor start times across a cluster.
        time.sleep(random() * self.interval)
        while True:
            self.logger.info(_('Begin account audit pass.'))
            begin = time.time()
            try:
                reported = self._one_audit_pass(reported)
            except (Exception, Timeout):
                self.logger.increment('errors')
                self.logger.exception(_('ERROR auditing'))
            elapsed = time.time() - begin
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)
            self.logger.info(
                _('Account audit pass completed: %.02fs'), elapsed)
            dump_recon_cache({'account_auditor_pass_completed': elapsed},
                             self.rcache, self.logger)

    def run_once(self, *args, **kwargs):
        """Run the account audit once."""
        self.logger.info(_('Begin account audit "once" mode'))
        begin = reported = time.time()
        self._one_audit_pass(reported)
        elapsed = time.time() - begin
        self.logger.info(
            _('Account audit "once" mode completed: %.02fs'), elapsed)
        dump_recon_cache({'account_auditor_pass_completed': elapsed},
                         self.rcache, self.logger)

    def account_audit(self, path):
        """
        Audits the given account path

        :param path: the path to an account db
        """
        start_time = time.time()
        broker = None
        try:
            if not path.endswith('.db'):
                return
            broker = AccountBroker(path)
            if not broker.is_deleted():
                # get_info() is called for its validation side effect.
                broker.get_info()
                self.logger.increment('passes')
                self.account_passes += 1
                self.logger.debug(_('Audit passed for %s') % broker.db_file)
        except (Exception, Timeout):
            self.logger.increment('failures')
            self.account_failures += 1
            # broker is None when AccountBroker(path) itself raised; fall
            # back to the raw path so the error can still be located
            # (previously this handler raised NameError on unbound broker).
            self.logger.exception(_('ERROR Could not get account info %s'),
                (broker.db_file if broker else path))
        self.logger.timing_since('timing', start_time)
| apache-2.0 |
rosswhitfield/mantid | Testing/SystemTests/tests/framework/HB3AWorkflowTests.py | 3 | 18874 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,too-few-public-methods
import numpy as np
import systemtesting
from mantid.simpleapi import (HB3AAdjustSampleNorm, HB3AFindPeaks,
HB3AIntegratePeaks, HB3APredictPeaks,
mtd, DeleteWorkspaces,
CreateMDWorkspace, LoadEmptyInstrument,
AddTimeSeriesLog, SetUB,
FakeMDEventData, FindPeaksMD,
ShowPossibleCells, FindUBUsingFFT,
OptimizeLatticeForCellType,
SelectCellOfType, HasUB)
from mantid.kernel import V3D
from mantid.geometry import OrientedLattice, SpaceGroupFactory
class SingleFileFindPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: load one HB3A scan, find peaks assuming an orthorhombic
    # F-centred cell, integrate them, then check the peak count and the
    # refined lattice parameters.
    def runTest(self):
        ws_name = 'SingleFileFindPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3AFindPeaks(InputWorkspace=ws_name+'_data',
                      CellType="Orthorhombic",
                      Centering="F",
                      OutputWorkspace=ws_name+'_peaks')
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        peaks = mtd[ws_name+'_integrated']
        self.assertEqual(peaks.getNumberPeaks(), 15)
        # Reference lattice constants for the orthorhombic cell.
        ol = peaks.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.231258554, 1.e-7)
        self.assertDelta(ol.b(), 5.257701834, 1.e-7)
        self.assertDelta(ol.c(), 19.67041036, 1.e-7)
        self.assertEqual(ol.alpha(), 90)
        self.assertEqual(ol.beta(), 90)
        self.assertEqual(ol.gamma(), 90)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class SingleFilePredictPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: load one HB3A scan, predict peaks from the UB stored in
    # the data itself, integrate, and check count and lattice parameters.
    def runTest(self):
        ws_name = 'SingleFilePredictPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3APredictPeaks(InputWorkspace=ws_name+"_data",
                         OutputWorkspace=ws_name+"_peaks")
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        peaks = mtd[ws_name+'_integrated']
        self.assertEqual(peaks.getNumberPeaks(), 112)
        # Lattice from the embedded UB (slightly off-orthogonal angles).
        ol = peaks.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.238389802, 1.e-7)
        self.assertDelta(ol.b(), 5.238415883, 1.e-7)
        self.assertDelta(ol.c(), 19.65194598, 1.e-7)
        self.assertDelta(ol.alpha(), 89.9997207, 1.e-7)
        self.assertDelta(ol.beta(), 89.9997934, 1.e-7)
        self.assertDelta(ol.gamma(), 89.9999396, 1.e-7)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class SingleFilePredictPeaksUBFromFindPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: find peaks first to refine a UB, then predict peaks using
    # that UB workspace, integrate, and check count and lattice parameters.
    def runTest(self):
        ws_name = 'SingleFilePredictPeaksUBFromFindPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3AFindPeaks(InputWorkspace=ws_name+'_data',
                      CellType="Orthorhombic",
                      Centering="F",
                      OutputWorkspace=ws_name+'_found_peaks')
        HB3APredictPeaks(InputWorkspace=ws_name+"_data",
                         UBWorkspace=ws_name+'_found_peaks',
                         OutputWorkspace=ws_name+'_peaks')
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        peaks = mtd[ws_name+'_integrated']
        self.assertEqual(peaks.getNumberPeaks(), 114)
        # Lattice must match the one refined by HB3AFindPeaks above.
        ol = peaks.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.231258554, 1.e-7)
        self.assertDelta(ol.b(), 5.257701834, 1.e-7)
        self.assertDelta(ol.c(), 19.67041036, 1.e-7)
        self.assertEqual(ol.alpha(), 90)
        self.assertEqual(ol.beta(), 90)
        self.assertEqual(ol.gamma(), 90)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_found_peaks',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class MultiFileFindPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: load two HB3A scans at once (yields a workspace group of
    # two entries), find and integrate peaks for both, and spot-check the
    # first and last integrated peaks.
    def runTest(self):
        ws_name = 'MultiFileFindPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0182.nxs, HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3AFindPeaks(InputWorkspace=ws_name+'_data',
                      CellType="Orthorhombic",
                      Centering="F",
                      OutputWorkspace=ws_name+'_peaks')
        peaks = mtd[ws_name+'_peaks']
        # One peaks workspace per input scan.
        self.assertEqual(peaks.getNumberOfEntries(), 2)
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        integrated = mtd[ws_name+'_integrated']
        # Integration must combine every peak from both scans.
        self.assertEqual(peaks.getItem(0).getNumberPeaks() + peaks.getItem(1).getNumberPeaks(),
                         integrated.getNumberPeaks())
        ol = integrated.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.261051697, 1.e-7)
        self.assertDelta(ol.b(), 5.224167511, 1.e-7)
        self.assertDelta(ol.c(), 19.689643636, 1.e-7)
        self.assertEqual(ol.alpha(), 90)
        self.assertEqual(ol.beta(), 90)
        self.assertEqual(ol.gamma(), 90)
        # First integrated peak comes from scan 182.
        peak0 = integrated.getPeak(0)
        self.assertEqual(peak0.getH(), 0)
        self.assertEqual(peak0.getK(), 0)
        self.assertEqual(peak0.getL(), 10)
        self.assertEqual(peak0.getRunNumber(), 182)
        self.assertAlmostEqual(peak0.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak0.getIntensity(), 6581.5580392, places=5)
        self.assertEqual(peak0.getBinCount(), 827)
        # Last integrated peak comes from scan 183.
        peak1 = integrated.getPeak(integrated.getNumberPeaks()-1)
        self.assertEqual(peak1.getH(), 0)
        self.assertEqual(peak1.getK(), 4)
        self.assertEqual(peak1.getL(), 0)
        self.assertEqual(peak1.getRunNumber(), 183)
        self.assertAlmostEqual(peak1.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak1.getIntensity(), 11853.7538139, places=5)
        self.assertEqual(peak1.getBinCount(), 134)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class MultiFilePredictPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: load two HB3A scans, predict peaks from the UB embedded
    # in the data, integrate, and spot-check first/last peaks.
    def runTest(self):
        ws_name = 'MultiFilePredictPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0182.nxs, HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3APredictPeaks(InputWorkspace=ws_name+"_data",
                         OutputWorkspace=ws_name+"_peaks")
        peaks = mtd[ws_name+"_peaks"]
        # One predicted-peaks workspace per input scan.
        self.assertEqual(peaks.getNumberOfEntries(), 2)
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        integrated = mtd[ws_name+'_integrated']
        self.assertEqual(peaks.getItem(0).getNumberPeaks() + peaks.getItem(1).getNumberPeaks(),
                         integrated.getNumberPeaks())
        # Should be the same as the UB from the data
        ol = integrated.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.238389802, 1.e-7)
        self.assertDelta(ol.b(), 5.238415883, 1.e-7)
        self.assertDelta(ol.c(), 19.65194598, 1.e-7)
        self.assertDelta(ol.alpha(), 89.9997207, 1.e-7)
        self.assertDelta(ol.beta(), 89.9997934, 1.e-7)
        self.assertDelta(ol.gamma(), 89.9999396, 1.e-7)
        # Predicted peaks carry no observed bin counts (BinCount == 0).
        peak0 = integrated.getPeak(0)
        self.assertEqual(peak0.getH(), 0)
        self.assertEqual(peak0.getK(), 0)
        self.assertEqual(peak0.getL(), -11)
        self.assertEqual(peak0.getRunNumber(), 182)
        self.assertAlmostEqual(peak0.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak0.getIntensity(), 127.20005, places=5)
        self.assertEqual(peak0.getBinCount(), 0)
        peak1 = integrated.getPeak(integrated.getNumberPeaks()-1)
        self.assertEqual(peak1.getH(), 5)
        self.assertEqual(peak1.getK(), 0)
        self.assertEqual(peak1.getL(), 1)
        self.assertEqual(peak1.getRunNumber(), 183)
        self.assertAlmostEqual(peak1.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak1.getIntensity(), 66.0945249, places=5)
        self.assertEqual(peak1.getBinCount(), 0)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class MultiFilePredictPeaksUBFromFindPeaksIntegrate(systemtesting.MantidSystemTest):
    # Workflow: two scans; refine a UB via HB3AFindPeaks, predict peaks
    # from that UB, integrate, and check counts, lattice, first/last peaks.
    def runTest(self):
        ws_name = 'MultiFilePredictPeaksUBFromFindPeaksIntegrate'
        HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0182.nxs, HB3A_exp0724_scan0183.nxs",
                             NormaliseBy='None',
                             OutputWorkspace=ws_name+'_data')
        HB3AFindPeaks(InputWorkspace=ws_name+'_data',
                      CellType="Orthorhombic",
                      Centering="F",
                      OutputWorkspace=ws_name+'_found_peaks')
        found_peaks = mtd[ws_name+"_found_peaks"]
        self.assertEqual(found_peaks.getNumberOfEntries(), 2)
        self.assertEqual(found_peaks.getItem(0).getNumberPeaks(), 15)
        self.assertEqual(found_peaks.getItem(1).getNumberPeaks(), 15)
        HB3APredictPeaks(InputWorkspace=ws_name+"_data",
                         UBWorkspace=ws_name+'_found_peaks',
                         OutputWorkspace=ws_name+"_peaks")
        peaks = mtd[ws_name+"_peaks"]
        self.assertEqual(peaks.getItem(0).getNumberPeaks(), 78)
        self.assertEqual(peaks.getItem(1).getNumberPeaks(), 113)
        HB3AIntegratePeaks(InputWorkspace=ws_name+'_data',
                           PeaksWorkspace=ws_name+'_peaks',
                           PeakRadius=0.25,
                           OutputWorkspace=ws_name+'_integrated')
        integrated = mtd[ws_name+'_integrated']
        self.assertEqual(integrated.getNumberPeaks(), 191)
        self.assertEqual(peaks.getItem(0).getNumberPeaks() + peaks.getItem(1).getNumberPeaks(),
                         integrated.getNumberPeaks())
        # should be the same as from MultiFileFindPeaksIntegrate test
        ol = integrated.sample().getOrientedLattice()
        self.assertDelta(ol.a(), 5.261051697, 1.e-7)
        self.assertDelta(ol.b(), 5.224167511, 1.e-7)
        self.assertDelta(ol.c(), 19.689643636, 1.e-7)
        self.assertEqual(ol.alpha(), 90)
        self.assertEqual(ol.beta(), 90)
        self.assertEqual(ol.gamma(), 90)
        # Predicted peaks carry no observed bin counts (BinCount == 0).
        peak0 = integrated.getPeak(0)
        self.assertEqual(peak0.getH(), -1)
        self.assertEqual(peak0.getK(), 1)
        self.assertEqual(peak0.getL(), 8)
        self.assertEqual(peak0.getRunNumber(), 182)
        self.assertAlmostEqual(peak0.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak0.getIntensity(), 124.408590, places=5)
        self.assertEqual(peak0.getBinCount(), 0)
        peak1 = integrated.getPeak(integrated.getNumberPeaks()-1)
        self.assertEqual(peak1.getH(), 3)
        self.assertEqual(peak1.getK(), 3)
        self.assertEqual(peak1.getL(), 3)
        self.assertEqual(peak1.getRunNumber(), 183)
        self.assertAlmostEqual(peak1.getWavelength(), 1.008, places=5)
        self.assertAlmostEqual(peak1.getIntensity(), 101.3689957, places=5)
        self.assertEqual(peak1.getBinCount(), 0)
        DeleteWorkspaces([ws_name+'_data',
                          ws_name+'_found_peaks',
                          ws_name+'_peaks',
                          ws_name+'_integrated'])
class SatellitePeaksFakeData(systemtesting.MantidSystemTest):
    # Builds a synthetic Q-sample MDEventWorkspace containing both nuclear
    # peaks and +/-(0,0,1.5) satellite peaks for a NaMnCl3-like cell, then
    # checks that HB3APredictPeaks/HB3AIntegratePeaks recover exactly the
    # fabricated reflections.
    def runTest(self):
        # Na Mn Cl3
        # R -3 H (148)
        # 6.592 6.592 18.585177 90 90 120
        # UB/wavelength from /HFIR/HB3A/IPTS-25470/shared/autoreduce/HB3A_exp0769_scan0040.nxs
        ub = np.array([[ 1.20297e-01, 1.70416e-01, 1.43000e-04],
                       [ 8.16000e-04, -8.16000e-04, 5.38040e-02],
                       [ 1.27324e-01, -4.05110e-02, -4.81000e-04]])
        wavelength = 1.553

        # create fake MDEventWorkspace, similar to what is expected from exp769 after loading with HB3AAdjustSampleNorm
        MD_Q_sample = CreateMDWorkspace(Dimensions='3', Extents='-5,5,-5,5,-5,5',
                                        Names='Q_sample_x,Q_sample_y,Q_sample_z',
                                        Units='rlu,rlu,rlu',
                                        Frames='QSample,QSample,QSample')

        inst = LoadEmptyInstrument(InstrumentName='HB3A')
        # Goniometer logs at zero so Q_sample == Q_lab orientation-wise.
        AddTimeSeriesLog(inst, 'omega', '2010-01-01T00:00:00', 0.)
        AddTimeSeriesLog(inst, 'phi', '2010-01-01T00:00:00', 0.)
        AddTimeSeriesLog(inst, 'chi', '2010-01-01T00:00:00', 0.)
        MD_Q_sample.addExperimentInfo(inst)
        SetUB(MD_Q_sample, UB=ub)

        ol=OrientedLattice()
        ol.setUB(ub)

        sg = SpaceGroupFactory.createSpaceGroup("R -3")

        # Fabricate Gaussian blobs at every allowed reflection (and its
        # two l+/-1.5 satellites) that falls inside the -5..5 Q box.
        hkl= []
        sat_hkl = []

        for h in range(0, 6):
            for k in range(0, 6):
                for l in range(0, 11):
                    if sg.isAllowedReflection([h,k,l]):
                        if h == k == l == 0:
                            continue
                        q = V3D(h, k, l)
                        q_sample = ol.qFromHKL(q)
                        if not np.any(np.array(q_sample) > 5):
                            hkl.append(q)
                            FakeMDEventData(MD_Q_sample, PeakParams='1000,{},{},{},0.05'.format(*q_sample))
                        # satellite peaks at 0,0,+1.5
                        q = V3D(h, k, l+1.5)
                        q_sample = ol.qFromHKL(q)
                        if not np.any(np.array(q_sample) > 5):
                            sat_hkl.append(q)
                            FakeMDEventData(MD_Q_sample, PeakParams='100,{},{},{},0.02'.format(*q_sample))
                        # satellite peaks at 0,0,-1.5
                        q = V3D(h, k, l-1.5)
                        q_sample = ol.qFromHKL(q)
                        if not np.any(np.array(q_sample) > 5):
                            sat_hkl.append(q)
                            FakeMDEventData(MD_Q_sample, PeakParams='100,{},{},{},0.02'.format(*q_sample))

        # Check that this fake workpsace gives us the expected UB
        peaks = FindPeaksMD(MD_Q_sample, PeakDistanceThreshold=1, OutputType='LeanElasticPeak')
        FindUBUsingFFT(peaks, MinD=5, MaxD=20)
        ShowPossibleCells(peaks)
        SelectCellOfType(peaks, CellType='Rhombohedral', Centering='R', Apply=True)
        OptimizeLatticeForCellType(peaks, CellType='Hexagonal', Apply=True)
        found_ol = peaks.sample().getOrientedLattice()
        self.assertAlmostEqual(found_ol.a(), 6.592, places=2)
        self.assertAlmostEqual(found_ol.b(), 6.592, places=2)
        self.assertAlmostEqual(found_ol.c(), 18.585177, places=2)
        self.assertAlmostEqual(found_ol.alpha(), 90)
        self.assertAlmostEqual(found_ol.beta(), 90)
        self.assertAlmostEqual(found_ol.gamma(), 120)

        # nuclear peaks
        predict = HB3APredictPeaks(MD_Q_sample, Wavelength=wavelength,
                                   ReflectionCondition='Rhombohedrally centred, obverse',
                                   SatellitePeaks=True, IncludeIntegerHKL=True)
        predict = HB3AIntegratePeaks(MD_Q_sample, predict, 0.25)

        self.assertEqual(predict.getNumberPeaks(), 66)
        # check that the found peaks are expected
        for n in range(predict.getNumberPeaks()):
            HKL = predict.getPeak(n).getHKL()
            self.assertTrue(HKL in hkl, msg=f"Peak {n} with HKL={HKL}")

        # magnetic peaks
        satellites = HB3APredictPeaks(MD_Q_sample, Wavelength=wavelength,
                                      ReflectionCondition='Rhombohedrally centred, obverse',
                                      SatellitePeaks=True,
                                      ModVector1='0,0,1.5',
                                      MaxOrder=1,
                                      IncludeIntegerHKL=False)
        satellites = HB3AIntegratePeaks(MD_Q_sample, satellites, 0.1)

        self.assertEqual(satellites.getNumberPeaks(), 80)
        # check that the found peaks are expected
        for n in range(satellites.getNumberPeaks()):
            HKL = satellites.getPeak(n).getHKL()
            self.assertTrue(HKL in sat_hkl, msg=f"Peak {n} with HKL={HKL}")
class HB3AFindPeaksTest(systemtesting.MantidSystemTest):
    # test moved from unittests because of large memory usage
    def runTest(self):
        # Test with vanadium normalization to make sure UB matrix and peaks can still be found
        norm = HB3AAdjustSampleNorm(Filename="HB3A_exp0724_scan0182.nxs",
                                    VanadiumFile="HB3A_exp0722_scan0220.nxs",
                                    NormaliseBy='None')
        peaks = HB3AFindPeaks(InputWorkspace=norm,
                              CellType="Orthorhombic",
                              Centering="F",
                              PeakDistanceThreshold=0.25,
                              Wavelength=1.008)
        # Verify UB and peaks were found
        self.assertTrue(HasUB(peaks))
        self.assertGreater(peaks.getNumberPeaks(), 0)
| gpl-3.0 |
iradul/qtwebkit | Tools/Scripts/webkitpy/tool/bot/sheriff.py | 124 | 4862 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.grammar import join_with_separators
class Sheriff(object):
    """Encapsulates the sheriff-bot's actions: warning people on IRC,
    creating rollout patches, and posting blame comments on bugs."""

    def __init__(self, tool, sheriffbot):
        self._tool = tool
        self._sheriffbot = sheriffbot

    def name(self):
        return self._sheriffbot.name

    def responsible_nicknames_from_commit_info(self, commit_info):
        """Return a flat list of IRC nicknames of every responsible party
        (author, reviewer, committer, ...) that has known nicknames."""
        # A flattening comprehension instead of reduce(): it also works on
        # Python 3 (where reduce is not a builtin) and returns [] instead
        # of raising TypeError when no party has any nicknames.
        return [nickname
                for party in commit_info.responsible_parties()
                if party.irc_nicknames
                for nickname in party.irc_nicknames]

    def post_irc_warning(self, commit_info, builders):
        """Warn the responsible parties on IRC that their revision might
        have broken the given builders."""
        irc_nicknames = sorted(self.responsible_nicknames_from_commit_info(commit_info))
        irc_prefix = ": " if irc_nicknames else ""
        irc_message = "%s%s%s might have broken %s" % (
            ", ".join(irc_nicknames),
            irc_prefix,
            urls.view_revision_url(commit_info.revision()),
            join_with_separators([builder.name() for builder in builders]))

        self._tool.irc().post(irc_message)

    def post_irc_summary(self, failure_map):
        """Post a one-line summary of new test failures, truncated to the
        first five test names."""
        failing_tests = failure_map.failing_tests()
        if not failing_tests:
            return
        test_list_limit = 5
        irc_message = "New failures: %s" % ", ".join(sorted(failing_tests)[:test_list_limit])
        failure_count = len(failing_tests)
        if failure_count > test_list_limit:
            irc_message += " (and %s more...)" % (failure_count - test_list_limit)
        self._tool.irc().post(irc_message)

    def post_rollout_patch(self, svn_revision_list, rollout_reason):
        """Create a rollout patch for the given revisions and return the
        id of the bug it was attached to.

        Raises ScriptError if a revision is not an integer or the reason
        looks like a command-line switch.
        """
        # Ensure that svn revisions are numbers (and not options to
        # create-rollout).
        try:
            svn_revisions = " ".join([str(int(revision)) for revision in svn_revision_list])
        except (TypeError, ValueError):
            # Narrowed from a bare except: int() failures are the only
            # errors we intend to translate into a ScriptError here.
            raise ScriptError(message="Invalid svn revision number \"%s\"."
                              % " ".join(svn_revision_list))

        if rollout_reason.startswith("-"):
            raise ScriptError(message="The rollout reason may not begin "
                              "with - (\"%s\")." % rollout_reason)

        output = self._sheriffbot.run_webkit_patch([
            "create-rollout",
            "--force-clean",
            # In principle, we should pass --non-interactive here, but it
            # turns out that create-rollout doesn't need it yet. We can't
            # pass it prophylactically because we reject unrecognized command
            # line switches.
            "--parent-command=sheriff-bot",
            svn_revisions,
            rollout_reason,
        ])
        return urls.parse_bug_id(output)

    def post_blame_comment_on_bug(self, commit_info, builders, tests):
        """Post a comment on the commit's bug naming the builders (and,
        optionally, the tests) that it might have broken."""
        if not commit_info.bug_id():
            return
        comment = "%s might have broken %s" % (
            urls.view_revision_url(commit_info.revision()),
            join_with_separators([builder.name() for builder in builders]))
        if tests:
            comment += "\nThe following tests are not passing:\n"
            comment += "\n".join(tests)
        self._tool.bugs.post_comment_to_bug(commit_info.bug_id(),
                                            comment,
                                            cc=self._sheriffbot.watchers)
| gpl-2.0 |
alexcigun/KPK2016teachers | A-gun/A gun initial steps 23 30.py | 1 | 3026 | from tkinter import *
from random import choice, randint as rnd
import time
from math import sin, cos, pi
class Gun:
    """The player's cannon: a thick line from the fixed pivot (30, 570)
    to the muzzle at (x, y)."""

    def __init__(self, canvas, width=30, x=100, y=500):
        self.canvas = canvas
        self.x = x
        self.y = y
        self.w = width
        # The barrel is drawn once; only the muzzle end is configurable.
        self.id = canvas.create_line(30, 570, x, y, width=width, fill='black')
class Ball:
    """A coloured disc of radius r centred at (x, y) on the canvas."""

    def __init__(self, canvas, r, color, x=100, y=300):
        self.canvas = canvas
        self.x = x
        self.y = y
        self.r = r
        self.color = color
        # An oval is specified by its bounding box.
        bbox = (x - r, y - r, x + r, y + r)
        self.id = canvas.create_oval(*bbox, fill=color)
class Target:
def __init__(self,canvas):
self.canvas = canvas
self.x = rnd(700,1000-100)
self.y = rnd(100,500)
self.r = rnd(10,50)
self.id = canvas.create_oval(self.x-self.r,self.y-self.r,self.x+self.r,self.y+self.r, fill='grey')
def snaryad(tetta=45, v0=100):
    """Animate a projectile fired from the gun across the canvas.

    :param tetta: launch angle in degrees above the horizon.
    :param v0: muzzle velocity in m/s (a typical shell velocity).

    Uses the module-level ``canvas`` and ``tk`` created in the
    ``__main__`` block.
    """
    x0 = 100   # initial horizontal coordinate
    g = 9.81   # gravitational acceleration (m/s**2); substitute another
               # value to simulate e.g. the Moon
    y0 = 500   # initial vertical coordinate
    # Work in radians from here on.
    rad = 180 / pi
    tetta_r = tetta / rad
    a = Ball(canvas, 15, 'red', x0, y0)
    # The velocity components are constant for the whole flight, so
    # compute them once outside the loop.
    v0x = v0 * cos(tetta_r)
    v0y = v0 * sin(tetta_r)
    t = 0
    x = 50
    while x < 1000 + a.r:
        # Ballistics: uniform horizontal motion, uniformly accelerated
        # vertical motion.  Canvas y grows downwards, hence the signs.
        sx = v0x * t
        sy = -v0y * t + (g * t ** 2) / 2
        x = x0 + sx
        y = y0 + sy
        # --- TODO: compare the shell's x/y with the coordinates of each
        # remaining target (collision detection not implemented yet).
        try:
            canvas.delete(a.id)
            a = Ball(canvas, 15, 'red', x, y)
            tk.update_idletasks()
            tk.update()
        except TclError:
            # Narrowed from a bare except: the window may be destroyed
            # mid-flight; stop redrawing but let the loop finish normally.
            pass
        t += 0.5
        time.sleep(0.1)
def babah(event):
    # <space> key handler: fire a shell using the currently configured
    # launch angle and muzzle velocity (module-level tetta / v0).
    global tetta,v0
    snaryad(tetta,v0)
if __name__=='__main__':
    # Build the game window: a fixed-size, always-on-top 1000x600 canvas.
    tk = Tk()
    tk.title("Game")
    tk.resizable(0, 0)
    tk.wm_attributes("-topmost", 1)
    canvas = Canvas(tk, width=1000, height=600, bd=0, highlightthickness=0)
    canvas.pack()
    # Draw the cannon
    our_gun=Gun(canvas)
    # Scatter the targets at random positions
    targets_count=rnd(2,6)
    targets=[]
    for i in range(targets_count):
        t=Target(canvas)
        targets.append(t)
    tk.update()
    v0=100 # initial speed (m/s), a typical shell velocity
    tetta=60 # initial launch angle (degrees)
    # Fire a shell on <space>
    tk.bind('<space>',babah)
    # NOTE(review): there is no tk.mainloop() call, so the window stays
    # responsive only when run from an interactive shell -- confirm
    # whether that is intended.
| gpl-3.0 |
solvcon/solvcon | solvcon/parcel/linear/velstress/material.py | 2 | 17175 | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the SOLVCON nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Material definition.
"""
import numpy as np
from solvcon import gendata
#: Registry singleton mapping material type names to their classes; it is
#: populated automatically by :py:class:`MaterialMeta` at class creation.
mltregy = gendata.TypeNameRegistry()
class MaterialMeta(type):
    """Metaclass for material classes: automatically registers every newly
    created class in the :py:data:`mltregy` registry."""

    def __new__(cls, name, bases, namespace):
        newcls = super().__new__(cls, name, bases, namespace)
        # Make the new material type discoverable through the registry.
        mltregy.register(newcls)
        return newcls
class Material(metaclass=MaterialMeta):
    """Material properties. The constitutive relation needs not be symmetric.
    """

    #: :py:class:`list` of :py:class:`tuple` for indices where the content
    #: should be zero.
    _zeropoints_ = []

    # NOTE(review): K appears to select, per spatial direction, the
    # entries of the 6-component (Voigt) stress vector that assemble the
    # corresponding row of the 3x3 stress tensor; it is consumed by code
    # outside this file -- confirm before relying on this description.
    K = np.array([ [
        [1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 1, 0],
    ], [
        [0, 0, 0, 0, 0, 1],
        [0, 1, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
    ], [
        [0, 0, 0, 0, 1, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0],
    ], ], dtype='float64')
def __init__(self, rho=None, al=None, be=None, ga=None, **kw):
assert None is not rho
assert None is not al
assert None is not be
assert None is not ga
#: Density.
self.rho = rho
#: Alpha angle.
self.al = al
#: Beta angle.
self.be = be
#: Gamma angle.
self.ga = ga
# set stiffness matrix.
origstiff = np.empty((6,6), dtype='float64')
origstiff.fill(0.0)
for key in kw.keys(): # becaues I pop out the key.
if len(key) == 4 and key[:2] == 'co':
try:
i = int(key[2])-1
j = int(key[3])-1
except:
continue
assert i < origstiff.shape[0]
assert j < origstiff.shape[1]
val = kw.pop(key)
origstiff[i,j] = val
#: Stiffness matrix in the crystal coordinate.
self.origstiff = origstiff
# check for zeros.
self._check_origstiffzero(self.origstiff)
# compute the stiffness matrix in the transformed global coordinate
# system.
bondmat = self.get_bondmat()
#: Stiffness matrix in the transformed global coordinate.
self.stiff = np.dot(bondmat, np.dot(self.origstiff, bondmat.T))
super(Material, self).__init__(**kw)
def __getattr__(self, key):
if len(key) == 4 and key[:2] == 'co':
i = int(key[2])
j = int(key[3])
if 1 <= i <= 6 and 1 <= j <= 6:
return self.origstiff[i-1,j-1]
elif len(key) == 3 and key[0] == 'c':
i = int(key[1])
j = int(key[2])
if 1 <= i <= 6 and 1 <= j <= 6:
return self.stiff[i-1,j-1]
else:
raise AttributeError
def __str__(self):
return '[%s: al=%.2f be=%.2f ga=%.2f (deg)]' % (self.__class__.__name__,
self.al/(np.pi/180), self.be/(np.pi/180), self.ga/(np.pi/180))
@classmethod
def _check_origstiffzero(cls, origstiff):
"""
Check for zero in original stiffness matrix.
@note: no assumed symmetry.
"""
for i, j in cls._zeropoints_:
assert origstiff[i,j] == 0.0
def get_rotmat(self):
"""
Coordinate transformation matrix for three successive rotations through
the Euler angles.
@return: the transformation matrix.
@rtype: numpy.ndarray
"""
al = self.al; be = self.be; ga = self.ga
almat = np.array([
[np.cos(al), np.sin(al), 0],
[-np.sin(al), np.cos(al), 0],
[0, 0, 1],
], dtype='float64')
bemat = np.array([
[1, 0, 0],
[0, np.cos(be), np.sin(be)],
[0, -np.sin(be), np.cos(be)],
], dtype='float64')
gamat = np.array([
[np.cos(ga), np.sin(ga), 0],
[-np.sin(ga), np.cos(ga), 0],
[0, 0, 1],
], dtype='float64')
return np.dot(gamat, np.dot(bemat, almat))
def get_bondmat(self):
"""
The Bond's matrix M as a shorthand of coordinate transformation for the
6-component stress vector.
@return: the Bond's matrix.
@rtype: numpy.ndarray
"""
rotmat = self.get_rotmat()
bond = np.empty((6,6), dtype='float64')
# upper left.
bond[:3,:3] = rotmat[:,:]**2
# upper right.
bond[0,3] = 2*rotmat[0,1]*rotmat[0,2]
bond[0,4] = 2*rotmat[0,2]*rotmat[0,0]
bond[0,5] = 2*rotmat[0,0]*rotmat[0,1]
bond[1,3] = 2*rotmat[1,1]*rotmat[1,2]
bond[1,4] = 2*rotmat[1,2]*rotmat[1,0]
bond[1,5] = 2*rotmat[1,0]*rotmat[1,1]
bond[2,3] = 2*rotmat[2,1]*rotmat[2,2]
bond[2,4] = 2*rotmat[2,2]*rotmat[2,0]
bond[2,5] = 2*rotmat[2,0]*rotmat[2,1]
# lower left.
bond[3,0] = rotmat[1,0]*rotmat[2,0]
bond[3,1] = rotmat[1,1]*rotmat[2,1]
bond[3,2] = rotmat[1,2]*rotmat[2,2]
bond[4,0] = rotmat[2,0]*rotmat[0,0]
bond[4,1] = rotmat[2,1]*rotmat[0,1]
bond[4,2] = rotmat[2,2]*rotmat[0,2]
bond[5,0] = rotmat[0,0]*rotmat[1,0]
bond[5,1] = rotmat[0,1]*rotmat[1,1]
bond[5,2] = rotmat[0,2]*rotmat[1,2]
# lower right.
bond[3,3] = rotmat[1,1]*rotmat[2,2] + rotmat[1,2]*rotmat[2,1]
bond[3,4] = rotmat[1,0]*rotmat[2,2] + rotmat[1,2]*rotmat[2,0]
bond[3,5] = rotmat[1,1]*rotmat[2,0] + rotmat[1,0]*rotmat[2,1]
bond[4,3] = rotmat[0,1]*rotmat[2,2] + rotmat[0,2]*rotmat[2,1]
bond[4,4] = rotmat[0,0]*rotmat[2,2] + rotmat[0,2]*rotmat[2,0]
bond[4,5] = rotmat[0,1]*rotmat[2,0] + rotmat[0,0]*rotmat[2,1]
bond[5,3] = rotmat[0,1]*rotmat[1,2] + rotmat[0,2]*rotmat[1,1]
bond[5,4] = rotmat[0,0]*rotmat[1,2] + rotmat[0,2]*rotmat[1,0]
bond[5,5] = rotmat[0,1]*rotmat[1,0] + rotmat[0,0]*rotmat[1,1]
return bond
def get_jacos(self):
"""
Obtain the Jacobian matrices for the solid.
@param K: the K matrix.
@type K: numpy.ndarray
@return: the Jacobian matrices
@rtype: numpy.ndarray
"""
rho = self.rho
sf = self.stiff
jacos = np.zeros((3,9,9), dtype='float64')
for idm in range(3):
K = self.K[idm]
jaco = jacos[idm]
jaco[:3,3:] = K/(-rho) # the upper right submatrix.
jaco[3:,:3] = -np.dot(sf, K.T) # the lower left submatrix.
return jacos
################################################################################
# Begin material symmetry group.
class Triclinic(Material):
    """
    The stiffness matrix has to be symmetric: every supplied ``coIJ`` is
    mirrored to ``coJI`` before construction.
    """
    _zeropoints_ = []
    def __init__(self, *args, **kw):
        # Iterate over a snapshot of the keys: the loop adds the symmetric
        # counterpart of each coefficient to kw, and mutating a dict while
        # iterating over kw.keys() raises RuntimeError on Python 3.
        for key in list(kw.keys()):
            if len(key) == 4 and key[:2] == 'co':
                try:
                    i = int(key[2])
                    j = int(key[3])
                except ValueError:
                    # Not a 'coIJ' coefficient; skip it.
                    continue
                symkey = 'co%d%d' % (j, i)
                if i != j:
                    # Callers must not specify both halves of a pair.
                    assert symkey not in kw
                    kw[symkey] = kw[key]
        super(Triclinic, self).__init__(*args, **kw)
    @classmethod
    def _check_origstiffzero(cls, origstiff):
        """Check that both (i,j) and (j,i) vanish (symmetric matrix)."""
        for i, j in cls._zeropoints_:
            assert origstiff[i,j] == origstiff[j,i] == 0.0
class Monoclinic(Triclinic):
    """Monoclinic symmetry: zero couplings at 0-based (i,3) and (i,5) for the
    first three rows, plus (3,4) and (4,5)."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in (3, 5)]
                    + [(3, 4), (4, 5)])
class Orthorhombic(Triclinic):
    """Orthorhombic symmetry: no normal/shear coupling and no coupling
    between distinct shear components."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in range(3, 6)]
                    + [(3, 4), (3, 5), (4, 5)])
class Tetragonal(Triclinic):
    """Tetragonal symmetry: dependent coefficients derive from co11, co13,
    co44 and the optional co16."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in (3, 4)]
                    + [(2, 5), (3, 4), (3, 5), (4, 5)])
    def __init__(self, *args, **kw):
        # Copy the independent constants into their symmetry-equivalent slots.
        for src, dst in (('co11', 'co22'), ('co13', 'co23'), ('co44', 'co55')):
            kw[dst] = kw[src]
        kw['co26'] = -kw.get('co16', 0.0)
        super(Tetragonal, self).__init__(*args, **kw)
class Trigonal(Triclinic):
    """Trigonal symmetry: dependent coefficients derive from co11, co12,
    co13, co44 and the optional co14/co25."""
    _zeropoints_ = [(0, 5), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4)]
    def __init__(self, *args, **kw):
        c14 = kw.get('co14', 0.0)
        c25 = kw.get('co25', 0.0)
        kw['co15'] = -c25
        kw['co24'] = -c14
        kw['co46'] = c25
        kw['co56'] = c14
        kw['co22'] = kw['co11']
        kw['co23'] = kw['co13']
        kw['co55'] = kw['co44']
        kw['co66'] = (kw['co11'] - kw['co12'])/2
        super(Trigonal, self).__init__(*args, **kw)
class Hexagonal(Trigonal):
    """Hexagonal symmetry: trigonal coefficient relations, with the full
    normal/shear decoupling of the zero-point table below."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in range(3, 6)]
                    + [(3, 4), (3, 5), (4, 5)])
class Cubic(Triclinic):
    """Cubic symmetry: three independent constants co11, co12, co44."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in range(3, 6)]
                    + [(3, 4), (3, 5), (4, 5)])
    def __init__(self, *args, **kw):
        # Expand the three independent constants into the full set.
        for dst in ('co13', 'co23'):
            kw[dst] = kw['co12']
        for dst in ('co22', 'co33'):
            kw[dst] = kw['co11']
        for dst in ('co55', 'co66'):
            kw[dst] = kw['co44']
        super(Cubic, self).__init__(*args, **kw)
class Isotropic(Triclinic):
    """Isotropic material: two independent constants co11 and co44; every
    off-diagonal normal coefficient equals co11 - 2*co44."""
    _zeropoints_ = ([(row, col) for row in range(3) for col in range(3, 6)]
                    + [(3, 4), (3, 5), (4, 5)])
    def __init__(self, *args, **kw):
        lam = kw['co11'] - 2*kw['co44']
        for dst in ('co12', 'co13', 'co23'):
            kw[dst] = lam
        for dst in ('co22', 'co33'):
            kw[dst] = kw['co11']
        for dst in ('co55', 'co66'):
            kw[dst] = kw['co44']
        super(Isotropic, self).__init__(*args, **kw)
# End material symmetry group.
################################################################################
################################################################################
# Begin real material properties.
class GaAs(Cubic):
    """Gallium arsenide (cubic); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 5307.0),
                ('co11', 11.88e10),
                ('co12', 5.38e10),
                ('co44', 5.94e10)):
            kw.setdefault(name, value)
        super(GaAs, self).__init__(*args, **kw)
class ZnO(Hexagonal):
    """Zinc oxide (hexagonal); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 5680.0),
                ('co11', 20.97e10),
                ('co12', 12.11e10),
                ('co13', 10.51e10),
                ('co33', 21.09e10),
                ('co44', 4.247e10)):
            kw.setdefault(name, value)
        super(ZnO, self).__init__(*args, **kw)
class CdS(Hexagonal):
    """Cadmium sulfide (hexagonal); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 4820.0),
                ('co11', 9.07e10),
                ('co12', 5.81e10),
                ('co13', 5.1e10),
                ('co33', 9.38e10),
                ('co44', 1.504e10)):
            kw.setdefault(name, value)
        super(CdS, self).__init__(*args, **kw)
class Zinc(Hexagonal):
    """Zinc (hexagonal); the explicit factors convert CGS values to SI
    (g/cm^3 -> kg/m^3 and dyn/cm^2 -> Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 7.1*1.e-3/(1.e-2**3)),
                ('co11', 14.3e11*1.e-5/(1.e-2**2)),
                ('co12', 1.7e11*1.e-5/(1.e-2**2)),
                ('co13', 3.3e11*1.e-5/(1.e-2**2)),
                ('co33', 5.0e11*1.e-5/(1.e-2**2)),
                ('co44', 4.0e11*1.e-5/(1.e-2**2))):
            kw.setdefault(name, value)
        super(Zinc, self).__init__(*args, **kw)
class Beryl(Hexagonal):
    """Beryl (hexagonal); the explicit factors convert CGS values to SI."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 2.7*1.e-3/(1.e-2**3)),
                ('co11', 26.94e11*1.e-5/(1.e-2**2)),
                ('co12', 9.61e11*1.e-5/(1.e-2**2)),
                ('co13', 6.61e11*1.e-5/(1.e-2**2)),
                ('co33', 23.63e11*1.e-5/(1.e-2**2)),
                ('co44', 6.53e11*1.e-5/(1.e-2**2))):
            kw.setdefault(name, value)
        super(Beryl, self).__init__(*args, **kw)
class Albite(Triclinic):
    """Albite (triclinic feldspar); stiffness in Pa.

    NOTE(review): no default density is provided, so callers must pass rho
    explicitly -- presumably intentional; confirm before adding a default.
    """
    def __init__(self, *args, **kw):
        for name, value in (
                ('co11', 69.9e9), ('co22', 183.5e9), ('co33', 179.5e9),
                ('co44', 24.9e9), ('co55', 26.8e9), ('co66', 33.5e9),
                ('co12', 34.0e9), ('co13', 30.8e9), ('co14', 5.1e9),
                ('co15', -2.4e9), ('co16', -0.9e9), ('co23', 5.5e9),
                ('co24', -3.9e9), ('co25', -7.7e9), ('co26', -5.8e9),
                ('co34', -8.7e9), ('co35', 7.1e9), ('co36', -9.8e9),
                ('co45', -2.4e9), ('co46', -7.2e9), ('co56', 0.5e9)):
            kw.setdefault(name, value)
        super(Albite, self).__init__(*args, **kw)
class Acmite(Monoclinic):
    """Acmite (monoclinic pyroxene); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 3.5e3),
                ('co11', 185.8e9), ('co22', 181.3e9), ('co33', 234.4e9),
                ('co44', 62.9e9), ('co55', 51.0e9), ('co66', 47.4e9),
                ('co12', 68.5e9), ('co13', 70.7e9), ('co15', 9.8e9),
                ('co23', 62.9e9), ('co25', 9.4e9), ('co35', 21.4e9),
                ('co46', 7.7e9)):
            kw.setdefault(name, value)
        super(Acmite, self).__init__(*args, **kw)
class AlphaUranium(Orthorhombic):
    """Alpha uranium (orthorhombic); stiffness in Pa.

    The density default is a placeholder ("a false value" per the original
    author) -- pass a real rho for physical results.
    """
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 8.2e3),  # a false value.
                ('co11', 215.e9), ('co22', 199.e9), ('co33', 267.e9),
                ('co44', 124.e9), ('co55', 73.e9), ('co66', 74.e9),
                ('co12', 46.e9), ('co13', 22.e9), ('co23', 107.e9)):
            kw.setdefault(name, value)
        super(AlphaUranium, self).__init__(*args, **kw)
class BariumTitanate(Tetragonal):
    """Barium titanate (tetragonal); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 6.2e3),
                ('co11', 275.0e9), ('co33', 165.0e9),
                ('co44', 54.3e9), ('co66', 113.0e9),
                ('co12', 179.0e9), ('co13', 151.0e9)):
            kw.setdefault(name, value)
        super(BariumTitanate, self).__init__(*args, **kw)
class AlphaQuartz(Trigonal):
    """Alpha quartz (trigonal); SI units (kg/m^3, Pa)."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 2.651e3),
                ('co11', 87.6e9), ('co33', 106.8e9), ('co44', 57.2e9),
                ('co12', 6.1e9), ('co13', 13.3e9), ('co14', 17.3e9)):
            kw.setdefault(name, value)
        super(AlphaQuartz, self).__init__(*args, **kw)
class RickerSample(Isotropic):
    """Isotropic sample medium; co11/co44 derive from P/S wave speeds
    (3200 and 1847.5 m/s) at density 2200 kg/m^3 via c = v**2 * rho."""
    def __init__(self, *args, **kw):
        for name, value in (
                ('rho', 2200.e0),
                ('co11', 3200.e0**2*2200.e0),
                ('co44', 1847.5e0**2*2200.e0)):
            kw.setdefault(name, value)
        super(RickerSample, self).__init__(*args, **kw)
class RickerSampleLight(Isotropic):
    """RickerSample with every property uniformly scaled down by 1e-3."""
    def __init__(self, *args, **kw):
        scale = 1.e-3
        for name, value in (
                ('rho', 2200.e0*scale),
                ('co11', 3200.e0**2*2200.e0*scale),
                ('co44', 1847.5e0**2*2200.e0*scale)):
            kw.setdefault(name, value)
        super(RickerSampleLight, self).__init__(*args, **kw)
# End real material properties.
################################################################################
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
| bsd-3-clause |
jensengrouppsu/rapid | rapid/pyqtgraph/dockarea/Container.py | 37 | 8634 | # -*- coding: utf-8 -*-
from ..Qt import QtCore, QtGui
import weakref
class Container(object):
    """Mix-in base for dock-area containers (splitters and tab stacks).

    Tracks the parent container and a (width, height) stretch hint.
    Subclasses mix this with a QWidget class and must provide
    count()/widget()/indexOf()/_insertItem() plus a ``sigStretchChanged``
    signal.
    """
    #sigStretchChanged = QtCore.Signal() ## can't do this here; not a QObject.
    def __init__(self, area):
        object.__init__(self)
        self.area = area
        self._container = None
        self._stretch = (10, 10)
        self.stretches = weakref.WeakKeyDictionary()
    def container(self):
        """Return the container holding this one (or None)."""
        return self._container
    def containerChanged(self, c):
        """Record *c* as the new parent container."""
        self._container = c
    def type(self):
        """Subclasses return 'horizontal', 'vertical' or 'tab'."""
        return None
    def insert(self, new, pos=None, neighbor=None):
        """Insert widget(s) *new* before/after *neighbor*, or at an end
        when *neighbor* is None ('before' -> index 0, otherwise append)."""
        if not isinstance(new, list):
            new = [new]
        # Remove from any existing parent first.  (The original called
        # new.setParent(None) before the isinstance check, which crashed
        # when a list was passed.)
        for n in new:
            n.setParent(None)
        if neighbor is None:
            if pos == 'before':
                index = 0
            else:
                index = self.count()
        else:
            index = self.indexOf(neighbor)
            if index == -1:
                index = 0
            if pos == 'after':
                index += 1
        for n in new:
            n.containerChanged(self)
            self._insertItem(n, index)
            index += 1
            # Track child resize requests so our own stretch stays current.
            n.sigStretchChanged.connect(self.childStretchChanged)
        self.updateStretch()
    def apoptose(self, propagate=True):
        """Dissolve this container when it holds at most one child; a
        remaining child is handed to the parent container first."""
        cont = self._container
        c = self.count()
        if c > 1:
            return
        if self.count() == 1:  ## one item: give it to the parent container (unless this is the top)
            if self is self.area.topContainer:
                return
            self.container().insert(self.widget(0), 'before', self)
        self.close()
        if propagate and cont is not None:
            cont.apoptose()
    def close(self):
        """Drop references and detach from the Qt parent."""
        self.area = None
        self._container = None
        self.setParent(None)
    def childEvent(self, ev):
        ch = ev.child()
        if ev.removed() and hasattr(ch, 'sigStretchChanged'):
            try:
                ch.sigStretchChanged.disconnect(self.childStretchChanged)
            except Exception:
                # disconnect() legitimately fails when the connection is
                # already gone; this was a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.
                pass
            self.updateStretch()
    def childStretchChanged(self):
        self.updateStretch()
    def setStretch(self, x=None, y=None):
        """Store the stretch hint and notify listeners."""
        self._stretch = (x, y)
        self.sigStretchChanged.emit()
    def updateStretch(self):
        ### Set the stretch values for this container to reflect its contents
        pass
    def stretch(self):
        """Return the stretch factors for this container"""
        return self._stretch
class SplitContainer(Container, QtGui.QSplitter):
    """Horizontal or vertical splitter with some changes:
    - save/restore works correctly
    """
    # Declared here (not on Container) because signals require a QObject base.
    sigStretchChanged = QtCore.Signal()
    def __init__(self, area, orientation):
        QtGui.QSplitter.__init__(self)
        self.setOrientation(orientation)
        Container.__init__(self, area)
        #self.splitterMoved.connect(self.restretchChildren)
    def _insertItem(self, item, index):
        # Place the child widget at the given splitter position.
        self.insertWidget(index, item)
        item.show()  ## need to show since it may have been previously hidden by tab
    def saveState(self):
        # Record the splitter section sizes; substitute uniform sizes when all
        # are zero (e.g. the splitter was never shown) so restore is sane.
        sizes = self.sizes()
        if all([x == 0 for x in sizes]):
            sizes = [10] * len(sizes)
        return {'sizes': sizes}
    def restoreState(self, state):
        # Re-apply saved sizes and mirror them into the stretch factors so a
        # subsequent resize keeps the same proportions.
        sizes = state['sizes']
        self.setSizes(sizes)
        for i in range(len(sizes)):
            self.setStretchFactor(i, sizes[i])
    def childEvent(self, ev):
        # Forward to both bases: Qt bookkeeping first, then Container's
        # stretch-tracking logic.
        QtGui.QSplitter.childEvent(self, ev)
        Container.childEvent(self, ev)
    #def restretchChildren(self):
        #sizes = self.sizes()
        #tot = sum(sizes)
class HContainer(SplitContainer):
    """Container laying its children out side by side (horizontal split)."""
    def __init__(self, area):
        SplitContainer.__init__(self, area, QtCore.Qt.Horizontal)
    def type(self):
        return 'horizontal'
    def updateStretch(self):
        """Recompute this container's stretch from its children (sum of
        widths, max of heights) and resize the splitter sections so each
        child's width is proportional to its stretch factor."""
        total_x = 0
        max_y = 0
        widths = []
        for idx in range(self.count()):
            wx, wy = self.widget(idx).stretch()
            total_x += wx
            if wy > max_y:
                max_y = wy
            widths.append(wx)
        self.setStretch(total_x, max_y)
        tot = float(sum(widths))
        scale = 1.0 if tot == 0 else self.width() / tot
        self.setSizes([int(w * scale) for w in widths])
class VContainer(SplitContainer):
    """Container stacking its children top to bottom (vertical split)."""
    def __init__(self, area):
        SplitContainer.__init__(self, area, QtCore.Qt.Vertical)
    def type(self):
        return 'vertical'
    def updateStretch(self):
        """Recompute this container's stretch from its children (max of
        widths, sum of heights) and resize the splitter sections so each
        child's height is proportional to its stretch factor."""
        total_y = 0
        max_x = 0
        heights = []
        for idx in range(self.count()):
            wx, wy = self.widget(idx).stretch()
            total_y += wy
            if wx > max_x:
                max_x = wx
            heights.append(wy)
        self.setStretch(max_x, total_y)
        tot = float(sum(heights))
        scale = 1.0 if tot == 0 else self.height() / tot
        self.setSizes([int(h * scale) for h in heights])
class TContainer(Container, QtGui.QWidget):
    """Tab container: a horizontal row of dock labels above a QStackedWidget
    holding the docks themselves."""
    sigStretchChanged = QtCore.Signal()
    def __init__(self, area):
        QtGui.QWidget.__init__(self)
        Container.__init__(self, area)
        # Outer grid: row 0 holds the tab bar, row 1 the stacked docks.
        self.layout = QtGui.QGridLayout()
        self.layout.setSpacing(0)
        self.layout.setContentsMargins(0,0,0,0)
        self.setLayout(self.layout)
        # Horizontal box holding one clickable label per dock.
        self.hTabLayout = QtGui.QHBoxLayout()
        self.hTabBox = QtGui.QWidget()
        self.hTabBox.setLayout(self.hTabLayout)
        self.hTabLayout.setSpacing(2)
        self.hTabLayout.setContentsMargins(0,0,0,0)
        self.layout.addWidget(self.hTabBox, 0, 1)
        self.stack = QtGui.QStackedWidget()
        self.layout.addWidget(self.stack, 1, 1)
        # Intercept child add/remove events on the stack so Container's
        # stretch bookkeeping sees them.
        self.stack.childEvent = self.stackChildEvent
        self.setLayout(self.layout)  # NOTE(review): redundant; already set above.
        # Delegate the Container interface straight to the stacked widget.
        for n in ['count', 'widget', 'indexOf']:
            setattr(self, n, getattr(self.stack, n))
    def _insertItem(self, item, index):
        # Only docks may live in a tab container; nesting containers here is
        # not supported.
        if not isinstance(item, Dock.Dock):
            raise Exception("Tab containers may hold only docks, not other containers.")
        self.stack.insertWidget(index, item)
        self.hTabLayout.insertWidget(index, item.label)
        #QtCore.QObject.connect(item.label, QtCore.SIGNAL('clicked'), self.tabClicked)
        item.label.sigClicked.connect(self.tabClicked)
        # Newly inserted docks are raised immediately.
        self.tabClicked(item.label)
    def tabClicked(self, tab, ev=None):
        """Raise the dock belonging to *tab*; ev is None when invoked
        programmatically (e.g. right after insertion)."""
        if ev is None or ev.button() == QtCore.Qt.LeftButton:
            for i in range(self.count()):
                w = self.widget(i)
                if w is tab.dock:
                    # Selected tab: undim its label and show its page.
                    w.label.setDim(False)
                    self.stack.setCurrentIndex(i)
                else:
                    w.label.setDim(True)
    def raiseDock(self, dock):
        """Move *dock* to the top of the stack"""
        self.stack.currentWidget().label.setDim(True)
        self.stack.setCurrentWidget(dock)
        dock.label.setDim(False)
    def type(self):
        return 'tab'
    def saveState(self):
        # Only the selected tab index needs persisting.
        return {'index': self.stack.currentIndex()}
    def restoreState(self, state):
        self.stack.setCurrentIndex(state['index'])
    def updateStretch(self):
        ## Set the stretch values for this container to reflect its contents:
        ## tabs overlap, so take the maximum over children in both directions.
        x = 0
        y = 0
        for i in range(self.count()):
            wx, wy = self.widget(i).stretch()
            x = max(x, wx)
            y = max(y, wy)
        self.setStretch(x, y)
    def stackChildEvent(self, ev):
        # Route the stack's child events through Container so removals update
        # our stretch.
        QtGui.QStackedWidget.childEvent(self.stack, ev)
        Container.childEvent(self, ev)
from . import Dock
| mit |
mrquim/repository.mrquim | script.module.covenant/lib/resources/lib/sources/es/pelisplustv.py | 6 | 4406 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
    """Covenant scraper for pelisplus.tv (Spanish-language hosters).

    Implements the standard Covenant scraper interface.  Every public method
    deliberately swallows all exceptions and returns None (or an empty list)
    -- the framework treats a failing scraper as simply having no results.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['es']
        self.domains = ['pelisplus.tv']
        self.base_link = 'http://pelisplus.tv'
        self.search_link = '/busqueda/?s=%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        # Try the localized title first, then fall back to the original one.
        try:
            alias_list = source_utils.aliases_to_array(aliases)
            url = self.__search([localtitle] + alias_list, year)
            if not url and title != localtitle:
                url = self.__search([title] + alias_list, year)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Same localized-then-original strategy as movie().
        try:
            alias_list = source_utils.aliases_to_array(aliases)
            url = self.__search([localtvshowtitle] + alias_list, year)
            if not url and tvshowtitle != localtvshowtitle:
                url = self.__search([tvshowtitle] + alias_list, year)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Append the site's season/episode path to the show URL.
        try:
            if not url:
                return
            base = url[:-1] if url.endswith('/') else url
            return base + '/temporada/%d/capitulo/%d/' % (int(season), int(episode))
        except:
            return

    def __search(self, titles, year):
        """Search the site and return the relative URL of the first result
        whose (original or display) title and year match."""
        try:
            query = self.search_link % (cleantitle.getsearch(titles[0].replace(' ','%20')))
            query = urlparse.urljoin(self.base_link, query)
            wanted = [cleantitle.get(t) for t in set(titles) if t][0]
            page = client.request(query)
            items = client.parseDOM(
                page, 'li',
                attrs={'class': 'item everyone-item over_online haveTooltip'})
            for item in items:
                shown_title = client.parseDOM(item, 'a', ret='title')[0]
                result_url = client.parseDOM(item, 'a', ret='href')[0]
                # Each candidate requires a second request: the year and the
                # original title only appear on the detail page.
                detail = client.request(result_url)
                found_year = re.findall('<p><span>Año:</span>(\d{4})', detail)[0]
                original = re.findall(
                    'movie-text">.+?h2.+?">\((.+?)\)</h2>', detail, re.DOTALL)[0]
                original = cleantitle.get(original)
                shown_title = cleantitle.get(shown_title)
                if (wanted in shown_title or wanted in original) and found_year == year:
                    anchor = dom_parser.parse_dom(item, 'a', req='href')
                    return source_utils.strip_domain(anchor[0][0]['href'])
            return
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Extract hoster links from the page; all results are flagged as
        Spanish/LAT, HD, non-direct."""
        sources = []
        try:
            if not url:
                return sources
            query = urlparse.urljoin(self.base_link, url)
            page = client.request(query)
            embed = re.findall("'(http://www.elreyxhd.+?)'", page, re.DOTALL)[0]
            links = client.parseDOM(client.request(embed), 'a', ret='href')
            for link in links:
                lang, info = 'es', 'LAT'
                qual = 'HD'
                if not 'http' in link: continue
                if 'elrey' in link: continue
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue
                sources.append({'source': host, 'quality': qual,
                                'language': lang, 'url': link, 'info': info,
                                'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
def resolve(self, url):
return url | gpl-2.0 |
Aaron1011/texting_wall | texting_wall/wsgi.py | 1 | 1146 | """
WSGI config for texting_wall project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# setdefault keeps a DJANGO_SETTINGS_MODULE already exported by the serving
# environment and only falls back to the project settings when unset.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "texting_wall.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
jrversteegh/softsailor | deps/numpy-1.6.1/numpy/distutils/fcompiler/compaq.py | 94 | 4064 |
#http://www.compaq.com/fortran/docs/
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
# Names of the compiler classes this module exports to numpy.distutils.
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
    # Otherwise we'd get a false positive on posix systems with
    # case-insensitive filesystems (like darwin), because we'll pick
    # up /bin/df
    compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
    """Wrapper for the Compaq (DEC) Fortran compiler on Unix platforms."""

    compiler_type = 'compaq'
    description = 'Compaq Fortran Compiler'
    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'

    # The Linux port installs the driver as 'fort'; Tru64 etc. use 'f90'.
    fc_exe = 'fort' if sys.platform.startswith('linux') else 'f90'

    executables = {
        'version_cmd'  : ['<F90>', "-version"],
        'compiler_f77' : [fc_exe, "-f77rtl","-fixed"],
        'compiler_fix' : [fc_exe, "-fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    module_dir_switch = '-module '  # not tested
    module_include_switch = '-I'

    def get_flags(self):
        return ['-assume no2underscore','-nomixed_str_len_arg']

    def get_flags_debug(self):
        return ['-g','-check bounds']

    def get_flags_opt(self):
        return ['-O4','-align dcommons','-assume bigarrays',
                '-assume nozsize','-math_library fast']

    def get_flags_arch(self):
        return ['-arch host', '-tune host']

    def get_flags_linker_so(self):
        # Linux gets a plain -shared; other platforms additionally tolerate
        # unresolved symbols at link time.
        flags = ['-shared']
        if not sys.platform.startswith('linux'):
            flags.append('-Wl,-expect_unresolved,*')
        return flags
class CompaqVisualFCompiler(FCompiler):
    """Wrapper for DIGITAL/Compaq Visual Fortran on Windows (the DF driver)."""

    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
                       r' Version (?P<version>[^\s]*).*')

    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:'  #No space after /OUT:!

    static_lib_extension = ".lib"
    static_lib_format = "%s%s"
    module_dir_switch = '/module:'
    module_include_switch = '/I'

    ar_exe = 'lib.exe'
    fc_exe = 'DF'

    if sys.platform=='win32':
        from distutils.msvccompiler import MSVCCompiler

        # Borrow the librarian (lib.exe) path from MSVC when it can be
        # located; each failure mode below is tolerated deliberately, and
        # anything unexpected is re-raised.
        try:
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError:
            msg = get_exception()
            if '_MSVCCompiler__root' in str(msg):
                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
            else:
                raise
        except IOError:
            e = get_exception()
            if not "vcvarsall.bat" in str(e):
                print("Unexpected IOError in", __file__)
                raise e
        except ValueError:
            e = get_exception()
            if not "path']" in str(e):
                print("Unexpected ValueError in", __file__)
                raise e

    executables = {
        'version_cmd'  : ['<F90>', "/what"],
        'compiler_f77' : [fc_exe, "/f77rtl","/fixed"],
        'compiler_fix' : [fc_exe, "/fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : [ar_exe, "/OUT:"],
        'ranlib'       : None
        }

    def get_flags(self):
        return ['/nologo','/MD','/WX','/iface=(cref,nomixed_str_len_arg)',
                '/names:lowercase','/assume:underscore']

    def get_flags_opt(self):
        return ['/Ox','/fast','/optimize:5','/unroll:0','/math_library:fast']

    def get_flags_arch(self):
        return ['/threads']

    def get_flags_debug(self):
        return ['/debug']
# Smoke test: when executed directly, detect the Compaq compiler on this
# machine and report its version.
if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='compaq')
    compiler.customize()
    print(compiler.get_version())
| gpl-3.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-2048-transformer/dataset_preproc/data_generators/generator_utils.py | 6 | 24723 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for data generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.utils import mlperf_log
import gzip
import os
import random
import stat
import tarfile
import tempfile
import requests
import six
from six.moves import range # pylint: disable=redefined-builtin
# Imports urllib on Python2, urllib.request on Python3
import six.moves.urllib_request as urllib
from tensor2tensor.data_generators import text_encoder
import tensorflow as tf
# Filename suffix marking TFRecord output that has not been shuffled yet;
# outputs_exist() strips it when probing for finished files.
UNSHUFFLED_SUFFIX = "-unshuffled"
def to_example(dictionary):
  """Helper: build tf.Example from (string -> int/float/str list) dictionary."""
  features = {}
  for name, values in six.iteritems(dictionary):
    if not values:
      raise ValueError("Empty generated field: %s" % str((name, values)))
    # Dispatch on the type of the first element; lists are assumed
    # homogeneous.
    first = values[0]
    if isinstance(first, six.integer_types):
      feature = tf.train.Feature(int64_list=tf.train.Int64List(value=values))
    elif isinstance(first, float):
      feature = tf.train.Feature(float_list=tf.train.FloatList(value=values))
    elif isinstance(first, six.string_types):
      if not six.PY2:  # Convert in python 3.
        values = [bytes(v, "utf-8") for v in values]
      feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
    elif isinstance(first, bytes):
      feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
    else:
      raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
                       (name, str(first), str(type(first))))
    features[name] = feature
  return tf.train.Example(features=tf.train.Features(feature=features))
def generate_files_distributed(generator,
                               output_name,
                               output_dir,
                               num_shards=1,
                               max_cases=None,
                               task_id=0):
  """Like generate_files, but one writer fills only shard *task_id*.

  Returns the path of the shard file written.
  """
  assert task_id < num_shards
  shard_path = os.path.join(output_dir,
                            sharded_name(output_name, task_id, num_shards))
  tf.logging.info("Writing to file %s", shard_path)
  record_writer = tf.python_io.TFRecordWriter(shard_path)
  written = 0
  for sample in generator:
    # Progress log every 100k cases (including the very first).
    if written % 100000 == 0:
      tf.logging.info("Generating case %d for %s." % (written, output_name))
    written += 1
    if max_cases and written > max_cases:
      break
    record_writer.write(to_example(sample).SerializeToString())
  record_writer.close()
  return shard_path
def _data_filenames(output_name, output_dir, num_shards):
  """Absolute paths of all shard files for *output_name* under *output_dir*."""
  shard_names = shard_filepath(output_name, num_shards)
  return [os.path.join(output_dir, shard_name) for shard_name in shard_names]
def train_data_filenames(problem, output_dir, num_shards):
  """Filepaths of the sharded "<problem>-train" files under *output_dir*."""
  return _data_filenames(problem + "-train", output_dir, num_shards)
def dev_data_filenames(problem, output_dir, num_shards):
  """Filepaths of the sharded "<problem>-dev" files under *output_dir*."""
  return _data_filenames(problem + "-dev", output_dir, num_shards)
def test_data_filenames(problem, output_dir, num_shards):
  """Filepaths of the sharded "<problem>-test" files under *output_dir*."""
  return _data_filenames(problem + "-test", output_dir, num_shards)
def combined_data_filenames(problem, output_dir, num_training_shards):
  """Train (sharded), dev and test (one shard each) filepaths, concatenated."""
  return (train_data_filenames(problem, output_dir, num_training_shards) +
          dev_data_filenames(problem, output_dir, 1) + test_data_filenames(
              problem, output_dir, 1))
def sharded_name(base_name, shard, total_shards):
  """Shard file name, e.g. ("base", 2, 10) -> "base-00002-of-00010"."""
  suffix = "%.5d-of-%.5d" % (shard, total_shards)
  return base_name + "-" + suffix
def shard_filepath(fname, num_shards):
  """Names of all *num_shards* shard files for base name *fname*."""
  return [sharded_name(fname, idx, num_shards) for idx in range(num_shards)]
def outputs_exist(filenames):
  """Return the first already-existing final output path, else None.

  The UNSHUFFLED_SUFFIX is stripped before probing, so the check looks for
  the *final* (shuffled) file corresponding to each name.
  """
  for fname in filenames:
    candidate = fname.replace(UNSHUFFLED_SUFFIX, "")
    if tf.gfile.Exists(candidate):
      return candidate
def generate_files(generator, output_filenames,
                   max_cases=None, cycle_every_n=1):
  """Generate cases from a generator and save as TFRecord files.

  Generated cases are transformed to tf.Example protos and saved as TFRecords
  in sharded files named output_dir/output_name-00..N-of-00..M=num_shards.

  Args:
    generator: a generator yielding (string -> int/float/str list) dictionaries.
    output_filenames: List of output file paths.
    max_cases: maximum number of cases to get from the generator;
      if None (default), we use the generator until StopIteration is raised.
    cycle_every_n: how many cases from the generator to take before
      switching to the next shard; by default set to 1, switch every case.
  """
  if outputs_exist(output_filenames):
    tf.logging.info("Skipping generator because outputs files exists at {}"
                    .format(output_filenames))
    return
  tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
  num_shards = len(output_filenames)
  # Classify the dataset split for MLPerf example-count logging.  This logic
  # previously sat as dead text inside the docstring, leaving `tag` undefined
  # and making the mlperf_log calls below raise NameError on the first case.
  tag = "other"
  if num_shards > 0:
    if "-train" in output_filenames[0]:
      tag = "train"
    elif "-dev" in output_filenames[0]:
      tag = "eval"
  writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
  counter, shard = 0, 0
  for case in generator:
    if case is None:
      continue
    if counter % 100000 == 0:
      tf.logging.info("Generating case %d." % counter)
      if num_shards > 0:
        if tag == "train":
          mlperf_log.transformer_print(
              key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
        elif tag == "eval":
          mlperf_log.transformer_print(
              key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)
    counter += 1
    if max_cases and counter > max_cases:
      break
    example = to_example(case)
    writers[shard].write(example.SerializeToString())
    # Rotate to the next shard after every cycle_every_n accepted cases.
    if counter % cycle_every_n == 0:
      shard = (shard + 1) % num_shards
  for writer in writers:
    writer.close()
  # Atomically promote the completed temporary files to their final names.
  for tmp_name, final_name in zip(tmp_filenames, output_filenames):
    tf.gfile.Rename(tmp_name, final_name)
  tf.logging.info("Generated %s Examples", counter)
def download_report_hook(count, block_size, total_size):
  """Report hook for download progress.

  Args:
    count: current block number
    block_size: block size
    total_size: total size, or <= 0 when the server did not report one
  """
  if total_size <= 0:
    # urllib passes -1 (or 0) when the size is unknown; the original code
    # crashed here with ZeroDivisionError.
    return
  percent = int(count * block_size * 100 / total_size)
  print("\r%d%%" % percent + " completed", end="\r")
def maybe_download(directory, filename, uri):
  """Download filename from uri unless it's already in directory.

  Copies a remote file to local if that local file does not already exist. If
  the local file pre-exists this function call, it does not check that the local
  file is a copy of the remote.

  Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
  URL.

  Args:
    directory: path to the directory that will be used.
    filename: name of the file to download to (do nothing if it already exists).
    uri: URI to copy (or download) from.

  Returns:
    The path to the downloaded file.
  """
  tf.gfile.MakeDirs(directory)
  filepath = os.path.join(directory, filename)
  if tf.gfile.Exists(filepath):
    tf.logging.info("Not downloading, file already found: %s" % filepath)
    return filepath

  tf.logging.info("Downloading %s to %s" % (uri, filepath))
  try:
    # gfile handles local paths and any filesystem it knows about (e.g. GCS).
    tf.gfile.Copy(uri, filepath)
  except tf.errors.UnimplementedError:
    if uri.startswith("http"):
      # Download to a ".incomplete" file first so an interrupted download
      # never shadows the final filepath.
      inprogress_filepath = filepath + ".incomplete"
      inprogress_filepath, _ = urllib.urlretrieve(
          uri, inprogress_filepath, reporthook=download_report_hook)
      # Print newline to clear the carriage return from the download progress
      print()
      tf.gfile.Rename(inprogress_filepath, filepath)
    else:
      # Bug fix: report the URI we failed to recognize, not the local path.
      raise ValueError("Unrecognized URI: " + uri)
  statinfo = os.stat(filepath)
  tf.logging.info("Successfully downloaded %s, %s bytes." %
                  (filename, statinfo.st_size))
  return filepath
def maybe_download_from_drive(directory, filename, url):
  """Download filename from Google drive unless it's already in directory.

  Args:
    directory: path to the directory that will be used.
    filename: name of the file to download to (do nothing if it already exists).
    url: URL to download from.

  Returns:
    The path to the downloaded file.
  """
  if not tf.gfile.Exists(directory):
    tf.logging.info("Creating directory %s" % directory)
    tf.gfile.MakeDirs(directory)
  filepath = os.path.join(directory, filename)
  if tf.gfile.Exists(filepath):
    tf.logging.info("Not downloading, file already found: %s" % filepath)
    return filepath

  # Since the file is big, drive will scan it for virus and take it to a
  # warning page. We find the confirm token on this page and append it to the
  # URL to start the download process.
  confirm_token = None
  session = requests.Session()
  response = session.get(url, stream=True)
  for k, v in response.cookies.items():
    if k.startswith("download_warning"):
      confirm_token = v

  if confirm_token:
    url = url + "&confirm=" + confirm_token
  tf.logging.info("Downloading %s to %s" % (url, filepath))

  # Now begin the download.
  response = session.get(url, stream=True)
  chunk_size = 16 * 1024
  with open(filepath, "wb") as f:
    for chunk in response.iter_content(chunk_size):
      if chunk:
        f.write(chunk)

  # Print newline to clear the carriage return from the download progress
  print()
  statinfo = os.stat(filepath)
  tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
                                                             statinfo.st_size))
  return filepath
def gunzip_file(gz_path, new_path):
  """Unzips from gz_path into new_path.

  Args:
    gz_path: path to the zipped file.
    new_path: path to where the file will be unzipped.
  """
  if tf.gfile.Exists(new_path):
    tf.logging.info("File %s already exists, skipping unpacking" % new_path)
    return
  tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
  # We may be unpacking into a newly created directory, add write mode.
  # Bug fix: permission flags must be combined with bitwise OR; the previous
  # boolean `or` short-circuited and evaluated to just stat.S_IRWXU.
  mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
  os.chmod(os.path.dirname(new_path), mode)
  with gzip.open(gz_path, "rb") as gz_file:
    with tf.gfile.GFile(new_path, mode="wb") as new_file:
      for line in gz_file:
        new_file.write(line)
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                generator, max_subtoken_length=None,
                                reserved_tokens=None):
  """Return a SubwordTextEncoder vocab, reusing a stored one when available.

  Args:
    data_dir: base directory where data and vocab files are stored. If None,
      the vocab is built but never persisted.
    vocab_filename: relative filename under data_dir for the vocab file.
    vocab_size: target size of the SubwordTextEncoder vocabulary.
    max_subtoken_length: optional int; set to a finite value to avoid
      quadratic costs during vocab building.
    reserved_tokens: optional list of reserved tokens;
      `text_encoder.RESERVED_TOKENS` should be a prefix of it. `None` means
      the default `RESERVED_TOKENS`.

  Returns:
    A SubwordTextEncoder vocabulary object.
  """
  vocab_filepath = None
  if data_dir and vocab_filename:
    vocab_filepath = os.path.join(data_dir, vocab_filename)
    if tf.gfile.Exists(vocab_filepath):
      # A vocab was already built on a previous run; just load it.
      tf.logging.info("Found vocab file: %s", vocab_filepath)
      return text_encoder.SubwordTextEncoder(vocab_filepath)

  tf.logging.info("Generating vocab file: %s", vocab_filepath)
  vocab = text_encoder.SubwordTextEncoder.build_from_generator(
      generator, vocab_size, max_subtoken_length=max_subtoken_length,
      reserved_tokens=reserved_tokens)
  if vocab_filepath:
    tf.gfile.MakeDirs(data_dir)
    vocab.store_to_file(vocab_filepath)
  return vocab
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
                          sources, file_byte_budget=1e6):
  """Generate a vocabulary from the datasets in sources."""
  return get_or_generate_vocab_inner(
      data_dir, vocab_filename, vocab_size,
      generate_lines_for_vocab(tmp_dir, sources, file_byte_budget))
def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
  """Generate lines for vocabulary generation.

  Downloads and (if needed) extracts each source, then yields a sample of
  stripped lines from each language file, bounded by file_byte_budget bytes
  of yielded text per file.

  Args:
    tmp_dir: directory used for downloads and extraction.
    sources: iterable of (url, [lang_file, ...]) pairs.
    file_byte_budget: approximate number of bytes to yield per file.

  Yields:
    Stripped text lines sampled from the source files.
  """
  tf.logging.info("Generating vocab from: %s", str(sources))
  for source in sources:
    url = source[0]
    filename = os.path.basename(url)
    compressed_file = maybe_download(tmp_dir, filename, url)
    for lang_file in source[1]:
      tf.logging.info("Reading file: %s" % lang_file)
      filepath = os.path.join(tmp_dir, lang_file)
      # Extract from tar if needed.
      if not tf.gfile.Exists(filepath):
        read_type = "r:gz" if filename.endswith("tgz") else "r"
        with tarfile.open(compressed_file, read_type) as corpus_tar:
          corpus_tar.extractall(tmp_dir)
      # For some datasets a second extraction is necessary.
      if lang_file.endswith(".gz"):
        new_filepath = os.path.join(tmp_dir, lang_file[:-3])
        if tf.gfile.Exists(new_filepath):
          tf.logging.info(
              "Subdirectory %s already exists, skipping unpacking" % filepath)
        else:
          tf.logging.info("Unpacking subdirectory %s" % filepath)
          gunzip_file(filepath, new_filepath)
        filepath = new_filepath
      with tf.gfile.GFile(filepath, mode="r") as source_file:
        file_byte_budget_ = file_byte_budget
        counter = 0
        # Skip roughly `countermax` lines between yielded lines so the
        # sampled text spans the whole file instead of just its head.
        countermax = int(source_file.size() / file_byte_budget_ / 2)
        for line in source_file:
          if counter < countermax:
            counter += 1
          else:
            # Stop once the budget of yielded bytes is exhausted.
            if file_byte_budget_ <= 0:
              break
            line = line.strip()
            file_byte_budget_ -= len(line)
            counter = 0
            yield line
def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
                                 index, vocab_filename, vocab_size):
  r"""Generate a vocabulary from a tabbed source file.

  The source is a file of source, target pairs, where each line contains
  a source string and a target string, separated by a tab ('\t') character.
  The index parameter specifies 0 for the source or 1 for the target.

  Args:
    data_dir: path to the data directory.
    tmp_dir: path to the temporary directory.
    source_filename: the name of the tab-separated source file.
    index: index.
    vocab_filename: the name of the vocabulary file.
    vocab_size: vocabulary size.

  Returns:
    The vocabulary.
  """
  def token_lines():
    # Yield the selected column (source or target) of every tabbed line.
    filepath = os.path.join(tmp_dir, source_filename)
    tf.logging.info("Generating vocab from %s", filepath)
    with tf.gfile.GFile(filepath, mode="r") as source_file:
      for line in source_file:
        stripped = line.strip()
        if stripped and "\t" in stripped:
          yield stripped.split("\t", 1)[index].strip()

  return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                     token_lines())
def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
                              filepatterns):
  """Generate a vocabulary from txt files with example-per-line."""
  if isinstance(filepatterns, str):
    filepatterns = [filepatterns]

  def vocab_lines():
    # Stream every stripped line of every file matching the patterns.
    tf.logging.info("Generating vocab from %s", filepatterns)
    for pattern in filepatterns:
      for fname in tf.gfile.Glob(pattern):
        with tf.gfile.GFile(fname, mode="r") as source_file:
          for line in source_file:
            yield line.strip()

  return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                     vocab_lines())
def read_records(filename):
  """Read every record of a TFRecord file into a list, logging progress."""
  records = []
  for record in tf.python_io.tf_record_iterator(filename):
    records.append(record)
    if len(records) % 100000 == 0:
      tf.logging.info("read: %d", len(records))
  return records
def write_records(records, out_filename):
  """Write serialized records to a TFRecord file, logging progress.

  Args:
    records: iterable of serialized records (bytes).
    out_filename: path of the TFRecord file to create.
  """
  # Use the writer as a context manager so the file is flushed and closed
  # even if writing raises (the previous version leaked the writer then).
  with tf.python_io.TFRecordWriter(out_filename) as writer:
    mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
    for count, record in enumerate(records):
      writer.write(record)
      if count > 0 and count % 100000 == 0:
        tf.logging.info("write: %d", count)
def generate_dataset_and_shuffle(train_gen,
                                 train_paths,
                                 dev_gen,
                                 dev_paths,
                                 shuffle=True):
  """Generate train and dev TFRecord files, then optionally shuffle both."""
  for generator, paths in ((train_gen, train_paths), (dev_gen, dev_paths)):
    generate_files(generator, paths)
  if shuffle:
    shuffle_dataset(train_paths + dev_paths)
def _shuffle_single(fname):
  """Shuffle one unshuffled file into its final (suffix-stripped) name."""
  records = read_records(fname)
  random.shuffle(records)
  write_records(records, fname.replace(UNSHUFFLED_SUFFIX, ""))
  # The unshuffled intermediate is no longer needed.
  tf.gfile.Remove(fname)
def shuffle_dataset(filenames):
  """Shuffles the dataset."""
  if outputs_exist(filenames):
    # Final (suffix-stripped) outputs already present; nothing to do.
    tf.logging.info("Skipping shuffle because output files exist")
    return
  tf.logging.info("Shuffling data...")
  for fname in filenames:
    _shuffle_single(fname)
  tf.logging.info("Data shuffled.")
class SequencePacker(object):
  """Helper for constructing a packed example of sequence examples.

  See comments to pack_examples()
  """

  def __init__(self, first_sequence, spacing=2):
    self._spacing = spacing
    self._ids = list(first_sequence)
    self._segmentation = [1] * len(first_sequence)
    self._position = list(range(len(first_sequence)))

  def add(self, ids):
    """Append `ids` as a new segment, separated by `spacing` zeros."""
    pad = [0] * self._spacing
    if self._segmentation:
      segment_num = self._segmentation[-1] + 1
    else:
      segment_num = 1
    self._ids += pad + list(ids)
    self._segmentation += pad + [segment_num] * len(ids)
    self._position += pad + list(range(len(ids)))

  def can_fit(self, ids, packed_length):
    """True if `ids` plus spacing fits within `packed_length`."""
    return len(self._ids) + self._spacing + len(ids) <= packed_length

  def to_dict(self):
    """Return the packed example as a feature dictionary."""
    return {
        "inputs": [0],
        "targets": self._ids,
        "targets_segmentation": self._segmentation,
        "targets_position": self._position,
    }
class SequencePairPacker(object):
  """Helper for packing sequence-to-sequence examples into bigger examples.

  See comments to pack_examples()
  """

  def __init__(self, first_sequence_pair, spacing=2):
    # One SequencePacker per side; both use the same spacing.
    self._inputs = SequencePacker(first_sequence_pair[0], spacing)
    self._targets = SequencePacker(first_sequence_pair[1], spacing)

  def add(self, pair):
    self._inputs.add(pair[0])
    self._targets.add(pair[1])

  def can_fit(self, pair, packed_length):
    return (self._inputs.can_fit(pair[0], packed_length) and
            self._targets.can_fit(pair[1], packed_length))

  def to_dict(self):
    # Take the targets-side dict and graft the inputs-side sequences onto it
    # under the corresponding "inputs*" keys.
    ret = self._targets.to_dict()
    inputs_dict = self._inputs.to_dict()
    for field in ("", "_segmentation", "_position"):
      ret["inputs" + field] = inputs_dict["targets" + field]
    return ret
def pack_examples(examples,
                  has_inputs,
                  packed_length=256,
                  spacing=2,
                  queue_size=10,
                  chop_long_sequences=False):
  """Pack examples into longer examples.
  If has_inputs=False, we are packing single-sequence examples with
  targets only and no inputs.
  In this case, we concatenate the targets from several examples to form
  each new example.  We insert a number of zeros for spacing between the
  original sequences.  This is to help the sequences stay separate
  under convolutions.  If chop_long_sequences is set, then any input sequence
  longer than packed_length gets chopped up into multiple examples.  Otherwise,
  long sequences are emitted as singletons.
  If has_inputs=True, then we are packing sequence-to-sequence
  examples.  We combine several examples by concatenating the inputs
  (as above) and concatenating the targets (as above).  Chopping of
  long sequences is not supported.
  The packed examples are represented as dictionaries containing:
    "inputs", "targets": the packed sequences described above
    "inputs_segmentation", "targets_segmentation":
       Sequences aligned with "inputs", "targets" specifying to which original
       sequence each position belongs.  Numbering starts from 1, and 0 is used
       for spacing.  This information is useful for preventing attention across
       segments.
       e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
    "inputs_position", "targets_position":
       Sequences aligned with "inputs", "targets" specifying position within
       the original sequence.  This is useful for positional encodings.
       e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
  Args:
    examples: a generator returning feature dictionaries.
    has_inputs: a boolean
    packed_length: an integer
    spacing: an integer
    queue_size: an integer
    chop_long_sequences: a boolean
  Yields:
    feature dictionaries.
  """
  packer = SequencePairPacker if has_inputs else SequencePacker
  # `combined` is a bounded queue (max queue_size) of partially filled packs.
  combined = []
  for example in examples:
    x = ((example["inputs"], example["targets"])
         if has_inputs else example["targets"])
    if chop_long_sequences and len(x) > packed_length:
      # Emit full-length fragments directly; pack only the remainder below.
      assert not has_inputs
      num_fragments = len(x) // packed_length
      for i in range(num_fragments):
        yield packer(
            x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
      x = x[packed_length * num_fragments:]
    # First-fit: add x to the first queued pack that still has room.
    added = False
    for c in combined:
      if c.can_fit(x, packed_length):
        c.add(x)
        added = True
        break
    if not added:
      # Queue full: flush the oldest pack to make room for a new one.
      if len(combined) == queue_size:
        yield combined[0].to_dict()
        combined = combined[1:]
      combined.append(packer(x, spacing))
  # Flush all remaining (possibly partial) packs.
  for c in combined:
    yield c.to_dict()
def make_tmp_dir(suffix="", prefix="tmp", dir=None):  # pylint: disable=redefined-builtin
  """Make a temporary directory."""
  if dir is None:
    # Local filesystem: delegate naming and creation to tempfile.
    return tempfile.mkdtemp(suffix, prefix, dir)
  # Possibly non-local filesystem (handled by tf.gfile): pick random names
  # until one does not exist yet, then create it.
  while True:
    candidate = os.path.join(
        dir, "%s%d%s" % (prefix, random.randint(1, 9999), suffix))
    if not tf.gfile.Exists(candidate):
      tf.gfile.MakeDirs(candidate)
      return candidate
def tfrecord_iterator_for_problem(problem, data_dir,
                                  dataset_split=tf.estimator.ModeKeys.TRAIN):
  """Iterate over the records on disk for the Problem."""
  pattern = problem.filepattern(data_dir, mode=dataset_split)
  # example_reading_spec() returns (feature_spec, ...); we only need the spec.
  reading_spec = problem.example_reading_spec()[0]
  return tfrecord_iterator(tf.gfile.Glob(pattern), example_spec=reading_spec)
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
  """Yields records from TFRecord files.
  Args:
    filenames: list<str>, list of TFRecord filenames to read from.
    gzipped: bool, whether the TFRecord files are gzip-encoded.
    example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
      if provided, will parse each record as a tensorflow.Example proto.
  Yields:
    Records (or parsed Examples, if example_spec is provided) from files.
  """
  # Build everything inside a fresh graph so iteration does not pollute the
  # caller's default graph.
  with tf.Graph().as_default():
    dataset = tf.data.Dataset.from_tensor_slices(filenames)
    def _load_records(filename):
      # One TFRecordDataset per filename, with a large read buffer (16 MB).
      return tf.data.TFRecordDataset(
          filename,
          compression_type=tf.constant("GZIP") if gzipped else None,
          buffer_size=16 * 1000 * 1000)
    dataset = dataset.flat_map(_load_records)
    def _parse_example(ex_ser):
      return tf.parse_single_example(ex_ser, example_spec)
    if example_spec:
      dataset = dataset.map(_parse_example, num_parallel_calls=32)
    dataset = dataset.prefetch(100)
    record_it = dataset.make_one_shot_iterator().get_next()
    # Drive the TF1-style iterator with a session, yielding until exhausted.
    with tf.Session() as sess:
      while True:
        try:
          ex = sess.run(record_it)
          yield ex
        except tf.errors.OutOfRangeError:
          break
| apache-2.0 |
tonnrueter/pymca_devel | PyMca/NNMAWindow.py | 1 | 12078 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
__author__ = "V.A. Sole - ESRF"
from PyMca import PCAWindow
from PyMca import PyMcaQt as qt
from PyMca import NNMAModule
QTVERSION = qt.qVersion()
class NNMAParametersDialog(qt.QDialog):
    """Dialog to configure a Non-Negative Matrix Approximation run.

    Lets the user pick the NNMA method, its convergence parameters
    (tolerance and maximum iterations) and the speed options (number of
    principal components and spectral binning).
    """
    def __init__(self, parent=None, options=(1, 2, 3, 4, 5, 10)):
        # NOTE: `options` used to default to a mutable list; a tuple avoids
        # the shared mutable default pitfall and is only iterated below.
        qt.QDialog.__init__(self, parent)
        if QTVERSION < '4.0.0':
            self.setCaption("NNMA Configuration Dialog")
        else:
            self.setWindowTitle("NNMA Configuration Dialog")
        self.mainLayout = qt.QVBoxLayout(self)
        self.mainLayout.setMargin(11)
        self.mainLayout.setSpacing(0)

        # Button that reveals the NNMAModule documentation viewer.
        self.infoButton = qt.QPushButton(self)
        self.infoButton.setAutoDefault(False)
        self.infoButton.setText('About NNMA')
        self.mainLayout.addWidget(self.infoButton)
        self.connect(self.infoButton,
                     qt.SIGNAL('clicked()'),
                     self._showInfo)

        # Radio buttons for the NNMA method selection.
        self.methodOptions = qt.QGroupBox(self)
        self.methodOptions.setTitle('NNMA Method to use')
        self.methods = ['RRI', 'NNSC', 'NMF', 'SNMF', 'NMFKL',
                        'FNMAI', 'ALS', 'FastHALS', 'GDCLS']
        self.methodOptions.mainLayout = qt.QGridLayout(self.methodOptions)
        self.methodOptions.mainLayout.setMargin(0)
        self.methodOptions.mainLayout.setSpacing(2)
        self.buttonGroup = qt.QButtonGroup(self.methodOptions)
        i = 0
        for item in self.methods:
            rButton = qt.QRadioButton(self.methodOptions)
            self.methodOptions.mainLayout.addWidget(rButton, 0, i)
            #self.l.setAlignment(rButton, qt.Qt.AlignHCenter)
            if i == 1:
                # 'NNSC' is the default method.
                rButton.setChecked(True)
            rButton.setText(item)
            self.buttonGroup.addButton(rButton)
            self.buttonGroup.setId(rButton, i)
            i += 1
        self.connect(self.buttonGroup,
                     qt.SIGNAL('buttonPressed(QAbstractButton *)'),
                     self._slot)
        self.mainLayout.addWidget(self.methodOptions)

        # NNMA configuration parameters
        self.nnmaConfiguration = qt.QGroupBox(self)
        self.nnmaConfiguration.setTitle('NNMA Configuration')
        self.nnmaConfiguration.mainLayout = qt.QGridLayout(self.nnmaConfiguration)
        self.nnmaConfiguration.mainLayout.setMargin(0)
        self.nnmaConfiguration.mainLayout.setSpacing(2)
        label = qt.QLabel(self.nnmaConfiguration)
        # Bug fix: label text was missing the closing parenthesis.
        label.setText('Tolerance (0<eps<1000):')
        self._tolerance = qt.QLineEdit(self.nnmaConfiguration)
        validator = qt.QDoubleValidator(self._tolerance)
        self._tolerance.setValidator(validator)
        self._tolerance._validator = validator
        self._tolerance.setText("0.001")
        self.nnmaConfiguration.mainLayout.addWidget(label, 0, 0)
        self.nnmaConfiguration.mainLayout.addWidget(self._tolerance, 0, 1)
        label = qt.QLabel(self.nnmaConfiguration)
        label.setText('Maximum iterations:')
        self._maxIterations = qt.QSpinBox(self.nnmaConfiguration)
        self._maxIterations.setMinimum(1)
        self._maxIterations.setMaximum(1000)
        self._maxIterations.setValue(100)
        self.nnmaConfiguration.mainLayout.addWidget(label, 1, 0)
        self.nnmaConfiguration.mainLayout.addWidget(self._maxIterations, 1, 1)
        self.mainLayout.addWidget(self.nnmaConfiguration)

        #built in speed options
        self.speedOptions = qt.QGroupBox(self)
        self.speedOptions.setTitle("Speed Options")
        self.speedOptions.mainLayout = qt.QGridLayout(self.speedOptions)
        self.speedOptions.mainLayout.setMargin(0)
        self.speedOptions.mainLayout.setSpacing(2)
        labelPC = qt.QLabel(self)
        labelPC.setText("Number of PC:")
        self.nPC = qt.QSpinBox(self.speedOptions)
        self.nPC.setMinimum(0)
        self.nPC.setValue(10)
        self.nPC.setMaximum(40)
        self.binningLabel = qt.QLabel(self.speedOptions)
        self.binningLabel.setText("Spectral Binning:")
        self.binningCombo = qt.QComboBox(self.speedOptions)
        for option in options:
            self.binningCombo.addItem("%d" % option)
        self.speedOptions.mainLayout.addWidget(labelPC, 0, 0)
        self.speedOptions.mainLayout.addWidget(self.nPC, 0, 1)
        #self.speedOptions.mainLayout.addWidget(qt.HorizontalSpacer(self), 0, 2)
        self.speedOptions.mainLayout.addWidget(self.binningLabel, 1, 0)
        self.speedOptions.mainLayout.addWidget(self.binningCombo, 1, 1)
        self.binningCombo.setEnabled(True)

        #the OK button
        hbox = qt.QWidget(self)
        hboxLayout = qt.QHBoxLayout(hbox)
        hboxLayout.setMargin(0)
        hboxLayout.setSpacing(0)
        self.okButton = qt.QPushButton(hbox)
        self.okButton.setAutoDefault(False)
        self.okButton.setText("Accept")
        hboxLayout.addWidget(qt.HorizontalSpacer(hbox))
        hboxLayout.addWidget(self.okButton)
        self.dismissButton = qt.QPushButton(hbox)
        self.dismissButton.setAutoDefault(False)
        self.dismissButton.setText("Dismiss")
        hboxLayout.addWidget(qt.HorizontalSpacer(hbox))
        hboxLayout.addWidget(self.dismissButton)
        hboxLayout.addWidget(qt.HorizontalSpacer(hbox))
        self.mainLayout.addWidget(self.speedOptions)
        self.mainLayout.addWidget(hbox)
        self.connect(self.okButton,
                     qt.SIGNAL("clicked()"),
                     self.accept)
        self.connect(self.dismissButton,
                     qt.SIGNAL("clicked()"),
                     self.reject)

        # Hidden, read-only viewer for the NNMAModule documentation.
        self._infoDocument = qt.QTextEdit()
        self._infoDocument.setReadOnly(True)
        self._infoDocument.setText(NNMAModule.__doc__)
        self._infoDocument.hide()
        self.mainLayout.addWidget(self._infoDocument)

    def _showInfo(self):
        """Show the embedded NNMA documentation viewer."""
        self._infoDocument.show()

    def _slot(self, button):
        """Handle a method radio button press."""
        button.setChecked(True)
        index = self.buttonGroup.checkedId()
        self.binningLabel.setText("Spectral Binning:")
        # NOTE(review): the '1 or' makes this condition always true, so the
        # binning combo stays enabled for every method; kept as-is.
        if 1 or index != 2:
            self.binningCombo.setEnabled(True)
        else:
            self.binningCombo.setEnabled(False)
        return

    def setParameters(self, ddict):
        """Restore the dialog state from a parameters dictionary.

        Recognized keys: 'options', 'binning', 'npc' and 'method'.
        """
        if 'options' in ddict:
            self.binningCombo.clear()
            for option in ddict['options']:
                self.binningCombo.addItem("%d" % option)
        if 'binning' in ddict:
            option = "%d" % ddict['binning']
            for i in range(self.binningCombo.count()):
                if str(self.binningCombo.itemText(i)) == option:
                    self.binningCombo.setCurrentIndex(i)
        if 'npc' in ddict:
            self.nPC.setValue(ddict['npc'])
        if 'method' in ddict:
            self.buttonGroup.buttons()[ddict['method']].setChecked(True)
        return

    def getParameters(self):
        """Return the current configuration as a dictionary."""
        ddict = {}
        i = self.buttonGroup.checkedId()
        ddict['methodlabel'] = self.methods[i]
        ddict['function'] = NNMAModule.nnma
        eps = float(self._tolerance.text())
        maxcount = self._maxIterations.value()
        ddict['binning'] = int(self.binningCombo.currentText())
        ddict['npc'] = self.nPC.value()
        ddict['kw'] = {'eps':eps,
                       'maxcount':maxcount}
        return ddict
class NNMAWindow(PCAWindow.PCAWindow):
    """Viewer for NNMA results, reusing the PCA window machinery."""
    def setPCAData(self, images, eigenvalues=None, eigenvectors=None,
                   imagenames=None, vectornames=None):
        """Load NNMA images and component vectors into the viewer.

        :param images: list of 2D arrays or a 3D array (nimages, nrows, ncols)
        :param eigenvalues: optional per-component explained-intensity values
        :param eigenvectors: optional sequence of component spectra
        :param imagenames: optional display names for the images
        :param vectornames: optional display names for the component vectors
        """
        self.eigenValues = eigenvalues
        self.eigenVectors = eigenvectors
        if type(images) == type([]):
            self.imageList = images
            # Bug fix: nimages was left undefined in this branch, causing a
            # NameError below when default image/vector names are generated.
            nimages = len(self.imageList)
        elif len(images.shape) == 3:
            nimages = images.shape[0]
            self.imageList = [0] * nimages
            for i in range(nimages):
                self.imageList[i] = images[i, :]
                if self.imageList[i].max() < 0:
                    # Flip the sign of the image (and its matching vector)
                    # so the displayed component is positive.
                    self.imageList[i] *= -1
                    if self.eigenVectors is not None:
                        self.eigenVectors[i] *= -1
        if imagenames is None:
            self.imageNames = []
            for i in range(nimages):
                self.imageNames.append("NNMA Image %02d" % i)
        else:
            self.imageNames = imagenames
        if self.imageList is not None:
            self.slider.setMaximum(len(self.imageList) - 1)
            self.showImage(0)
        else:
            self.slider.setMaximum(0)
        if self.eigenVectors is not None:
            if vectornames is None:
                self.vectorNames = []
                for i in range(nimages):
                    self.vectorNames.append("NNMA Component %02d" % i)
            else:
                self.vectorNames = vectornames
            legend = self.vectorNames[0]
            y = self.eigenVectors[0]
            self.vectorGraph.newCurve(range(len(y)), y, legend, replace=True)
            if self.eigenValues is not None:
                self.vectorGraphTitles = []
                for i in range(nimages):
                    self.vectorGraphTitles.append("%g %% explained intensity" %
                                                  self.eigenValues[i])
                self.vectorGraph.graph.setTitle(self.vectorGraphTitles[0])
        self.slider.setValue(0)
def test2():
    """Manual test: show the NNMA configuration dialog modally."""
    app = qt.QApplication([])
    qt.QObject.connect(app,
                       qt.SIGNAL("lastWindowClosed()"),
                       app,
                       qt.SLOT('quit()'))
    dialog = NNMAParametersDialog()
    dialog.setModal(True)
    # exec_() returns a truthy value when the dialog was accepted.
    if dialog.exec_():
        dialog.close()
        print(dialog.getParameters())
def test():
    # Manual test: display an NNMAWindow filled with synthetic data.
    # NOTE(review): `numpy` is imported only inside the __main__ guard at the
    # bottom of the module; calling test() from elsewhere needs numpy in scope.
    app = qt.QApplication([])
    qt.QObject.connect(app,
                       qt.SIGNAL("lastWindowClosed()"),
                       app,
                       qt.SLOT('quit()'))
    container = NNMAWindow()
    data = numpy.arange(20000)
    data.shape = 2, 100, 100
    data[1, 0:100,0:50] = 100
    container.setPCAData(data, eigenvectors=[numpy.arange(100.), numpy.arange(100.)+10],
                         imagenames=["I1", "I2"], vectornames=["V1", "V2"])
    container.show()
    def theSlot(ddict):
        print(ddict['event'])
    if QTVERSION < '4.0.0':
        # NOTE(review): `updateMask` is not defined anywhere in this module;
        # this Qt3 branch would raise a NameError if ever exercised.
        qt.QObject.connect(container,
                           qt.PYSIGNAL("MaskImageWidgetSignal"),
                           updateMask)
        app.setMainWidget(container)
        app.exec_loop()
    else:
        qt.QObject.connect(container,
                           qt.SIGNAL("MaskImageWidgetSignal"),
                           theSlot)
        app.exec_()
if __name__ == "__main__":
import numpy
test2()
| gpl-2.0 |
ar7z1/ansible | lib/ansible/modules/cloud/openstack/os_listener.py | 27 | 8358 | #!/usr/bin/python
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_listener
short_description: Add/Delete a listener for a load balancer from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.7"
author: "Lingxian Kong (@lingxiankong)"
description:
- Add or Remove a listener for a load balancer from the OpenStack load-balancer service.
options:
name:
description:
- Name that has to be given to the listener
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
loadbalancer:
description:
- The name or id of the load balancer that this listener belongs to.
required: true
protocol:
description:
- The protocol for the listener.
choices: [HTTP, HTTPS, TCP, TERMINATED_HTTPS]
default: HTTP
protocol_port:
description:
- The protocol port number for the listener.
default: 80
wait:
description:
- If the module should wait for the load balancer to be ACTIVE.
type: bool
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the load balancer to get
into ACTIVE state.
default: 180
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
RETURN = '''
id:
description: The listener UUID.
returned: On success when I(state) is 'present'
type: string
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
listener:
description: Dictionary describing the listener.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Unique UUID.
type: string
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
name:
description: Name given to the listener.
type: string
sample: "test"
description:
description: The listener description.
type: string
sample: "description"
load_balancer_id:
description: The load balancer UUID this listener belongs to.
type: string
sample: "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"
loadbalancers:
      description: A list of load balancer IDs.
type: list
sample: [{"id": "b32eef7e-d2a6-4ea4-a301-60a873f89b3b"}]
provisioning_status:
description: The provisioning status of the listener.
type: string
sample: "ACTIVE"
operating_status:
description: The operating status of the listener.
type: string
sample: "ONLINE"
is_admin_state_up:
description: The administrative state of the listener.
type: bool
sample: true
protocol:
description: The protocol for the listener.
type: string
sample: "HTTP"
protocol_port:
description: The protocol port number for the listener.
type: int
sample: 80
'''
EXAMPLES = '''
# Create a listener, wait for the loadbalancer to be active.
- os_listener:
cloud: mycloud
endpoint_type: admin
state: present
name: test-listener
loadbalancer: test-loadbalancer
protocol: HTTP
protocol_port: 8080
# Create a listener, do not wait for the loadbalancer to be active.
- os_listener:
cloud: mycloud
endpoint_type: admin
state: present
name: test-listener
loadbalancer: test-loadbalancer
protocol: HTTP
protocol_port: 8080
wait: no
# Delete a listener
- os_listener:
cloud: mycloud
endpoint_type: admin
state: absent
name: test-listener
loadbalancer: test-loadbalancer
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, \
openstack_module_kwargs, openstack_cloud_from_module
def _lb_wait_for_status(module, cloud, lb, status, failures, interval=5):
"""Wait for load balancer to be in a particular provisioning status."""
timeout = module.params['timeout']
total_sleep = 0
if failures is None:
failures = []
while total_sleep < timeout:
lb = cloud.load_balancer.get_load_balancer(lb.id)
if lb.provisioning_status == status:
return None
if lb.provisioning_status in failures:
module.fail_json(
msg="Load Balancer %s transitioned to failure state %s" %
(lb.id, lb.provisioning_status)
)
time.sleep(interval)
total_sleep += interval
module.fail_json(
msg="Timeout waiting for Load Balancer %s to transition to %s" %
(lb.id, status)
)
def main():
    """Module entry point: ensure the listener is present or absent."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
        loadbalancer=dict(required=True),
        protocol=dict(default='HTTP',
                      choices=['HTTP', 'HTTPS', 'TCP', 'TERMINATED_HTTPS']),
        protocol_port=dict(default=80, type='int', required=False),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    sdk, cloud = openstack_cloud_from_module(module)
    loadbalancer = module.params['loadbalancer']
    loadbalancer_id = None
    try:
        changed = False
        # Look up the listener by name to decide whether anything must change.
        listener = cloud.load_balancer.find_listener(
            name_or_id=module.params['name'])
        if module.params['state'] == 'present':
            if not listener:
                lb = cloud.load_balancer.find_load_balancer(loadbalancer)
                if not lb:
                    module.fail_json(
                        msg='load balancer %s is not found' % loadbalancer
                    )
                loadbalancer_id = lb.id
                listener = cloud.load_balancer.create_listener(
                    name=module.params['name'],
                    loadbalancer_id=loadbalancer_id,
                    protocol=module.params['protocol'],
                    protocol_port=module.params['protocol_port'],
                )
                changed = True
                if not module.params['wait']:
                    # exit_json terminates the module here.
                    module.exit_json(changed=changed,
                                     listener=listener.to_dict(),
                                     id=listener.id)
            if module.params['wait']:
                # Check in case the listener already exists.
                lb = cloud.load_balancer.find_load_balancer(loadbalancer)
                if not lb:
                    module.fail_json(
                        msg='load balancer %s is not found' % loadbalancer
                    )
                _lb_wait_for_status(module, cloud, lb, "ACTIVE", ["ERROR"])
            module.exit_json(changed=changed, listener=listener.to_dict(),
                             id=listener.id)
        elif module.params['state'] == 'absent':
            if not listener:
                # Nothing to delete; report no change.
                changed = False
            else:
                cloud.load_balancer.delete_listener(listener)
                changed = True
                if module.params['wait']:
                    # Wait for the load balancer to be active after deleting
                    # the listener.
                    lb = cloud.load_balancer.find_load_balancer(loadbalancer)
                    if not lb:
                        module.fail_json(
                            msg='load balancer %s is not found' % loadbalancer
                        )
                    _lb_wait_for_status(module, cloud, lb, "ACTIVE", ["ERROR"])
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
| gpl-3.0 |
xrg/django-static-gitified | django/contrib/sites/management.py | 81 | 1585 | """
Creates the default Site object.
"""
from django.db.models import signals
from django.db import connections
from django.db import router
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_app
from django.core.management.color import no_style
def create_default_site(app, created_models, verbosity, db, **kwargs):
    """post_syncdb handler: ensure the default Site object (pk=1) exists.

    Arguments are those supplied by Django's ``post_syncdb`` signal:
    ``app`` is the models module just synced, ``created_models`` the set of
    model classes whose tables were created, ``verbosity`` the management
    command's verbosity level and ``db`` the alias of the synced database.
    """
    # Only create the default sites in databases where Django created the table
    if Site in created_models and router.allow_syncdb(db, Site) :
        # The default settings set SITE_ID = 1, and some tests in Django's test
        # suite rely on this value. However, if database sequences are reused
        # (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
        # the next id will be 1, so we coerce it. See #15573 and #16353. This
        # can also crop up outside of tests - see #15346.
        if verbosity >= 2:
            print "Creating example.com Site object"
        # Forcing pk=1 guarantees SITE_ID = 1 resolves to this row.
        Site(pk=1, domain="example.com", name="example.com").save(using=db)
        # We set an explicit pk instead of relying on auto-incrementation,
        # so we need to reset the database sequence. See #17415.
        # (sequence_reset_sql is a no-op list on backends without sequences.)
        sequence_sql = connections[db].ops.sequence_reset_sql(no_style(), [Site])
        if sequence_sql:
            if verbosity >= 2:
                print "Resetting sequence"
            cursor = connections[db].cursor()
            for command in sequence_sql:
                cursor.execute(command)
    # Always invalidate the Site cache, even when no row was created, so
    # stale entries from a previous database state are dropped.
    Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
| bsd-3-clause |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_grammar.py | 68 | 34266 | # Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test.support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def test_backslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
# Backslash does not means continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def test_plain_integers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def test_long_integers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def test_floats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def test_float_exponent_tokenization(self):
# See issue 21642.
self.assertEqual(1 if 1else 0, 1)
self.assertEqual(1 if 0else 0, 0)
self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
def test_string_literals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
def test_ellipsis(self):
x = ...
self.assertTrue(x is Ellipsis)
self.assertRaises(SyntaxError, eval, ".. .")
def test_eof_error(self):
samples = ("def foo(", "\ndef foo(", "def foo(\n")
for s in samples:
with self.assertRaises(SyntaxError) as cm:
compile(s, "<test>", "exec")
self.assertIn("unexpected EOF", str(cm.exception))
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def test_eval_input(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def test_funcdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
def pos0key1(*, key): return key
pos0key1(key=100)
def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# argument annotation tests
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
def f(x:int): pass
self.assertEqual(f.__annotations__, {'x': int})
def f(*x:str): pass
self.assertEqual(f.__annotations__, {'x': str})
def f(**x:float): pass
self.assertEqual(f.__annotations__, {'x': float})
def f(x, y:1+2): pass
self.assertEqual(f.__annotations__, {'y': 3})
def f(a, b:1, c:2, d): pass
self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
**k:11) -> 12: pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for issue #20625 -- annotations mangling
class Spam:
def f(self, *, __kw:1):
pass
class Ham(Spam): pass
self.assertEqual(Spam.f.__annotations__, {'_Spam__kw': 1})
self.assertEqual(Ham.f.__annotations__, {'_Spam__kw': 1})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
def null(x): return x
@null
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
# test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
def f(*, k=1): return closure
def f() -> int: return closure
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def test_lambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
l6 = lambda x, y, *, k=20: x+y+k
self.assertEqual(l6(1,2), 1+2+20)
self.assertEqual(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def test_simple_stmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def test_expr_stmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
# Check the heuristic for print & exec covers significant cases
# As well as placing some limits on false positives
def test_former_statements_refer_to_builtins(self):
keywords = "print", "exec"
# Cases where we want the custom error
cases = [
"{} foo",
"{} {{1:foo}}",
"if 1: {} foo",
"if 1: {} {{1:foo}}",
"if 1:\n {} foo",
"if 1:\n {} {{1:foo}}",
]
for keyword in keywords:
custom_msg = "call to '{}'".format(keyword)
for case in cases:
source = case.format(keyword)
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, custom_msg):
exec(source)
source = source.replace("foo", "(foo.)")
with self.subTest(source=source):
with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
exec(source)
def test_del_stmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def test_pass_stmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def test_break_stmt(self):
# 'break'
while 1: break
def test_continue_stmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def test_return(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def test_yield(self):
# Allowed as standalone statement
def g(): yield 1
def g(): yield from ()
# Allowed as RHS of assignment
def g(): x = yield 1
def g(): x = yield from ()
# Ordinary yield accepts implicit tuples
def g(): yield 1, 1
def g(): x = yield 1, 1
# 'yield from' does not
check_syntax_error(self, "def g(): yield from (), 1")
check_syntax_error(self, "def g(): x = yield from (), 1")
# Requires parentheses as subexpression
def g(): 1, (yield 1)
def g(): 1, (yield from ())
check_syntax_error(self, "def g(): 1, yield 1")
check_syntax_error(self, "def g(): 1, yield from ()")
# Requires parentheses as call argument
def g(): f((yield 1))
def g(): f((yield 1), 1)
def g(): f((yield from ()))
def g(): f((yield from ()), 1)
check_syntax_error(self, "def g(): f(yield 1)")
check_syntax_error(self, "def g(): f(yield 1, 1)")
check_syntax_error(self, "def g(): f(yield from ())")
check_syntax_error(self, "def g(): f(yield from (), 1)")
# Not allowed at top level
check_syntax_error(self, "yield")
check_syntax_error(self, "yield from")
# Not allowed at class scope
check_syntax_error(self, "class foo:yield 1")
check_syntax_error(self, "class foo:yield from ()")
def test_raise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def test_import(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def test_global(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def test_nonlocal(self):
# 'nonlocal' NAME (',' NAME)*
x = 0
y = 0
def f():
nonlocal x
nonlocal x, y
def test_assert(self):
# assertTruestmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert True
except AssertionError as e:
self.fail("'assert True' should not have raised an AssertionError")
try:
assert True, 'this should always pass'
except AssertionError as e:
self.fail("'assert True, msg' should not have "
"raised an AssertionError")
# these tests fail if python is run with -O, so check __debug__
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def testAssert2(self):
try:
assert 0, "msg"
except AssertionError as e:
self.assertEqual(e.args[0], "msg")
else:
self.fail("AssertionError not raised by assert 0")
try:
assert False
except AssertionError as e:
self.assertEqual(len(e.args), 0)
else:
self.fail("AssertionError not raised by 'assert False'")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def test_if(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def test_while(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def test_for(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def test_try(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def test_suite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def test_test(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def test_comparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def test_binary_mask_ops(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def test_shift_ops(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def test_additive_ops(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def test_multiplicative_ops(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def test_unary_ops(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def test_selectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def test_atoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def test_classdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def test_dictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def test_listcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def test_genexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def test_comprehension_specials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def test_if_else_expr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print(x)
return ret
# the next line is not allowed anymore
#self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
def test_main():
    # Entry point used both by regrtest and the __main__ guard below.
    run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
netzkolchose/django-cms | cms/models/placeholderpluginmodel.py | 1 | 1354 | # -*- coding: utf-8 -*-
from cms.models import CMSPlugin
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PlaceholderReference(CMSPlugin):
    """A named plugin that owns its own placeholder (slot ``clipboard``)
    and can exchange plugin trees with other placeholders."""
    cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_name='cms_placeholderreference', parent_link=True)
    name = models.CharField(max_length=255)
    placeholder_ref = PlaceholderField(slotname='clipboard')

    class Meta:
        app_label = 'cms'

    def __str__(self):
        return self.name

    def copy_to(self, placeholder, language):
        """Copy every plugin held by this reference into ``placeholder``."""
        source_plugins = self.placeholder_ref.get_plugins()
        copy_plugins_to(source_plugins, placeholder, to_language=language)

    def copy_from(self, placeholder, language):
        """Copy ``placeholder``'s plugins for ``language`` into this reference.

        Note the copies are created in *this* instance's language, not the
        source language.
        """
        source_plugins = placeholder.get_plugins(language)
        copy_plugins_to(source_plugins, self.placeholder_ref, to_language=self.language)

    def _relocate(self, plugins, target, language):
        # Re-home each plugin individually, persisting as we go.
        for child in plugins:
            child.placeholder = target
            child.language = language
            child.save()

    def move_to(self, placeholder, language):
        """Move this reference's plugins into ``placeholder``."""
        self._relocate(self.placeholder_ref.get_plugins(), placeholder, language)

    def move_from(self, placeholder, language):
        """Move ``placeholder``'s plugins into this reference."""
        self._relocate(placeholder.get_plugins(), self.placeholder_ref, language)
| bsd-3-clause |
hendradarwin/VTK | Filters/Modeling/Testing/Python/contour3DAll.py | 21 | 5161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class contour3DAll(vtk.test.Testing.vtkTest):
    """Regression test: contour the iron-protein volume once per scalar type
    and compare the rendering against the stored baseline image."""

    class Colors(object):
        '''
        Provides some wrappers for using color names.
        '''
        def __init__(self):
            '''
            Define a single instance of the NamedColors class here.
            '''
            self.namedColors = vtk.vtkNamedColors()

        def GetRGBColor(self, colorName):
            '''
            Return the red, green and blue components for a
            color as doubles.
            '''
            rgb = [0.0, 0.0, 0.0]  # black
            self.namedColors.GetColorRGB(colorName, rgb)
            return rgb

        def GetRGBAColor(self, colorName):
            '''
            Return the red, green, blue and alpha
            components for a color as doubles.
            '''
            rgba = [0.0, 0.0, 0.0, 1.0]  # black
            self.namedColors.GetColor(colorName, rgba)
            return rgba

    def testContour3DAll(self):
        # On older Macs, 10 is too low. Due to what looks like a driver bug
        # spectral lighting behaves sort of weird and produces small differences
        threshold = 30

        # Create the RenderWindow and Renderer.
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)

        # Create the pipeline: one reader shared by every per-type branch.
        slc = vtk.vtkStructuredPointsReader()
        slc.SetFileName(VTK_DATA_ROOT + "/Data/ironProt.vtk")

        actorColors = ["flesh", "banana", "grey", "pink", "carrot", "gainsboro", "tomato", "gold", "thistle", "chocolate"]
        types = ["UnsignedChar", "Char", "Short", "UnsignedShort", "Int", "UnsignedInt", "Long", "UnsignedLong", "Float", "Double"]

        i = 1
        clip = list()
        cast = list()
        iso = list()
        mapper = list()
        actor = list()
        colors = self.Colors()
        for idx, vtkType in enumerate(types):
            # Each scalar type gets its own 5-slab-thick clip of the volume.
            clip.append(vtk.vtkImageClip())
            clip[idx].SetInputConnection(slc.GetOutputPort())
            clip[idx].SetOutputWholeExtent(-1000, 1000, -1000, 1000, i, i + 5)
            i += 5
            cast.append(vtk.vtkImageCast())
            # BUG FIX: the original eval("cast[idx].SetOutputScalarTypeTo" + vtkType)
            # only *built* the bound method and never invoked it, so every cast
            # silently kept its default output type.  Look the setter up with
            # getattr and actually call it.
            # NOTE(review): the stored baseline image was produced by the buggy
            # code path; it may need regenerating after this fix.
            getattr(cast[idx], "SetOutputScalarTypeTo" + vtkType)()
            cast[idx].SetInputConnection(clip[idx].GetOutputPort())
            cast[idx].ClampOverflowOn()
            iso.append(vtk.vtkContourFilter())
            iso[idx].SetInputConnection(cast[idx].GetOutputPort())
            iso[idx].GenerateValues(1, 30, 30)
            iso[idx].ComputeScalarsOff()
            iso[idx].ComputeGradientsOff()
            mapper.append(vtk.vtkPolyDataMapper())
            mapper[idx].SetInputConnection(iso[idx].GetOutputPort())
            mapper[idx].ImmediateModeRenderingOn()
            actor.append(vtk.vtkActor())
            actor[idx].SetMapper(mapper[idx])
            # Direct call instead of the original string-built eval().
            actor[idx].GetProperty().SetDiffuseColor(colors.GetRGBColor(actorColors[idx]))
            actor[idx].GetProperty().SetSpecularPower(30)
            actor[idx].GetProperty().SetDiffuse(.7)
            actor[idx].GetProperty().SetSpecular(.5)
            ren.AddActor(actor[idx])

        # Outline of the whole volume, present but hidden.
        outline = vtk.vtkOutlineFilter()
        outline.SetInputConnection(slc.GetOutputPort())
        outlineMapper = vtk.vtkPolyDataMapper()
        outlineMapper.SetInputConnection(outline.GetOutputPort())
        outlineActor = vtk.vtkActor()
        outlineActor.SetMapper(outlineMapper)
        outlineActor.VisibilityOff()

        # Add the actors to the renderer, set the background and size.
        ren.AddActor(outlineActor)
        ren.SetBackground(0.9, .9, .9)
        ren.ResetCamera()
        ren.GetActiveCamera().SetViewAngle(30)
        ren.GetActiveCamera().Elevation(20)
        ren.GetActiveCamera().Azimuth(20)
        ren.GetActiveCamera().Zoom(1.5)
        ren.ResetCameraClippingRange()
        renWin.SetSize(400, 400)

        # Render and compare with the baseline.
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
        renWin.Render()

        img_file = "contour3DAll.png"
        # Consistency fix: use the threshold chosen above; the original
        # hard-coded 25 here, silently ignoring the variable and its comment.
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=threshold)
        vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(contour3DAll, 'test')])
| bsd-3-clause |
alan412/WR_RFID_RaspberryPi | rfid_webapp.py | 1 | 2313 | import cherrypy
import os
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
from ws4py.messaging import TextMessage
from cherrypy.process import plugins
from rfid_handling import RFIDHandling
# Register the ws4py plugin/tool with the CherryPy engine and create the
# single shared RFID reader used by the background polling task below.
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
rfidHandler = RFIDHandling()
def doRFID():
    # Block until the reader reports a tag, then push it to every connected
    # websocket client via the ws4py broadcast channel.
    tag = rfidHandler.blockingRead()
    cherrypy.engine.publish("websocket-broadcast", TextMessage(tag))
class WebSocketHandler(WebSocket):
    # Minimal ws4py handler; the prints are console debug traces only.

    def opened(self):
        # Called by ws4py once the websocket handshake completes.
        print "Socket Opened------"

    def closed(self, code, reason="unknown"):
        # Called by ws4py on disconnect; the close code/reason are ignored.
        print "Socket Closed------2"
class rfid(object):
    """Root CherryPy application: serves the web UI and the websocket path."""

    @cherrypy.expose
    def index(self):
        # Serve the single-page UI straight from the web/ directory.
        return open('web/index.html')

    @cherrypy.expose
    def ws(self):
        # The actual protocol upgrade is performed by the websocket tool
        # configured on this path; this body only runs after the upgrade.
        # you can access the class instance through
        handler = cherrypy.request.ws_handler
class rfidGeneratorWebService(object):
    # REST-dispatched sub-application (method names map to HTTP verbs).
    exposed = True

    @cherrypy.tools.accept(media='text/plain')
    def GET(self):
        # Return the caller's CherryPy session id as plain text.
        return cherrypy.session['session_id']
if __name__ == '__main__':
    rfidapp = rfid()
    rfidapp.generator = rfidGeneratorWebService()
    file_path = os.getcwd() + '/web'
    # This is the configuration and starting of the service
    cherrypy.config.update({'server.socket_host' : "0.0.0.0",
                            'server.socket_port' : 9090})
    # Poll the RFID reader from a background task so reads don't block requests.
    plugins.BackgroundTask(0.1, doRFID).start()
    # BUG FIX: the original called cherrypy.quickstart(rfid(), ...), mounting a
    # brand-new instance and silently discarding `rfidapp` together with its
    # `.generator` sub-application; mount the configured instance instead.
    cherrypy.quickstart(rfidapp, '/',
        {
            '/':
            {
                'tools.staticdir.root' : file_path,
            },
            '/logaccess.csv':
            {
                'tools.staticfile.on' : True,
                'tools.staticfile.filename' : file_path + '/logaccess.csv'
            },
            '/users.txt':
            {
                'tools.staticfile.on' : True,
                'tools.staticfile.filename' : file_path + '/users.txt'
            },
            '/js':
            {
                'tools.staticdir.on' : True,
                'tools.staticdir.dir' : file_path + '/js'
            },
            '/static':
            {
                'tools.staticdir.on' : True,
                'tools.staticdir.dir' : file_path + '/static'
            },
            '/ws':
            {
                'tools.websocket.on' : True,
                'tools.websocket.handler_cls' : WebSocketHandler
            }
        }
    )
| mit |
eahneahn/free | lib/python2.7/site-packages/django/contrib/auth/tests/test_views.py | 22 | 33423 | import itertools
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import int_to_base36, urlsafe_base64_decode, urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.importlib import import_module
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings, patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following auth-view test cases: provides
    login/logout helpers and URL/form-error assertion utilities.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, password='password'):
        # Log the fixture user in through the login view and assert the
        # session now carries an authenticated user id.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertTrue(SESSION_KEY in self.client.session)
        return response

    def logout(self):
        # Log out via the admin logout view and assert the session key is gone.
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(SESSION_KEY not in self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.

        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            # Components absent from either URL are deliberately ignored;
            # only present-in-both mismatches fail.
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Check that every URL name published by django.contrib.auth.urls
    can actually be reversed."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        cases = (
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {'uidb64': 'aaaaaaa', 'token': '1111-aaaaa'}),
            ('password_reset_complete', [], {}),
        )
        for url_name, url_args, url_kwargs in cases:
            try:
                reverse(url_name, args=url_args, kwargs=url_kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % url_name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    """End-to-end tests for the password reset flow: requesting the email,
    following the tokenized confirm link, and the redirect variants."""

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post('/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract the confirm URL from the email body; returns (full_url, path).
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_valid_base36(self):
        # Remove in Django 1.7
        url, path = self._test_confirm_start()
        path_parts = path.strip("/").split("/")
        # construct an old style (base36) URL by converting the base64 ID
        path_parts[1] = int_to_base36(int(urlsafe_base64_decode(path_parts[1])))
        response = self.client.get("/%s/%s-%s/" % tuple(path_parts))
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/123456-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user_base36(self):
        # Remove in Django 1.7
        response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        # A named URL as post_reset_redirect is resolved before redirecting.
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """The password reset confirm flow must also work with a swapped-in
    custom user model."""
    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract the confirm URL from the email body; returns (full_url, path).
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password change view: validation errors, success,
    login-required protection, and redirect variants."""

    def fail_login(self, password='password'):
        # Attempt a login that must fail and assert the standard
        # "invalid login" form error is rendered.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        # Override the base helper: this URLconf logs out via /logout/
        # rather than the admin logout view.
        response = self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Old credentials must stop working, new ones must work.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # The "done" page requires an authenticated user.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        # A named URL as post_change_redirect is resolved before redirecting.
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: template context, open-redirect protection,
    custom authentication forms and CSRF token rotation."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if Site._meta.installed:
            # With the sites framework installed, the real Site is exposed.
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Otherwise a RequestSite built from the request is substituted.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                        'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('login')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        response = self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["CSRF_COOKIE_USED"] = True
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value

        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        req.REQUEST = req.POST

        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value

        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        # Hitting a login_required view anonymously must redirect to `url`.
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        # A URL name is resolved before being used as the login URL.
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        # With an absolute login URL, `next` must become absolute too.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'https:///login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        # `next` is appended to the login URL's existing querystring.
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        # An existing `next` in LOGIN_URL is replaced with the real target.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""

    def assertLoginRedirectURLEqual(self, url):
        # A successful login must redirect to `url`.
        response = self.login()
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        # A URL name is resolved before redirecting.
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: default template, redirect options,
    open-redirect protection and session-language preservation."""

    def confirm_logged_out(self):
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')

        # The ?next querystring overrides the view's next_page argument.
        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()

    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session['django_language'] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key

        self.client.get('/logout/')
        self.assertEqual(self.client.session['django_language'], 'pl')
@skipIfCustomUser
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
    """Tests for the admin user changelist/change views: password-hash lookup
    protection and LogEntry change messages."""
    urls = 'django.contrib.auth.tests.urls_admin'

    def setUp(self):
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        self.admin = User.objects.get(pk=1)

    def get_user_data(self, user):
        # Flatten a User into the POST payload the admin change form expects.
        # Split datetime widgets use the _0 (date) / _1 (time) suffixes, and
        # the initial-* copies let the admin detect unchanged fields.
        return {
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'is_active': user.is_active,
            'is_staff': user.is_staff,
            'is_superuser': user.is_superuser,
            'last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'last_login_1': user.last_login.strftime('%H:%M:%S'),
            'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
            'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'first_name': user.first_name,
            'last_name': user.last_name,
        }

    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(logger_calls), 1)

    def test_user_change_email(self):
        data = self.get_user_data(self.admin)
        data['email'] = 'new_' + data['email']
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed email.')

    def test_user_not_change(self):
        # Re-posting unchanged data must log "No fields changed."
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
            self.get_user_data(self.admin)
        )
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'No fields changed.')

    def test_user_change_password(self):
        response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
            'password1': 'password1',
            'password2': 'password1',
        })
        self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed password.')
        # The old password must no longer authenticate.
        self.logout()
        self.login(password='password1')
| agpl-3.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/pip-1.1-py2.7.egg/pip/commands/help.py | 80 | 1071 | from pip.basecommand import (Command, command_dict,
load_all_commands, SUCCESS,
ERROR)
from pip.exceptions import CommandError
from pip.baseparser import parser
class HelpCommand(Command):
    """Implements ``pip help``: prints help for one command, or lists all
    visible commands when none is given."""
    name = 'help'
    usage = '%prog'
    summary = 'Show available commands'

    def run(self, options, args):
        load_all_commands()
        if args:
            ## FIXME: handle errors better here
            wanted = args[0]
            if wanted not in command_dict:
                raise CommandError('No command with the name: %s' % wanted)
            command_dict[wanted].parser.print_help()
            return SUCCESS
        # No command given: show global help plus the command index.
        parser.print_help()
        print('\nCommands available:')
        visible = [cmd for cmd in set(command_dict.values()) if not cmd.hidden]
        visible.sort(key=lambda cmd: cmd.name)
        for cmd in visible:
            print(' %s: %s' % (cmd.name, cmd.summary))
        return SUCCESS


HelpCommand()
| gpl-2.0 |
jmartinezchaine/OpenERP | openerp/addons/hr_attendance/hr_attendance.py | 8 | 7047 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
from tools.translate import _
class hr_action_reason(osv.osv):
    """Catalog of reasons an employee can attach to a sign in/out action."""
    _name = "hr.action.reason"
    _description = "Action Reason"
    _columns = {
        'name': fields.char('Reason', size=64, required=True, help='Specifies the reason for Signing In/Signing Out.'),
        # Restricts which action (sign in vs sign out) this reason applies to.
        'action_type': fields.selection([('sign_in', 'Sign in'), ('sign_out', 'Sign out')], "Action Type"),
    }
    _defaults = {
        'action_type': 'sign_in',
    }
# Old-style OpenERP model registration.
hr_action_reason()
def _employee_get(obj, cr, uid, context=None):
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
return ids and ids[0] or False
class hr_attendance(osv.osv):
    """One sign-in / sign-out / action event recorded for an employee."""
    _name = "hr.attendance"
    _description = "Attendance"

    def _day_compute(self, cr, uid, ids, fieldnames, args, context=None):
        """Functional field: the 'YYYY-MM-DD' day extracted from the event datetime."""
        res = dict.fromkeys(ids, '')
        for obj in self.browse(cr, uid, ids, context=context):
            # 'name' is the event datetime in '%Y-%m-%d %H:%M:%S' server format.
            res[obj.id] = time.strftime('%Y-%m-%d', time.strptime(obj.name, '%Y-%m-%d %H:%M:%S'))
        return res

    _columns = {
        'name': fields.datetime('Date', required=True, select=1),
        'action': fields.selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'), ('action','Action')], 'Action', required=True),
        'action_desc': fields.many2one("hr.action.reason", "Action Reason", domain="[('action_type', '=', action)]", help='Specifies the reason for Signing In/Signing Out in case of extra hours.'),
        'employee_id': fields.many2one('hr.employee', "Employee's Name", required=True, select=True),
        # Stored day string, recomputed from 'name'; used for grouping/reporting.
        'day': fields.function(_day_compute, type='char', string='Day', store=True, select=1, size=32),
    }
    _defaults = {
        'name': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), #please don't remove the lambda, if you remove it then the current time will not change
        'employee_id': _employee_get,
    }

    def _altern_si_so(self, cr, uid, ids, context=None):
        """ Alternance sign_in/sign_out check.

        Previous (if exists) must be of opposite action.
        Next (if exists) must be of opposite action.
        """
        for att in self.browse(cr, uid, ids, context=context):
            # search and browse for first previous and first next records
            prev_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '<', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name DESC')
            next_add_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '>', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name ASC')
            prev_atts = self.browse(cr, uid, prev_att_ids, context=context)
            next_atts = self.browse(cr, uid, next_add_ids, context=context)
            # check for alternance, return False if at least one condition is not satisfied
            if prev_atts and prev_atts[0].action == att.action: # previous exists and is same action
                return False
            if next_atts and next_atts[0].action == att.action: # next exists and is same action
                return False
            if (not prev_atts) and (not next_atts) and att.action != 'sign_in': # first attendance must be sign_in
                return False
        return True

    _constraints = [(_altern_si_so, 'Error: Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]
    # Newest events first by default.
    _order = 'name desc'

hr_attendance()
class hr_employee(osv.osv):
    """Extend hr.employee with an attendance state and sign-in/out actions."""
    _inherit = "hr.employee"
    _description = "Employee"

    def _state(self, cr, uid, ids, name, args, context=None):
        """Functional field: 'present' when the employee's latest
        sign_in/sign_out event is a sign_in, otherwise 'absent'."""
        result = {}
        if not ids:
            return result
        # Default every requested employee to absent; overridden below when a
        # latest attendance row exists.
        for id in ids:
            result[id] = 'absent'
        # Join each employee's MAX(name) (latest event datetime) back onto
        # hr_attendance to fetch the action of that latest event.
        cr.execute('SELECT hr_attendance.action, hr_attendance.employee_id \
                FROM ( \
                    SELECT MAX(name) AS name, employee_id \
                    FROM hr_attendance \
                    WHERE action in (\'sign_in\', \'sign_out\') \
                    GROUP BY employee_id \
                ) AS foo \
                LEFT JOIN hr_attendance \
                    ON (hr_attendance.employee_id = foo.employee_id \
                        AND hr_attendance.name = foo.name) \
                WHERE hr_attendance.employee_id IN %s',(tuple(ids),))
        for res in cr.fetchall():
            result[res[1]] = res[0] == 'sign_in' and 'present' or 'absent'
        return result

    _columns = {
        'state': fields.function(_state, type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Attendance'),
    }

    def _action_check(self, cr, uid, emp_id, dt=False, context=None):
        """Return True when `dt` (or now) is strictly later than the employee's
        latest attendance record, i.e. chronological order is preserved."""
        cr.execute('SELECT MAX(name) FROM hr_attendance WHERE employee_id=%s', (emp_id,))
        res = cr.fetchone()
        return not (res and (res[0]>=(dt or time.strftime('%Y-%m-%d %H:%M:%S'))))

    def attendance_action_change(self, cr, uid, ids, type='action', context=None, dt=False, *args):
        """Create an attendance record of kind `type` for each employee in ids.

        Returns the id of the last created record for sign_in/sign_out,
        True for plain 'action' events. Raises if the date would be anterior
        to an existing event.
        """
        obj_attendance = self.pool.get('hr.attendance')
        id = False
        warning_sign = 'sign'
        res = {}

        #Special case when button calls this method: type=context
        if isinstance(type, dict):
            type = type.get('type','action')
        if type == 'sign_in':
            warning_sign = "Sign In"
        elif type == 'sign_out':
            warning_sign = "Sign Out"

        for emp in self.read(cr, uid, ids, ['id'], context=context):
            if not self._action_check(cr, uid, emp['id'], dt, context):
                raise osv.except_osv(_('Warning'), _('You tried to %s with a date anterior to another event !\nTry to contact the administrator to correct attendances.')%(warning_sign,))

            res = {'action': type, 'employee_id': emp['id']}
            # When no explicit datetime is given, hr.attendance defaults to now.
            if dt:
                res['name'] = dt
            id = obj_attendance.create(cr, uid, res, context=context)

        if type != 'action':
            return id
        return True

hr_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
todaychi/hue | desktop/libs/liboozie/src/liboozie/credentials_tests.py | 33 | 2933 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nose.tools import assert_equal, assert_true
import beeswax.conf
from liboozie.credentials import Credentials
LOG = logging.getLogger(__name__)
class TestCredentials():
    """Unit tests for liboozie.credentials.Credentials parsing and generation."""

    # Expected credential type -> Oozie credential class mapping.
    CREDENTIALS = {
        "hcat": "org.apache.oozie.action.hadoop.HCatCredentials",
        "hive2": "org.apache.oozie.action.hadoop.Hive2Credentials",
        "hbase": "org.apache.oozie.action.hadoop.HbaseCredentials"
    }

    def test_parse_oozie(self):
        # Oozie publishes its credential classes as one comma-separated value;
        # _parse_oozie must split and trim it into a dict.
        oozie_credentialclasses = """
           hbase=org.apache.oozie.action.hadoop.HbaseCredentials,
           hcat=org.apache.oozie.action.hadoop.HCatCredentials,
           hive2=org.apache.oozie.action.hadoop.Hive2Credentials
      """
        oozie_config = {'oozie.credentials.credentialclasses': oozie_credentialclasses}

        creds = Credentials()

        assert_equal({
            'hive2': 'org.apache.oozie.action.hadoop.Hive2Credentials',
            'hbase': 'org.apache.oozie.action.hadoop.HbaseCredentials',
            'hcat': 'org.apache.oozie.action.hadoop.HCatCredentials'
        }, creds._parse_oozie(oozie_config)
        )

    def test_gen_properties(self):
        creds = Credentials(credentials=TestCredentials.CREDENTIALS.copy())

        hive_properties = {
            'thrift_uri': 'thrift://hue-koh-chang:9999',
            'kerberos_principal': 'hive',
            'hive2.server.principal': 'hive',
        }

        # Temporarily override the Hive server host/port configuration;
        # each set_for_testing() returns a restore callable.
        finish = (
            beeswax.conf.HIVE_SERVER_HOST.set_for_testing('hue-koh-chang'),
            beeswax.conf.HIVE_SERVER_PORT.set_for_testing(12345),
        )

        try:
            assert_equal({
                'hcat': {
                    'xml_name': 'hcat',
                    'properties': [
                        ('hcat.metastore.uri', 'thrift://hue-koh-chang:9999'),
                        ('hcat.metastore.principal', 'hive')
                    ]},
                'hive2': {
                    'xml_name': 'hive2',
                    'properties': [
                        ('hive2.jdbc.url', 'jdbc:hive2://hue-koh-chang:12345/default'),
                        ('hive2.server.principal', 'hive')
                    ]},
                'hbase': {
                    'xml_name': 'hbase',
                    'properties': []
                }
            }, creds.get_properties(hive_properties))
        finally:
            # Restore the original config values even if the assertion fails.
            for f in finish:
                f()
| apache-2.0 |
jwlawson/tensorflow | tensorflow/contrib/graph_editor/transform.py | 49 | 24923 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to transform an subgraph into another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from functools import partial
from six import iteritems
from six import iterkeys
from six import string_types
from six import StringIO
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"replace_t_with_placeholder_handler",
"keep_t_if_possible_handler",
"assign_renamed_collections_handler",
"transform_op_if_inside_handler",
"copy_op_handler",
"Transformer",
"TransformerInfo",
"copy",
"copy_with_input_replacements",
"graph_replace",
]
def replace_t_with_placeholder_handler(info, t):
  """Transform a tensor into a placeholder tensor.

  This handler is typically used to transform a subgraph input tensor into a
  placeholder.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # Build the placeholder inside the destination graph/scope.
  dst_graph = info.graph_
  with dst_graph.as_default():
    return util.make_placeholder_from_tensor(t, scope=info.scope_)
def keep_t_if_possible_handler(info, t):
  """Transform a tensor into itself (identity) if possible.

  This handler transform a tensor into itself if the source and destination
  graph are the same. Otherwise it will create a placeholder.
  This handler is typically used to transform a hidden input tensors.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # Cross-graph transfer requires a placeholder; same-graph keeps the tensor.
  if info.graph is not info.graph_:
    return replace_t_with_placeholder_handler(info, t)
  return t
def assign_renamed_collections_handler(info, elem, elem_):
  """Add the transformed elem to the (renamed) collections of elem.

  A collection is renamed only if is not a known key, as described in
  `tf.GraphKeys`.

  Args:
    info: Transform._TmpInfo instance.
    elem: the original element (`tf.Tensor` or `tf.Operation`)
    elem_: the transformed element
  """
  predefined_names = util.get_predefined_collection_names()
  for name, collection in iteritems(info.collections):
    if elem in collection:
      # Standard TF collection keys keep their name; custom ones are rescoped.
      target_name = name if name in predefined_names else info.new_name(name)
      info.graph_.add_to_collection(target_name, elem_)
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
  """Transform an optional op only if it is inside the subgraph.

  This handler is typically use to handle original op: it is fine to keep them
  if they are inside the subgraph, otherwise they are just ignored.

  Args:
    info: Transform._TmpInfo instance.
    op: the optional op to transform (or ignore).
    keep_if_possible: re-attach to the original op if possible, that is,
      if the source graph and the destination graph are the same.
  Returns:
    The transformed op or None.
  """
  # Ops inside the subgraph map to their already-transformed counterpart.
  if op in info.sgv.ops:
    return info.transformed_ops[op]
  # Outside the subgraph: keep the original only for in-graph transforms.
  if keep_if_possible and info.graph is info.graph_:
    return op
  return None
def copy_op_handler(info, op, copy_shape=True):
  """Copy a `tf.Operation`.

  Args:
    info: Transform._TmpInfo instance.
    op: the `tf.Operation` to be copied.
    copy_shape: also copy the shape of the tensor
  Returns:
    A `(op, op_outputs)` tuple containing the transformed op and its outputs.
  """
  # pylint: disable=protected-access

  # Clone the node def (deepcopy so later name mutation is isolated):
  node_def_ = deepcopy(op._node_def)

  # Transform name, then deduplicate it within the destination graph:
  name_ = info.new_name(op.name)
  name_ = info.graph_.unique_name(name_)
  node_def_.name = name_

  # Copy the other inputs needed for initialization
  output_types_ = op._output_types[:]
  input_types_ = op._input_types[:]

  # Make a copy of the op_def too.
  # Its unique to every _type_ of Operation.
  op_def_ = deepcopy(op._op_def)

  # Initialize a new Operation instance. Inputs and control inputs are left
  # empty here; they are wired up later by Transformer._connect_ops.
  op_ = tf_ops.Operation(node_def_, info.graph_, [], output_types_,
                         [], input_types_, None, op_def_)

  # copy the shape over
  if copy_shape:
    for t, t_ in zip(op.outputs, op_.outputs):
      t_.set_shape(t.get_shape())

  # Original op cannot be finalised here yet. Because some ops require this
  # attribute to exist, we will create a dummy original_op first and then
  # later finalise it with the actual original_op when all the ops have
  # been copied.
  if op._original_op:
    op_._original_op = op._original_op

  # Add op to the graph
  info.graph_._add_op(op_)

  return op_, op_.outputs
class TransformerInfo(object):
  """Contains information about the result of a transform operation."""

  def __init__(self, info):
    """Constructor.

    Args:
      info: an instance of Transformer._TmpInfo containing various internal
        information about the transform operation.
    """
    # Source/destination graphs and scopes of the transform.
    self._graph = info.graph
    self._scope = info.scope
    self._graph_ = info.graph_
    self._scope_ = info.scope_
    # original -> transformed mappings for ops and tensors.
    self._transformed_ops = info.transformed_ops
    self._transformed_ts = info.transformed_ts

  def _get_transformed_map(self, top):
    """Return the correct container depending on the type of `top`."""
    if isinstance(top, tf_ops.Operation):
      return self._transformed_ops
    elif isinstance(top, tf_ops.Tensor):
      return self._transformed_ts
    else:
      raise TypeError(
          "Expected a tf.Tensor or a tf.Operation, got a {}".format(
              type(top)))

  def _transformed_elem(self, original_top, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Args:
      original_top: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(original_top)
    if isinstance(original_top, string_types):
      # Lookup by name: linear scan over the mapping.
      for original, transformed in iteritems(transformed_map):
        if original.name == original_top:
          return transformed
      return None if missing_fn is None else missing_fn(original_top)
    else:
      if original_top not in transformed_map:
        return None if missing_fn is None else missing_fn(original_top)
      return transformed_map[original_top]

  def _original_elem(self, transformed_top, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Args:
      transformed_top: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(transformed_top)
    # Reverse lookup: match either by name (string key) or by identity.
    if isinstance(transformed_top, string_types):
      finder = lambda transformed: transformed.name == transformed_top
    else:
      finder = lambda transformed: transformed == transformed_top
    for original, transformed in iteritems(transformed_map):
      if finder(transformed):
        return original
    return None if missing_fn is None else missing_fn(transformed_top)

  def transformed(self, original, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Note that the output of this function mimics the hierarchy
    of its input argument `original`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      original: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
    return util.transform_tree(original, transformed_elem)

  def original(self, transformed, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Note that the output of this function mimics the hierarchy
    of its input argument `transformed`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      transformed: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    original_elem = partial(self._original_elem, missing_fn=missing_fn)
    return util.transform_tree(transformed, original_elem)

  def __str__(self):
    # Human-readable summary of the transform: graphs, scopes, op mapping.
    res = StringIO()
    print("Transform result info:", file=res)
    if self._graph == self._graph_:
      in_place_str = "" if self._scope_ else " IN-PLACE"
      print(" Within graph[{}]{}".format(
          id(self._graph), in_place_str), file=res)
    else:
      print(" graph[{}] => graph[{}]".format(
          id(self._graph), id(self._graph_)), file=res)
    if self._scope:
      print(" Relative to source scope: {}".format(self._scope), file=res)
    if self._scope_:
      print(" Scope destination: {}".format(self._scope_), file=res)
    print("Operations mapping:", file=res)
    for op, op_ in iteritems(self._transformed_ops):
      print(" {} => {}".format(op.name, op_.name), file=res)
    return res.getvalue()
class _TmpInfo(object):
  """Transformer temporary data.

  An instance of this class holds all the information relevant to a call
  to a transformer instance (that is, a call to __call__). An instance
  is created for the life-time of the __call__ function and is passed as
  argument to the handlers.
  """

  def __init__(self, sgv, dst_graph, dst_scope, src_scope):
    self.sgv = sgv
    # Frozen snapshots so handlers cannot accidentally mutate the view.
    self.sgv_inputs_set = frozenset(sgv.inputs)
    self.ops = frozenset(sgv.ops)
    self.control_outputs = util.ControlOutputs(sgv.graph)
    # Source graph/scope and destination graph/scope (trailing underscore =
    # destination, matching the module-wide naming convention).
    self.graph = sgv.graph
    self.scope = src_scope
    self.graph_ = dst_graph
    self.scope_ = dst_scope
    # original -> transformed maps, filled in as the transform progresses.
    self.transformed_ops = {}
    self.transformed_ts = {}
    # Snapshot of the source graph's collections, keyed by collection name.
    self.collections = dict((key, self.graph.get_collection(key))
                            for key in self.graph.get_all_collection_keys())
    self.cyclic_ops = []
    # Overridden by Transformer.__call__ with the user-configured handler.
    self.transform_original_op_handler = transform_op_if_inside_handler

  def new_name(self, name):
    """Compute a destination name from a source name.

    Args:
      name: the name to be "transformed".
    Returns:
      The transformed name.
    Raises:
      ValueError: if the source scope is used (that is, not an empty string)
        and the source name does not belong to the source scope.
    """
    scope = self.scope
    if not name.startswith(scope):
      raise ValueError("{} does not belong to source scope: {}.".format(
          name, scope))
    # Re-root the scope-relative part of the name under the destination scope.
    rel_name = name[len(scope):]
    name_ = self.scope_ + rel_name
    return name_
class Transformer(object):
  """Transform a subgraph into another one.

  By default, the constructor create a transform which copy a subgraph and
  replaces inputs with placeholders. This behavior can be modified by changing
  the handlers.
  """

  def __init__(self):
    """Transformer constructor.

    The following members can be modified:
    transform_op_handler: handle the transformation of a `tf.Operation`.
      This handler defaults to a simple copy.
    assign_collections_handler: handle the assignment of collections.
      This handler defaults to assigning new collections created under the
      given name-scope.
    transform_external_input_handler: handle the transform of the inputs to
      the given subgraph. This handler defaults to creating placeholders
      instead of the ops just before the input tensors of the subgraph.
    transform_external_hidden_input_handler: handle the transform of the
      hidden inputs of the subgraph, that is, the inputs which are not listed
      in sgv.inputs. This handler defaults to a transform which keep the same
      input if the source and destination graphs are the same, otherwise
      use placeholders.
    transform_original_op_handler: handle the transform of original_op. This
      handler defaults to transforming original_op only if they are in the
      subgraph, otherwise they are ignored.
    """

    # handlers
    self.transform_op_handler = copy_op_handler
    self.transform_control_input_handler = transform_op_if_inside_handler
    self.assign_collections_handler = assign_renamed_collections_handler
    self.transform_external_input_handler = replace_t_with_placeholder_handler
    self.transform_external_hidden_input_handler = keep_t_if_possible_handler
    self.transform_original_op_handler = transform_op_if_inside_handler

  def __call__(self,
               sgv,
               dst_graph,
               dst_scope,
               src_scope="",
               reuse_dst_scope=False):
    """Execute the transformation.

    Args:
      sgv: the source subgraph-view.
      dst_graph: the destination graph.
      dst_scope: the destination scope.
      src_scope: the source scope, which specify the path from which the
        relative path of the transformed nodes are computed. For instance, if
        src_scope is a/ and dst_scoped is b/, then the node a/x/y will have a
        relative path of x/y and will be transformed into b/x/y.
      reuse_dst_scope: if True the dst_scope is re-used if it already exists.
        Otherwise, the scope is given a unique name based on the one given
        by appending an underscore followed by a digit (default).
    Returns:
      A tuple `(sgv, info)` where:
        `sgv` is the transformed subgraph view;
        `info` is an instance of TransformerInfo containing
        information about the transform, including mapping between
        original and transformed tensors and operations.
    Raises:
      ValueError: if the arguments are invalid.
    """
    sgv = subgraph.make_view(sgv)
    if not isinstance(dst_graph, tf_ops.Graph):
      raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))

    # Normalize scopes to always end with '/'.
    src_scope = util.scope_finalize(src_scope)
    dst_scope = util.scope_finalize(dst_scope)

    # Potentially create new scope if reuse_dst_scope is False
    if dst_scope and not reuse_dst_scope:
      dst_scope = util.scope_finalize(dst_graph.unique_name(dst_scope[:-1]))

    # Create temporary info used during this transform call
    info = _TmpInfo(sgv, dst_graph, dst_scope, src_scope)
    info.transform_original_op_handler = self.transform_original_op_handler

    # Two-phase transform: copy all ops first, then wire their inputs,
    # control inputs and original_op links.
    self._copy_ops(info)
    self._connect_ops(info)

    # Compute information about the transformation
    res_info = TransformerInfo(info)
    sgv_ = self._transform_sgv(info, sgv)
    return sgv_, res_info

  def _copy_ops(self, info):
    """Copy ops without connecting them."""
    for op in info.sgv.ops:
      logging.debug("Copying op: %s", op.name)
      # TODO(fkp): return a subgraph?
      op_, op_outputs_ = self.transform_op_handler(info, op)
      if op is op_:
        raise ValueError("In-place transformation not allowed.")

      # Process op.
      info.transformed_ops[op] = op_
      self.assign_collections_handler(info, op, op_)

      # Process output tensors.
      for op_output, op_output_ in zip(op.outputs, op_outputs_):
        info.transformed_ts[op_output] = op_output_
        self.assign_collections_handler(info, op_output, op_output_)

  def _connect_ops(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug("Finalizing op: %s", op.name)
      op_ = info.transformed_ops[op]

      # pylint: disable=protected-access
      if op_.inputs:
        raise ValueError("The newly transformed op should not have "
                         "any inputs yet: {}".format(op_.name))
      inputs_ = [self._transformed_t(info, t) for t in op.inputs]
      for t in inputs_:
        op_._add_input(t)

      # Finalize original op.
      if op._original_op:
        original_op = info.transform_original_op_handler(info, op._original_op)
        if original_op is None:
          logging.debug("Could not find original op for: %s", op_.name)
        else:
          op_._original_op = original_op

      # Finalize control inputs:
      control_inputs_ = [self.transform_control_input_handler(info, ci)
                         for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
      reroute.add_control_inputs(op_, control_inputs_)

  def _transform_sgv(self, info, sgv):
    """Transform a subgraph view.

    For convenience, a transform operation returns a subgraph view of the
    transformed graph.

    Args:
      info: Temporary information for this transorfm call.
      sgv: the subgraph to be transformed.
    Returns:
      The transformed subgraph.
    """
    ops_ = [op_ for _, op_ in iteritems(info.transformed_ops)]
    sgv_ = subgraph.SubGraphView(ops_)
    sgv_inputs_ = sgv_.inputs
    sgv_outputs_ = sgv_.outputs

    # re-order inputs to match the ordering of the source subgraph view
    input_map_ = []
    for input_t in sgv.inputs:
      if input_t not in info.transformed_ts:
        continue
      input_t_ = info.transformed_ts[input_t]
      if input_t_ not in sgv_inputs_:
        continue
      input_t_index_ = sgv_.input_index(input_t_)
      input_map_.append(input_t_index_)

    # re-order outputs to match the ordering of the source subgraph view
    output_map_ = []
    for output_t in sgv.outputs:
      if output_t not in info.transformed_ts:
        continue
      output_t_ = info.transformed_ts[output_t]
      if output_t_ not in sgv_outputs_:
        continue
      output_t_index_ = sgv_.output_index(output_t_)
      output_map_.append(output_t_index_)

    return sgv_.remap(input_map_, output_map_)

  def _transformed_t(self, info, t):
    """Return tre transformed tensor of `t`."""
    if t not in info.transformed_ts:
      # If op is not in the subgraph.
      if t in info.sgv_inputs_set:
        # t is an input of the subgraph.
        return self.transform_external_input_handler(info, t)
      else:
        # t is a hidden input of the subgraph.
        return self.transform_external_hidden_input_handler(info, t)
    else:
      # If op is in the subgraph, just return its transformed.
      return info.transformed_ts[t]
def copy(sgv, dst_graph=None, dst_scope="", src_scope="",
         reuse_dst_scope=False):
  """Copy a subgraph.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules than the function subgraph.make_view.
    dst_graph: the destination graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if `dst_graph` is not a `tf.Graph`.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules than the function subgraph.make_view.
  """
  view = subgraph.make_view(sgv)
  # Default to an in-graph copy when no destination graph is given.
  target_graph = view.graph if dst_graph is None else dst_graph
  if not isinstance(target_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(target_graph)))

  transformer = Transformer()
  return transformer(
      view, target_graph, dst_scope, src_scope,
      reuse_dst_scope=reuse_dst_scope)
def copy_with_input_replacements(sgv, replacement_ts,
                                 dst_graph=None, dst_scope="", src_scope="",
                                 reuse_dst_scope=False):
  """Copy a subgraph, replacing some of its inputs.

  Note a replacement only happens if the tensor to be replaced
  is an input of the given subgraph. The inputs of a subgraph can
  be queried using sgv.inputs.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
    replacement_ts: dictionary mapping from original tensors to the
      replaced one.
    dst_graph: the destination graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if dst_graph is not a tf.Graph.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
  """
  view = subgraph.make_view(sgv)
  target_graph = view.graph if dst_graph is None else dst_graph
  if not isinstance(target_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(target_graph)))

  transformer = Transformer()

  def replace_t_with_replacement_handler(info, t):
    # Substitute the tensor if a replacement is listed; otherwise fall back
    # to the identity-if-possible behavior.
    if t not in replacement_ts:
      return keep_t_if_possible_handler(info, t)
    return replacement_ts[t]

  transformer.transform_external_input_handler = (
      replace_t_with_replacement_handler)
  return transformer(
      view, target_graph, dst_scope, src_scope,
      reuse_dst_scope=reuse_dst_scope)
def graph_replace(target_ts, replacement_ts, dst_scope="",
                  src_scope="", reuse_dst_scope=False):
  """Create a new graph which compute the targets from the replaced Tensors.

  Args:
    target_ts: a single tf.Tensor or an iterable of tf.Tensor.
    replacement_ts: dictionary mapping from original tensors to replaced tensors
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A single tf.Tensor or a list of target tf.Tensor, depending on
    the type of the input argument `target_ts`.
    The returned tensors are recomputed using the tensors from replacement_ts.
  Raises:
    ValueError: if the targets are not connected to replacement_ts.
  """
  # Identify operations in the graph that will change.
  # Start forward walk at Tensors that will be replaced, and
  # backward walk at the target output Tensors.
  flatten_target_ts = util.flatten_tree(target_ts)

  # Construct the forward control dependencies edges so that
  # the get_walks_intersection_ops can also traverse the
  # control dependencies.
  graph = util.get_unique_graph(flatten_target_ts, check_types=(tf_ops.Tensor))
  control_ios = util.ControlOutputs(graph)
  ops = select.get_walks_intersection_ops(list(iterkeys(replacement_ts)),
                                          flatten_target_ts,
                                          control_ios=control_ios)
  if not ops:
    raise ValueError("Targets and replacements are not connected!")

  # Create a copy of the relevant subgraph with the inputs substituted.
  _, info = copy_with_input_replacements(
      ops, replacement_ts, None, dst_scope, src_scope, reuse_dst_scope)

  # Return the transformed targets but keep the original if the transformed
  # counterpart cannot be found
  missing_fn = lambda original_t: original_t
  return info.transformed(target_ts, missing_fn)
| apache-2.0 |
hanselke/erpnext-1 | erpnext/accounts/doctype/accounts_settings/accounts_settings.py | 45 | 1236 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, comma_and
from frappe.model.document import Document
class AccountsSettings(Document):
    """Singleton accounts settings; propagates flags to companies/warehouses on save."""

    def on_update(self):
        # Expose the flag as a DB-wide default so other modules can read it.
        frappe.db.set_default("auto_accounting_for_stock", self.auto_accounting_for_stock)

        if cint(self.auto_accounting_for_stock):
            # set default perpetual account in company
            # Re-saving triggers each Company's own save hooks — presumably
            # these create the default accounts; TODO confirm.
            for company in frappe.db.sql("select name from tabCompany"):
                company = frappe.get_doc("Company", company[0])
                company.flags.ignore_permissions = True
                company.save()

            # Create account head for warehouses
            warehouse_list = frappe.db.sql("select name, company from tabWarehouse", as_dict=1)
            warehouse_with_no_company = [d.name for d in warehouse_list if not d.company]
            # Abort before re-saving anything if any warehouse lacks a company.
            if warehouse_with_no_company:
                frappe.throw(_("Company is missing in warehouses {0}").format(comma_and(warehouse_with_no_company)))
            for wh in warehouse_list:
                wh_doc = frappe.get_doc("Warehouse", wh.name)
                wh_doc.flags.ignore_permissions = True
                wh_doc.save()
| agpl-3.0 |
5aurabhpathak/masters-thesis | rulebaseprior.py | 2 | 6070 | #!/bin/env python3
#Author: Saurabh Pathak
#Prior rule-base
import subprocess, os, data
rulechunks, j, wall = None, 0, None
class _RuleChunk:
    """A contiguous token span [istart, iend) with a fixed rule-based translation.

    NOTE(review): the constructor has module-level side effects — it appends
    the new instance to the global `rulechunks` list and increments the global
    counter `j` when an info file is configured in `data`.
    """
    def __init__(self, begpos, length, trans):
        global j
        rulechunks.append(self)
        # fms=0.5 — presumably a neutral prior score for this chunk; TODO confirm.
        self.istart, self.iend, self.trans, self.fms = begpos, begpos + length, trans, 0.5
        if data.infofile is not None: j += 1
def apply_rules(ip, tag, l):
    '''rules to apply at the beginning

    Scan the Hindi token list ``ip`` (with per-token tag dicts ``tag`` and
    sentence length ``l``) and register _RuleChunk spans that translate
    auxiliary/modal verb constructions into fixed English phrases.
    Returns the freshly rebuilt module-level ``rulechunks`` list.
    '''
    global rulechunks
    rulechunks, i = [], 0
    #Auxillary verbs treatment
    while i < len(ip):
        # bound: a next token exists; lbound: a previous token exists; x: current token
        bound, lbound, x = i+1 < l, i > 0, ip[i]
        if lbound and tag[i-1]['POS'] == 'VM':
            # Auxiliaries directly following a main verb (VM).
            if x in {'रहता', 'रहती'} or (x in {'रहा', 'रही'} and lbound and tag[i-1]['suffix'] in {'ता', 'ती'}): #Present/Past Perfect Continuous - all 3 persons
                if bound and ip[i+1] == 'हूँ': _RuleChunk(i, 2, 'have been')
                elif bound and ip[i+1] == 'है': _RuleChunk(i, 2, 'has been')
                elif bound and ip[i+1] in {'था', 'थी'}: _RuleChunk(i, 2, 'had been')
            elif x in {'रहते', 'रहतीं'} or (x in {'रहे', 'रहीं'} and lbound and tag[i-1]['suffix'] in {'ते', 'ती'}):
                if bound and ip[i+1] == 'हैं': _RuleChunk(i, 2, 'have been')
                elif bound and ip[i+1] in {'थे', 'थीं'}: _RuleChunk(i, 2, 'had been')
            elif x in {'चुका', 'चुकी', 'चुके', 'दी', 'दिया', 'दिये', 'ली', 'लिया', 'लिये', 'गया', 'गई', 'गए'}: #Present/Past Perfect - all 3 persons
                if bound and ip[i+1] in {'हूँ', 'हैं'}: _RuleChunk(i, 2, 'have')
                elif bound and ip[i+1] in {'था', 'थी', 'थे', 'थीं'}: _RuleChunk(i, 2, 'had')
                elif bound and ip[i+1] == 'है': _RuleChunk(i, 2, 'has')
            # NOTE(review): reconstructed placement — skips the consumed auxiliary
            # token for all two-token chunks created above; confirm against the
            # original (indentation was lost in extraction).
            i += 1
        elif x == 'चाहिए': #Modal verb
            if bound and ip[i+1] == 'था':
                if lbound and ip[i-1] == 'होना': _RuleChunk(i-1, 3, 'should have been')
                else: _RuleChunk(i, 2, 'should have')
                i += 1
            elif lbound and tag[i-1]['coarsePOS'] == 'v':
                if ip[i-1] == 'होना': _RuleChunk(i-1, 2, 'should be')
                else: _RuleChunk(i, 1, 'should')
        elif x in {'सकता', 'सकती', 'सकते','सकूँगा', 'सकूँगी', 'सकेगा', 'सकेगी', 'सकेंगे', 'सकेंगी', 'सका', 'सके', 'सकी', 'सकीं'}: #Modal verb - All tenses
            if x in {'सकता', 'सकती', 'सकते'}:
                if bound and ip[i+1] in {'था', 'थी', 'थे', 'थीं'}:
                    if lbound and ip[i-1] == 'हो': _RuleChunk(i-1, 3, 'could have been')
                    else: _RuleChunk(i, 2, 'could have')
                    i += 1
                elif bound and ip[i+1] in {'हूँ', 'है', 'हैं'}:
                    if lbound and ip[i-1] == 'हो': _RuleChunk(i-1, 3, 'can be')
                    else: _RuleChunk(i, 2, 'can')
                    i += 1
                elif not bound or tag[i+1]['coarsePOS'] != 'v':
                    _RuleChunk(i, 1, 'could')
            elif lbound and tag[i-1]['coarsePOS'] == 'v': _RuleChunk(i, 1, 'could')
        elif x in {'रहूंगा', 'रहूंगी', 'रहेंगे'} and lbound and tag[i-1]['coarsePOS'] == 'v': _RuleChunk(i, 1, 'will be') #Future Continuous
        elif x in {'होऊंगा', 'होऊंगी', 'होंगे', 'होंगी', 'होगा', 'होगी'}:
            if lbound and tag[i-1]['coarsePOS'] == 'v':
                if ip[i-1] in {'रहा', 'रही', 'रहे', 'रही', 'रहता', 'रहती', 'रहते'} and i > 1 and tag[i-2]['POS'] == 'VM': _RuleChunk(i-1, 2, 'will have been') #Future Perfect Continuous
                elif ip[i-1] in {'चुका', 'चुकी', 'चुके', 'दी', 'दिया', 'दिये', 'ली', 'लिया', 'लिये', 'गया', 'गई', 'गए'} and i > 1 and tag[i-2]['POS'] == 'VM': _RuleChunk(i-1, 2, 'will have') #Future Perfect
                elif tag[i-1]['suffix'] in {'ता', 'ते', 'ती'}: _RuleChunk(i, 1, 'will be') #Future Continuous
                else: _RuleChunk(i, 1, 'will be')
        i += 1
    return rulechunks
def add_walls(istart, iend, tags, l, line):
    """Rebuild the token span [istart, iend) as a space-joined string,
    inserting a '<wall />' marker before each reordering boundary.

    A boundary is a coordinating conjunction (POS 'CC') or the relative
    pronoun 'जो' that is neither the first nor the last token and that
    follows a verb (optionally with one SYM token in between).  Emitting a
    wall also sets the module-level ``wall`` flag and bumps counter ``j``.
    Note: the result always carries a leading space, as in the original.
    """
    global j, wall
    rebuilt = ''
    for t in range(istart, iend):
        is_boundary_word = tags[t]['POS'] == 'CC' or tags[t]['lemma'] == 'जो'
        is_interior = t not in {0, l-1}
        follows_verb = False
        if is_boundary_word and is_interior:
            # Guarded so tags[t-1]/tags[t-2] are only read for interior tokens.
            follows_verb = tags[t-1]['coarsePOS'] == 'v' or (t > 1 and tags[t-1]['POS'] == 'SYM' and tags[t-2]['coarsePOS'] == 'v')
        if is_boundary_word and is_interior and follows_verb:
            rebuilt = ' '.join([rebuilt, '<wall />', line[t]])
            wall = True
            j += 1
        else:
            rebuilt = ' '.join([rebuilt, line[t]])
    return rebuilt
def tag_input(inp):
    """Run the external tagger script on ``inp`` and parse its TSV output.

    Returns a list with one dict per token line, mapping the fixed field
    names to the tab-separated columns (the first column is skipped).
    Lines containing 'EOL' are sentence separators and are ignored.
    """
    field_names = ['lemma', 'POS', 'suffix', 'coarsePOS', 'gender', 'number', 'case']
    tagger = subprocess.Popen(
        ['{}/make.sh'.format(os.environ['THESISDIR']), '{}'.format(inp)],
        stdout=subprocess.PIPE, universal_newlines=True)
    out, err = tagger.communicate()
    tokens = []
    for raw_line in out.splitlines():
        if 'EOL' in raw_line:
            continue
        cells = raw_line.split('\t')
        tokens.append(dict(zip(field_names, cells[1:])))
    return tokens
def tag_input_file(f):
    """Tag a whole file with the external tagger and parse its TSV output.

    Returns a list of sentences; each sentence is a list of per-token dicts
    (same field layout as tag_input).  'EOL' lines close a sentence.  The
    raw tagger output is also dumped to '<data.run>/tags.out'.
    """
    field_names = ['lemma', 'POS', 'suffix', 'coarsePOS', 'gender', 'number', 'case']
    tagger = subprocess.Popen(
        '{}/make.sh {}'.format(os.environ['THESISDIR'], f).split(),
        stdout=subprocess.PIPE, universal_newlines=True)
    out, err = tagger.communicate()
    # Keep a copy of the raw tagger output for debugging/inspection.
    with open('{}/tags.out'.format(data.run), 'w', encoding='utf-8') as tagop:
        tagop.write(out)
    sentences, current = [], []
    for raw_line in out.splitlines():
        if 'EOL' in raw_line:
            sentences.append(current)
            current = []
            continue
        cells = raw_line.split('\t')
        current.append(dict(zip(field_names, cells[1:])))
    return sentences
| gpl-3.0 |
jowr/le-logger | webapp/plotting.py | 1 | 8629 |
import numpy as np
import pandas as pd
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.layouts import gridplot
from bokeh.models import DatetimeTickFormatter
from bokeh.charts import BoxPlot
from bokeh.palettes import viridis as palette
from database import DataSet
#FIGURE_OPTIONS = dict(plot_width=1200, plot_height=300, logo="grey")
FIGURE_OPTIONS = dict(logo="grey")
SCATTER_OPTIONS = dict(alpha=0.5)
LINE_OPTIONS = dict(line_width=2, alpha=0.95)
def _get_dummies():
    """Return four DataSet instances pre-filled with dummy data.

    Used as a fallback whenever a plotting function is called without
    real data sets.
    """
    def fresh_set():
        ds = DataSet()
        ds.set_dummy_data()
        return ds
    return [fresh_set() for _ in range(4)]
def alldata(data_sets=[]):
    """Build linked temperature/humidity line plots over the full data range.

    Falls back to dummy data when ``data_sets`` is empty.  Returns the
    Bokeh (js_resources, css_resources, plot_script, plot_divs) tuple for
    embedding in a template.
    """
    ########## BUILD FIGURES ################
    if len(data_sets) < 1:
        data_sets = _get_dummies()
    colours = palette(len(data_sets))

    temp_fig = figure(responsive=True, x_axis_label = "Days", x_axis_type = "datetime", y_axis_label = "Temperature / C", y_axis_type = "linear", **FIGURE_OPTIONS)
    for colour, dataset in zip(colours, data_sets):
        temp_fig.line(dataset.time_series, dataset.temp_series, color = colour, legend = dataset.name, **LINE_OPTIONS)

    # Share the x-range so panning/zooming stays synchronised between plots.
    humi_fig = figure(x_range=temp_fig.x_range, responsive=True, x_axis_label = "Days", x_axis_type = "datetime", y_axis_label = "Relative humidity / \%", y_axis_type = "linear", **FIGURE_OPTIONS)
    for colour, dataset in zip(colours, data_sets):
        humi_fig.line(dataset.time_series, dataset.humi_series, color = colour, legend = dataset.name, **LINE_OPTIONS)

    for fig in [temp_fig, humi_fig]:
        fig.xaxis.formatter = DatetimeTickFormatter(formats=dict(
            hours=["%k:%M"],
            days=["%d. %m. %y"],
            months=["%m %Y"],
            years=["%Y"],
        ))

    all_data = gridplot([temp_fig, humi_fig], ncols=2, plot_width=500, plot_height=250, sizing_mode='scale_width',
                        toolbar_options=dict(logo="grey"))

    ########## RENDER PLOTS ################
    resources = INLINE
    js_resources = resources.render_js()
    css_resources = resources.render_css()
    plot_script, plot_divs = components({'Oversigt over alt data': all_data})
    return js_resources, css_resources, plot_script, plot_divs
def operating_hours(data_sets=[]):
    """Plot temperature/humidity restricted to opening hours.

    Samples taken on Saturday/Sunday (dayofweek 5/6) or outside 15:00-21:59
    are blanked to NaN so they leave gaps in the line plots.  Falls back to
    dummy data when ``data_sets`` is empty.  Returns the Bokeh
    (js_resources, css_resources, plot_script, plot_divs) embedding tuple.
    """
    ########## BUILD FIGURES ################
    if len(data_sets) < 1:
        data_sets = _get_dummies()
    series_count = len(data_sets)
    colours = palette(series_count)

    data_frames = []
    for ds in data_sets:
        df = ds.as_data_frame()
        # Weekend days (5 = Saturday, 6 = Sunday) and hours outside 15-21.
        day_filter = (df['timestamp'].dt.dayofweek == 5) | (df['timestamp'].dt.dayofweek == 6)
        hour_filter = (df['timestamp'].dt.hour < 15) | (df['timestamp'].dt.hour > 21)
        # .ix was deprecated and removed from pandas; use .loc/boolean masks.
        idx = df.index[day_filter | hour_filter]
        # NaN the excluded samples (np.nan: np.NaN was removed in NumPy 2.0).
        df.loc[idx, 'temperature'] = np.nan
        df.loc[idx, 'humidity'] = np.nan
        df['time'] = df['timestamp'].dt.time
        data_frames.append(df)

    all_data_temp = figure(responsive=True, x_axis_label = "Time of day", x_axis_type = "datetime", y_axis_label = "Temperature / C", y_axis_type = "linear", **FIGURE_OPTIONS)
    for (clr, ds, df) in zip(colours, data_sets, data_frames):
        all_data_temp.line(df.time, df.temperature, color = clr, legend = ds.name, **LINE_OPTIONS)

    # Shared x-range keeps the two plots panning/zooming together.
    all_data_humi = figure(x_range=all_data_temp.x_range, responsive=True, x_axis_label = "Time of day", x_axis_type = "datetime", y_axis_label = "Relative humidity / \%", y_axis_type = "linear", **FIGURE_OPTIONS)
    for (clr, ds, df) in zip(colours, data_sets, data_frames):
        all_data_humi.scatter(df.time, df.humidity, color = clr, legend = ds.name, **SCATTER_OPTIONS)

    for p in [all_data_temp, all_data_humi]:
        p.xaxis.formatter = DatetimeTickFormatter(formats=dict(
            hours=["%k:%M"],
            days=["%d. %m. %y"],
            months=["%m %Y"],
            years=["%Y"],
        ))

    all_data = gridplot([all_data_temp, all_data_humi], ncols=2, plot_width=500, plot_height=250, sizing_mode='scale_width',
                        toolbar_options=dict(logo="grey"))

    ########## RENDER PLOTS ################
    resources = INLINE
    js_resources = resources.render_js()
    css_resources = resources.render_css()
    plot_script, plot_divs = components({'Data fra kl. 15 - 22, uden loerdag': all_data})
    return js_resources, css_resources, plot_script, plot_divs
def statistics(data_sets=[]):
    """Box plots of temperature/humidity per hour-of-day and data set.

    Only samples within 15:00-21:59 on non-weekend days are kept (excluded
    rows are dropped).  Two grids are produced: one labelled per full data
    set name and one merged on the name's first comma-separated part.
    Returns the Bokeh (js_resources, css_resources, plot_script, plot_divs)
    embedding tuple.
    """
    ########## BUILD FIGURES ################
    if len(data_sets) < 1:
        data_sets = _get_dummies()

    frames = []
    for ds in data_sets:
        df = ds.as_data_frame()
        # Weekend days (5 = Saturday, 6 = Sunday) and hours outside 15-21.
        day_filter = (df['timestamp'].dt.dayofweek == 5) | (df['timestamp'].dt.dayofweek == 6)
        hour_filter = (df['timestamp'].dt.hour < 15) | (df['timestamp'].dt.hour > 21)
        # .ix was deprecated and removed from pandas; drop the excluded rows
        # directly (the original also NaN-ed them first, which was redundant
        # since they were dropped immediately afterwards).
        df = df.drop(df.index[day_filter | hour_filter])
        df['time'] = df['timestamp'].dt.time
        df['box_label'] = ["{1}-{2} - {0}".format(ds.name, tt, tt+1) for tt in df['timestamp'].dt.hour]
        df['box_label_merged'] = ["kl. {1}-{2} - {0}".format(ds.name.split(',')[0], tt, tt+1) for tt in df['timestamp'].dt.hour]
        df.loc[:, 'colour'] = ds.name
        df.loc[:, 'colour_merged'] = ds.name.split(',')[0]
        frames.append(df)
    # Concatenate once instead of growing the frame inside the loop.
    data_frame = pd.concat(frames, ignore_index=True)

    # NOTE(review): BoxPlot comes from bokeh.charts, which was removed from
    # Bokeh long ago — this module is pinned to an old Bokeh release.
    all_data_temp = BoxPlot(data_frame, values='temperature', label='box_label', color='colour', responsive=True, xlabel = "Time and place", ylabel = "Temperature / C", legend=False)
    all_data_humi = BoxPlot(data_frame, values='humidity', label='box_label', color='colour', responsive=True, xlabel = "Time and place", ylabel = "Relative humidity / \%", legend=False)
    all_data = gridplot([all_data_temp, all_data_humi], ncols=2, plot_width=500, plot_height=400, sizing_mode='scale_width',
                        toolbar_options=dict(logo="grey"))

    merged_data_temp = BoxPlot(data_frame, values='temperature', label='box_label_merged', color='colour_merged', responsive=True, xlabel = "Time and place", ylabel = "Temperature / C", legend=False)
    merged_data_humi = BoxPlot(data_frame, values='humidity', label='box_label_merged', color='colour_merged', responsive=True, xlabel = "Time and place", ylabel = "Relative humidity / \%", legend=False)
    merged_data = gridplot([merged_data_temp, merged_data_humi], ncols=2, plot_width=500, plot_height=400, sizing_mode='scale_width',
                           toolbar_options=dict(logo="grey"))

    ########## RENDER PLOTS ################
    resources = INLINE
    js_resources = resources.render_js()
    css_resources = resources.render_css()
    plot_script, plot_divs = components({'Data fra kl. 15 - 22, uden loerdag': all_data, 'Data fra kl. 15 - 22, uden loerdag, reference og uden udsugning': merged_data})
    return js_resources, css_resources, plot_script, plot_divs
Ervii/garage-time | garage/src/python/pants/backend/jvm/tasks/check_published_deps.py | 2 | 2455 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.jar_publish import PushDb
class CheckPublishedDeps(ConsoleTask):
  """Console task that reports jar dependencies whose pinned revision lags
  behind (or matches) the version recorded in the publish push-db."""

  @classmethod
  def register_options(cls, register):
    super(CheckPublishedDeps, cls).register_options(register)
    register('--print-uptodate', default=False, action='store_true',
             help='Print up-to-date dependencies.')

  def __init__(self, *args, **kwargs):
    super(CheckPublishedDeps, self).__init__(*args, **kwargs)

    self._print_uptodate = self.get_options().print_uptodate
    self.repos = self.context.config.getdict('jar-publish', 'repos')
    # Maps (org, name) of every exported artifact to the target providing it;
    # only the first provider seen is kept.
    self._artifacts_to_targets = {}

    def is_published(tgt):
      return tgt.is_exported

    for target in self.context.scan().targets(predicate=is_published):
      provided_jar, _, _ = target.get_artifact_info()
      artifact = (provided_jar.org, provided_jar.name)
      if artifact not in self._artifacts_to_targets:
        self._artifacts_to_targets[artifact] = target

  def console_output(self, targets):
    """Yield one status line per locally-published jar dependency found in
    the context's targets: 'outdated ...' when the pinned rev differs from
    the push-db version, 'up-to-date ...' when requested via the option."""
    # Cache loaded push-dbs by path so each file is read at most once.
    push_dbs = {}

    def get_version_and_sha(target):
      db = target.provides.repo.push_db(target)
      if db not in push_dbs:
        push_dbs[db] = PushDb.load(db)
      pushdb_entry = push_dbs[db].get_entry(target)
      return pushdb_entry.sem_ver, pushdb_entry.sha

    visited = set()
    for target in self.context.targets():
      if isinstance(target, (JarLibrary, JvmTarget)):
        for dep in target.jar_dependencies:
          artifact = (dep.org, dep.name)
          # Report each locally-published artifact only once.
          if artifact in self._artifacts_to_targets and artifact not in visited:
            visited.add(artifact)
            artifact_target = self._artifacts_to_targets[artifact]
            semver, sha = get_version_and_sha(artifact_target)
            if semver.version() != dep.rev:
              yield 'outdated %s#%s %s latest %s' % (dep.org, dep.name, dep.rev, semver.version())
            elif self._print_uptodate:
              yield 'up-to-date %s#%s %s' % (dep.org, dep.name, semver.version())
| apache-2.0 |
nmorriss7109/OzamaksQuest | main.py | 1 | 27087 | #COPYRIGHT 2016
#BY NATHAN MORRISSEY
print "//===\\\ ===// //\\\ ||\\\ //|| //\\\ || // //===\\\ "
print "|| || // //==\\\ || v// || //==\\\ ||// \\\____"
print "\\\___// //__// \\\ || || // \\\||\\\ _____||"
print " "
print " //=====\\\ || || ||==== //=====\\\ ========"
print " || || || || || \\\_____ ||"
print " || \\\|| || || ||==== \\\ ||"
print " \\\____\\\ \\\_____// ||____ ______// ||"
print " "
print "Centuries ago, the land of Amarin thrived under the rule of King Ozamak, \na wise and powerful ruler who placed the comfort of his kingdom above his own. \nEventually, however, Ozamak's power and riches began to corrupt his once-benevolent spirit. \nBy the end of the king's life, wars, famine, and disease had desomated his kingdom while he \nhid away in his castle's dungeons, admiring his wealth. Fearing death, Ozamak cast a spell \nupon himself, binding his consciousness to his body indefinitely. Ozamak the Lich guards the \ndungeon's treasure to this day. It is your duty to slay Ozamak and restore Amarin's glory!"
print " "
print 'What is your name, noble adventurer?'
name = raw_input('>')
print "Welcome, " + name + "! You can control your character by typing 'attack' to attack and enemy,\n'flee' to escape from an enemy, 'walk' to advance, 'inv' to display your inventory, 'heal' to use a healing potion."
print " "
defense = 1
damage = 10
crit_chance = .33
max_hp = 10000
hp = max_hp
healing_potions = 1
has_light = True
room = 0
sb_unlocked = False
game_over = False
flame_sword_unlocked = False
armor_unlocked = False
health_sheild_unlocked = False
moat_frozen = False
hp_regen = ['Health Regen', True]
freeze = ['Freeze', True]
confuse = ['Confuse', True]
fortify = ['Fortify', True]
sharpen = ['Sharpen', True]
light = ['Light', True]
weaken = ['Weaken', True]
player = [
name, #..................0
hp, #....................1
max_hp, #................2
damage, #................3
crit_chance, #...........4
healing_potions, #.......5
freeze, #................6 spell
hp_regen, #..............7 ''
confuse, #...............8 ''
fortify, #...............9 ''
sharpen, #...............10 ''
light, #.................11 ''
weaken, #................12 ''
has_light, #.............13
sb_unlocked, #...........14
flame_sword_unlocked, #..15
armor_unlocked, #........16
health_sheild_unlocked, #17
moat_frozen, #...........18
defense #................19
]
skeleton = ['skeleton', 1, 5]
spider = ['spider', 1, 10]
zombie = ['zombie', 30, 7]
witch_doctor = ['Witch Doctor', 100, 15]
posessed_armor = ['Posessed Armor', 200, 20]
ozamak = ['Ozamak the Lich', 1000, 40]
def reset(creature):
    """Return a fresh [name, hp, damage] stat list for the given creature.

    Fixes two defects of the original implementation:
    * it restored the wrong hit points for several enemies (zombie 1 instead
      of 30, Witch Doctor 1 instead of 100, Posessed Armor 2 instead of 200),
      making them trivial on every re-encounter;
    * it dispatched by comparing the whole (battle-mutated) list against the
      module-level pristine lists, which only worked by object identity.
    Dispatch is now keyed on the creature's name; unknown names fall back to
    Ozamak's stats, matching the original else-branch.
    """
    base_stats = {
        'skeleton': ['skeleton', 1, 5],
        'spider': ['spider', 1, 10],
        'zombie': ['zombie', 30, 7],
        'Witch Doctor': ['Witch Doctor', 100, 15],
        'Posessed Armor': ['Posessed Armor', 200, 20],
        'Ozamak the Lich': ['Ozamak the Lich', 1000, 40],
    }
    # Copy so later battles never mutate the pristine template.
    return list(base_stats.get(creature[0], ['Ozamak the Lich', 1000, 40]))
def battle(p, e):
    """Run a battle loop between player list ``p`` and enemy list ``e``.

    Mutates both lists in place (p[1] player HP, e[1] enemy HP) and returns
    the player list.  The player acts via raw_input: 'attack'/'a', 'heal',
    or 'spellbook'/'s' (once unlocked, p[14]).  Enemy damage is divided by
    the defense multiplier p[19], which is reset to 1.0 on exit.
    """
    #while both player hp and enemy hp are over 0
    while p[1] > 0:
        print " "
        print "your HP:" + str(p[1])
        print " "
        print e[0] + " HP:" + str(e[1])
        # Inner loop: keep prompting until the player spends the turn attacking.
        while 1:
            print " "
            userIn = raw_input('>')
            print " "
            if userIn.lower() == "attack" or userIn.lower() == 'a':
                print "you attacked and did " + str(p[3]) + " damage!"
                e[1] -= p[3]
                break
            elif userIn.lower() == 'heal':
                p = heal(p)
            elif (userIn.lower() == 'spellbook' or userIn.lower() == 's') and p[14]:
                p = spellbook(p, 0)
            else:
                print 'Cannot perform that command'
        if e[1] <= 0:
            print "You defeated the " + e[0] + "!"
            break
        #subtract enemy damage from player hp
        # NOTE(review): with the default int p[19] == 1 this is Python 2
        # integer division; after Fortify (p[19] == 1.5) damage goes float.
        print e[0] + " attacked and did " + str(e[2]/p[19]) + " damage!"
        p[1] -= e[2]/p[19]
        if p[1] <= 0:
            break
    # Fortify only lasts for one battle.
    p[19] = 1.0
    return p
def heal(p):
if p[5] > 0:
if p[1] <= (p[2] - 50):
p[1] += 50
else:
p[1] = p[2]
p[5] -= 1
print "You have healed! Current HP: " + str(p[1])
print "You have " + str(p[5]) + " healing potions left."
else:
print "No more healing potions!"
return p
def spellbook(p, room):
    """Show the spellbook menu and let the player cast one single-use spell.

    Draws the open book with the remaining spell names (p[6]..p[12] are
    [name, available] pairs), marks the spellbook as unlocked (p[14]), and
    applies the chosen spell's effect: Light sets p[13], Freeze sets p[18],
    Fortify bumps the defense multiplier p[19].  A used spell's name is
    blanked and its flag cleared.  Any other input exits the menu.
    Returns the (mutated) player list.  ``room`` is currently unused.
    """
    print " "
    print " ___________________________________________----------____________________________________________"
    print " ============== ================"
    print " | ============== =========== ============== |"
    print " | ============= | ============= |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | " + p[6][0] + " | " + p[10][0] + " |"
    print " | | |"
    print " | | |"
    print " | " + p[7][0] + " | " + p[11][0] + " |"
    print " | | |"
    print " | | |"
    print " | " + p[8][0] + " | " + p[12][0] + " |"
    print " | | |"
    print " | | |"
    print " | " + p[9][0] + " | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " | | |"
    print " ============= | =============="
    print " ================= =========== ==============="
    print " ============= ============"
    print " "
    print " "
    print "From now on, type 'spellbook' to access this menu. Type anything besides a spell's name to exit this menu."
    print "Which spell would you like to use? You may only use each spell once."
    p[14] = True
    userIn = raw_input('>')
    if userIn.lower() == 'light' and p[11][1]:
        print "You used Light. The spellbook glows to light the way."
        p[11][0] = ' '
        p[13] = True
        p[11][1] = False
        return p
    elif userIn.lower() == 'freeze' and p[6][1]:
        print "You used Freeze. Any water nearby is frozen."
        p[6][0] = ' '
        p[18] = True
        p[6][1] = False
        return p
    elif userIn.lower() == 'fortify' and p[9][1]:
        print "You used Fortify. Your defense temporarily increases."
        p[9][0] = ' '
        p[9][1] = False
        p[19] += .5
        return p
    else:
        # Covers unknown input, already-used spells, and unimplemented spells.
        print "That doesnt seem to do much..."
        return p
def rooms(room, p, sk, sp, zb, pa, wd):
if room == 1:
print "Your torch lights up the corridor, revealing a long hallway lined with various\nworn-out tapestries and rusty suits of armor."
p = user_input(p, room)
print "Despite doing your best to stay quiet, an army of skeletons crawl from every doorway,\nconverging on your location!"
p = battle(p, sk)
sk = reset(sk)
print "Another skeleton attacks!"
p = battle(p, sk)
sk = reset(sk)
print "Another skeleton attacks!"
p = battle(p, sk)
sk = reset(sk)
if p[1] <= 0:
print "You were killed."
return p
print "After slaying the first three skeletons and picking up a health potion (+50 hp),\nyou find an escape route through a crack in the rock wall."
p[5] += 1
print "Do you want to take the escape route or fight your way past the skeletons? (escape, fight)"
escaped = False
userIn = ''
while 1:
userIn = raw_input('>')
if userIn.lower() == 'escape':
p = rooms(4, p, sk, sp, zb, pa, wd)
escaped = True
break
elif userIn.lower() == 'fight':
break
elif userIn.lower() == 'heal':
p = heal(p)
else:
print "That is not a valid option."
if p[1] <= 0:
return p
if escaped == False:
print "You continue fighting the skeletons..."
print "Another skeleton attacks!"
p = battle(p, sk)
sk = reset(sk)
print "Another skeleton attacks!"
p = battle(p, sk)
sk = reset(sk)
if p[1] <= 0:
return p
print "Finally you slash your way past the last corpse and find yourself in a candle-lit chamber."
else:
print "Through the door, you climb a stone spiral staircase and arrive in a candle-lit chamber."
p = user_input(p, room)
print "Advancing, you come to an allyway with a tile floor. You step on one of the tiles but it falls\naway underfoot! You realize that this must be a puzzle. On the wall you read this inscription:"
print " "
print " "
print " I am a well-known sequence, you see,"
print " Most often solved recursively!"
print " "
print " "
print "you look ahead and notice that the tiles are numbered like so:"
print " "
print " "
print " FINISH"
print " =================================================================="
print " | | | | | |"
print " | 1 | 2 | 3 | 4 | 5 |"
print " | | | | | |"
print " | | | | | |"
print " =================================================================="
print " | | | | | |"
print " | 1 | 2 | 3 | 4 | 5 |"
print " | | | | | |"
print " | | | | | |"
print " =================================================================="
print " | | | | | |"
print " | 1 | 2 | 3 | 4 | 5 |"
print " | | | | | |"
print " | | | | | |"
print " =================================================================="
print " | | | | | |"
print " | 1 | 2 | 3 | 4 | 5 |"
print " | | | | | |"
print " | | | | | |"
print " =================================================================="
print " | | | | |############|"
print " | 1 | 2 | 3 | 4 |############|"
print " | | | | |############|"
print " | | | | |############|"
print " =================================================================="
print " START"
print " "
print "Which path would you like to take?"
userIn = raw_input('>')
if userIn == '1 1 2 3 5' or userIn == '11235':
print "That was the right path! You make it accross safely."
elif userIn == '1':
print "You step on the first platform and you hear a slow rumble...and then nothing.\n You have taken the correct first step."
userIn = raw_input('>')
if userIn == '1':
print "You step on to the next platform. You have chosen wisely."
userIn = raw_input('>')
if userIn == '2':
print "You step on to the next platform. You have chosen wisely."
userIn = raw_input('>')
if userIn == '3':
print "You step on to the next platform. You have chosen wisely."
userIn = raw_input('>')
if userIn == '5':
print "You step on to the next platform. You have chosen wisely."
else:
print "You take a wrong step and plummet to your death."
p[1] = 0
return p
else:
print "You take a wrong step and plummet to your death."
p[1] = 0
return p
else:
print "You take a wrong step and plummet to your death."
p[1] = 0
return p
else:
print "You take a wrong step and plummet to your death."
p[1] = 0
return p
else:
print "You take a wrong step and plummet to your death."
p[1] = 0
return p
print "On the other side of the trap you stop to catch your breath and hear a faint moaning coming from up ahead."
p = user_input(p, room)
print "It appears like just another zombie but something seems different. You can just barely make out a metalic\nnoise each time the creature steps cloaser. Now you can see your light glinting off of a fiery\nbroadsword and suit of armor. You can only guess at hat grotesque, rotting corpse lies beneath.\nYou draw your sword and prepart to battle the beast."
p = battle(p, pa)
if p[1] <= 0:
return p
print "After a long and grueling battle, your sword finds its way through the armor and strikes the creature's heart.\nYou grasp the sword from its dead fingers, claiming it as your own. (+10 base damage)"
p[3] += 10
p[15] = True
print "There seems to be an exit... You take it and it leads you back to the main room with the four doors."
return p
elif room == 2:
print "filler 2"
return p
elif room == 3:
print You see a large bronze lock on the front of the door. Engraved on the front it reads, DO NOT OPEN. DEAD INSIDE"
return p
elif room == 4:
print "You find yourself in a dusty library full of tomes and spellbooks.\nUnfortunately, you dropped your torch as you were making your escape. Now your only source of light\nis that of the torch through the crack you climbed through."
p = user_input(p, room)
print "You walk a bit further into the labyrinth of books but don't have enought light to proceed.\nYou decide to look through some spellbooks for something to help you navigate the maze. One ancient,\nleather-bound tome with a ruby-studded spine stands out in particular. You open it and find this:"
p[13] = False
while 1:
print "You can't find your way throught this maze in the dark. You need some sort of light source."
p = spellbook(p, room)
if p[13] == True:
break
print "That spell seems to have proveded you with a portable light source!"
p = user_input(p, room)
print "The aresafsdaa is illuminated just in time to reveal a nest of spiders.\nYou still have time to flee but in doing so you could miss out on some loot. (escape, fight)"
escaped = False
while 1:
userIn = raw_input('>')
if userIn.lower() == 'escape':
escaped = True
break
elif userIn.lower() == 'fight':
print "You have a spider in your sights!"
p = battle(p, sp)
sp = reset(sp)
print "Another spider attacks!"
p = battle(p, sp)
sp = reset(sp)
print "Another spider attacks!"
p = battle(p, sp)
sp = reset(sp)
if p[1] > 0:
break
else:
return p
else:
print "Not a valid command."
if escaped != True:
print "You defeated the nest and claim a healing potion (+50 hp)"
p[5] += 1
elif escaped == True:
print "You sneak past the spiders. You watch for a second as they meticulously craft their massive webs. You hurry past."
print "As you continue to walk, the rows of book cases grow farther apart and you eventually\nreach a deep moat filled with man-eating leaches. The exit seems to be just on the other side!"
while 1:
print "You must find a way to cross the moat."
p = user_input(p, room)
if p[18] == True:
break
print "The spell you used froze the water in front of you! You may now cross the frozen moat."
p = user_input(p, room)
print "You arrive at the exit"
return p
def user_input(p, room):
userIn = ''
while 1:
userIn = raw_input('>')
if userIn.lower() == 'walk' or userIn.lower() == 'w':
break
elif userIn.lower() == 'heal' or userIn.lower() == 'h':
p = heal(p)
elif (userIn.lower() == 'spellbook' or userIn.lower() == 's') and p[14] == True:
p = spellbook(p, room)
elif userIn == 'player':
print p
else:
print 'Cannot perform that command'
return p
def choose_room(p):
    """Show the carvings on the four doors and ask the player to pick one.

    Doors already completed (flame sword p[15], armor p[16], health shield
    p[17]) are removed from the offered choice string.  Returns the tuple
    (chosen door number as int, player list).
    """
    print "You walk over to the first door and see this carving on it:"
    print " "
    print " (0)"
    print " (0).(0)"
    print " |||"
    print " |||"
    print " |||"
    print " //=========\\\ "
    print " //^^^| | |^^^\\\ "
    print " V | | | V"
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " | | | "
    print " \ | /"
    print " \ /"
    print " V"
    # NOTE(review): reads the module-level ``room`` global here, not a local.
    p = user_input(p, room)
    print "You walk over to the second door and see this carving on it:"
    print " "
    print " __ "
    print " __ /==\ __"
    print " /==\ | | /==\ "
    print " | | \ \ | | _"
    print " \ \ | _ | \ \ /=\ "
    print " ___ | _ | | | | _ || _/ "
    print "|===\ | | | | | |/ |"
    print " \ \ | . \| . \| . / . /"
    print " \ \| / /"
    print " \ \ | | | / /"
    print " | | | | / / |"
    print " | \ \ / | | |"
    print " | |"
    print " / 0 0 0 0 0 \ "
    print " |___ . . . .___|"
    print " \____________/"
    p = user_input(p, room)
    print "You walk over to the third door and see this carving on it:"
    print " "
    print " |\_ _/|"
    print " | \___ ___/ |"
    print " | \____________/ |"
    print " | |"
    print " | |"
    print " | |||| |"
    print " | |||| |"
    print " \ ============ /"
    print " | ============ |"
    print " \ |||| /"
    print " \_ |||| _/"
    print " \___ ___/"
    print " \______/"
    print " "
    print "The fourth door seems to be locked by a powerful spell."
    print " "
    # Offer only the doors whose reward has not been collected yet.
    if p[15] and p[16]:
        choice = '(3)'
    elif p[15] and p[17]:
        choice = '(2)'
    elif p[16] and p[17]:
        choice = '(1)'
    elif p[15]:
        choice = '(2, 3)'
    elif p[16]:
        choice = '(1, 3)'
    elif p[17]:
        choice = '(1, 2)'
    else:
        choice = '(1, 2, 3)'
    print "Which door do you want to enter? " + choice
    userIn = ''
    while 1:
        userIn = raw_input('>')
        # NOTE(review): substring test — inputs like '(' or ',' also pass and
        # would crash int(userIn) below; a membership check against the digit
        # list would be safer.
        if userIn in choice:
            break
        elif userIn.lower() == 'heal':
            p = heal(p)
        else:
            print "That is not a valid door."
    print "You enter door " + userIn
    return int(userIn), p
#main game loop
# Each iteration is one pass through the dungeon entrance: fight the zombie,
# pick a door, and play the chosen room.  The loop exits when the player dies.
while 1:
    print " === ==== ==== ==== ==== ==== ==="
    print " | |__| |__| |__| |__| |__| |__| |"
    print " | |"
    print " | |"
    print " | |"
    print " | ===== ===== |"
    print " | / \ / \ |"
    print " | | | | | |"
    print " | | | | | |"
    print " | |_______| |_______| |"
    print " \_ _/"
    print " \_ _/"
    print " \ /"
    print " \_ _/"
    print " | |"
    print " ==== ==== ==== ==| |== ==== ==== ===="
    print " | |__| |__| |__| | | |__| |__| |__| |"
    print " | | | |"
    print " | | | |"
    print " | _______ | | _______ |"
    print " | ||_|_|_|| | | ||_|_|_|| |"
    print " | ||_|_|_|| | | ||_|_|_|| |"
    print " | ||_|_|_|| | | ||_|_|_|| |"
    print " | | ======= | |"
    print " | | /| | | |\ | |"
    print " | | |_|_|_|_|_| | |"
    print " | | |_|_|_|_|_| | |"
    print " |__________________________|_____________|_|_|_|_|_|____________|__________________________|"
    print " "
    print " "
    print "You enter the ancient dungeon, brushing aside thick sheets of cobwebs as you go.\nOn the wall you find a torch and light it, illuminating the dark chamber."
    player = user_input(player, room)
    print "As you turn the corner, a creature ambushes you!"
    player = battle(player, zombie)
    zombie = reset(zombie)
    # NOTE(review): leftover debug output — dumps the raw player list.
    print player
    if player[1] <= 0:
        print "You were defeated!"
        break
    print " "
    print "Past the creature's bloody carcass you make out the outline of four doors."
    player = user_input(player, room)
    # Only ask for a door while there are still uncompleted rooms.
    if not player[15] or not player[16] or not player[17]:
        room, player = choose_room(player)
    #go to that room
    player = rooms(room, player, skeleton, spider, zombie, posessed_armor, witch_doctor)
    if player[1] <= 0:
        break
#Game Over
print " //===\\\ //\\\ ||\\\//|| ||=== //===\\\ \\\ // ||=== ||==\\\ "
print "|| =\\\ //==\\\ || V/ || ||=== || || \\\ // ||=== ||==//"
print " \\\__|| // \\\ || || ||=== \\\___// \\\// ||=== || \\\ "
| gpl-3.0 |
Bloodyaugust/pongsugarlabcpp | lib/boost/tools/build/test/library_chain.py | 6 | 3579 | #!/usr/bin/python
# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that a chain of libraries works ok, no matter if we use static or shared
# linking.
import BoostBuild
import os
import string
t = BoostBuild.Tester(use_test_config=False)
# Stage the binary, so that it will be relinked without hardcode-dll-paths.
# That will check that we pass correct -rpath-link, even if not passing -rpath.
t.write("jamfile.jam", """\
stage dist : main ;
exe main : main.cpp b ;
""")
t.write("main.cpp", """\
void foo();
int main() { foo(); }
""")
t.write("jamroot.jam", "")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
geek() {}
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("b/b.cpp", """\
void geek();
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() { geek(); }
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.run_build_system(["-d2"], stderr=None)
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Check that <library> works for static linking.
t.write("b/jamfile.jam", "lib b : b.cpp : <library>../a//a ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a/<link>shared : <link>static ;")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Test that putting a library in sources of a searched library works.
t.write("jamfile.jam", """\
exe main : main.cpp png ;
lib png : z : <name>png ;
lib z : : <name>zzz ;
""")
t.run_build_system(["-a", "-d+2"], status=None, stderr=None)
# Try to find the "zzz" string either in response file (for Windows compilers),
# or in the standard output.
rsp = t.adjust_names("bin/$toolset/debug/main.exe.rsp")[0]
if os.path.exists(rsp) and ( string.find(open(rsp).read(), "zzz") != -1 ):
pass
elif string.find(t.stdout(), "zzz") != -1:
pass
else:
t.fail_test(1)
# Test main -> libb -> liba chain in the case where liba is a file and not a
# Boost.Build target.
t.rm(".")
t.write("jamroot.jam", "")
t.write("a/jamfile.jam", """\
lib a : a.cpp ;
install dist : a ;
""")
t.write("a/a.cpp", """\
#if defined(_WIN32)
__declspec(dllexport)
#endif
void a() {}
""")
t.run_build_system(subdir="a")
t.expect_addition("a/dist/a.dll")
if ( os.name == 'nt' or os.uname()[0].lower().startswith('cygwin') ) and \
BoostBuild.get_toolset() != 'gcc':
# This is a Windows import library -- we know the exact name.
file = "a/dist/a.lib"
else:
file = t.adjust_names("a/dist/a.dll")[0]
t.write("b/jamfile.jam", "lib b : b.cpp ../%s ;" % file)
t.write("b/b.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void a();
#if defined(_WIN32)
__declspec(dllexport)
#endif
void b() { a(); }
""")
t.write("jamroot.jam", "exe main : main.cpp b//b ;")
t.write("main.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void b();
int main() { b(); }
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.cleanup()
| gpl-2.0 |
18F/sqlalchemy-jsonapi | sqlalchemy_jsonapi.py | 2 | 13843 | """
SQLAlchemy-JSONAPI Serializer.
Colton J. Provias - cj@coltonprovias.com
http://github.com/coltonprovias/sqlalchemy-jsonapi
Licensed with MIT License
"""
from functools import wraps
from sqlalchemy.orm.base import MANYTOONE, ONETOMANY
def as_relationship(to_many=False, linked_key=None, link_key=None,
                    columns=None):
    """
    Turn a method into a pseudo-relationship for serialization.

    Arguments:
    - to_many: Whether the relationship is to-many or to-one.
    - linked_key: The key used in the linked section of the serialized data
    - link_key: The key used in the link section in the model's serialization
    - columns: Columns tied to this relationship

    Returns a decorator that attaches relationship metadata (direction,
    key, linked_key, local_columns) to the wrapped method so it can be
    handled like a real SQLAlchemy relationship during serialization.
    """
    def wrapper(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        # Mirror SQLAlchemy's relationship direction constants so pseudo
        # relationships are processed uniformly with real ones.
        if to_many:
            wrapped.direction = ONETOMANY
        else:
            wrapped.direction = MANYTOONE
        wrapped.key = link_key or wrapped.__name__
        wrapped.linked_key = linked_key or wrapped.key
        # A fresh list per call: the original `columns=[]` default was a
        # single shared mutable aliased by every decorated function.
        wrapped.local_columns = columns if columns is not None else []
        return wrapped
    return wrapper
class JSONAPIMixin:
    """ Mixin that enables serialization of a model. """

    # Columns to be excluded from serialization
    jsonapi_columns_exclude = []
    # Extra columns to be included with serialization
    jsonapi_columns_include = []
    # Hook for overriding column data
    jsonapi_columns_override = {}
    # Relationships to be excluded from serialization
    jsonapi_relationships_exclude = []
    # Extra relationships to be included with serialization
    jsonapi_relationships_include = []
    # Hook for overriding relationships
    jsonapi_relationships_override = {}

    def id(self):
        """ JSON API recommends having an id for each resource. """
        # Raise the exception type, not the NotImplemented singleton:
        # `raise NotImplemented` is itself a TypeError in Python 3.
        raise NotImplementedError

    def jsonapi_can_view(self):
        """ Return True if this model can be serialized. """
        return True
class SkipType(object):
    """Sentinel class marking values the converter should skip."""
class JSONAPI:
    """ The main JSONAPI serializer class. """

    # A dictionary of converters for serialization, keyed by type name.
    converters = {}

    def __init__(self, model):
        """
        Create a serializer object.

        Arguments:
        - model: Should be a SQLAlchemy model class.
        """
        self.model = model

    def inflector(self, to_inflect):
        """
        Format text for use in keys in serialization.

        Override this if you need to meet requirements on your front-end.

        Arguments:
        - to_inflect: The string to be inflected

        Returns the altered string.
        """
        return to_inflect

    def convert(self, item, to_convert):
        """
        Convert from Python objects to JSON-friendly values.

        Arguments:
        - item: A SQLAlchemy model instance
        - to_convert: Python object to be converted

        Returns either a string, int, float, bool, None, or SkipType.
        """
        if to_convert is None:
            return None
        if isinstance(to_convert, (str, int, float, bool)):
            return to_convert
        if callable(to_convert):
            return to_convert(item)
        # Use .get() so unregistered types fall through to SkipType
        # instead of raising KeyError.
        converter = self.converters.get(type(to_convert).__name__)
        if converter is not None:
            return converter(to_convert)
        return SkipType

    def get_api_key(self, model):
        """
        Generate a key for a model.

        Arguments:
        - model: SQLAlchemy model instance

        Returns an inflected key that is generated from jsonapi_key or from
        __tablename__.
        """
        api_key = getattr(model, 'jsonapi_key', model.__tablename__)
        return self.inflector(api_key)

    def sort_query(self, model, query, sorts):
        """
        Sort a query based upon provided sorts.

        Arguments:
        - model: SQLAlchemy model class
        - query: Instance of Query or AppenderQuery
        - sorts: A dictionary of sorts keyed by the api_key for each model

        Returns a query with appropriate order_by appended.
        """
        if sorts is None:
            return query
        api_key = self.get_api_key(model)
        for sort in sorts[api_key]:
            # A leading '-' requests descending order on that column.
            if sort.startswith('-'):
                sort_by = getattr(model, sort[1:]).desc()
            else:
                sort_by = getattr(model, sort)
            query = query.order_by(sort_by)
        return query

    def parse_include(self, include):
        """
        Parse the include query parameter.

        Arguments:
        - include: A list of resources to be included by link_keys

        Returns a dictionary of the parsed include list. A None value
        signifies that the resource itself should be dumped.
        """
        ret = {}
        for item in include:
            if '.' in item:
                local, remote = item.split('.', maxsplit=1)
            else:
                local = item
                remote = None
            if local not in ret.keys():
                ret[local] = []
            ret[local].append(remote)
        return ret

    def dump_column_data(self, item, fields):
        """
        Dump the data from the columns of a model instance.

        Arguments:
        - item: An SQLAlchemy model instance
        - fields: A list of requested fields. If it is None, all available
                  fields will be returned.

        Returns a dictionary representing the instance's data.
        """
        obj = dict()
        columns = list(item.__table__.columns)
        column_data = dict()
        api_key = self.get_api_key(item)
        for column in columns:
            if column.name in item.jsonapi_columns_exclude:
                continue
            column_data[column.name] = getattr(item, column.name)
        for column in item.jsonapi_columns_include:
            column_data[column] = getattr(item, column)
        column_data.update(item.jsonapi_columns_override)
        for name, value in column_data.items():
            key = self.inflector(name)
            # 'id' is always serialized; other keys honor the field filter.
            if key != 'id' and fields is not None and \
                    api_key in fields.keys() and \
                    key not in fields[api_key]:
                continue
            converted = self.convert(item, value)
            # Identity check: SkipType is a sentinel class, and a
            # converter's return value may define arbitrary __eq__.
            if converted is not SkipType:
                obj[key] = converted
        return obj

    def dump_relationship_data(self, item, obj, depth, fields, sort, include):
        """
        Handle relationship dumping for a model.

        Arguments:
        - item: SQLAlchemy model instance
        - obj: Column data for the model post-dump
        - depth: How much deeper into the relationships do we have to go
                 captain?
        - fields: A dictionary of fields to be parsed based on linked_keys.
        - sort: A dictionary of fields to sort by
        - include: A list of resources to be included by link_keys.
        """
        relationships = dict(list(map((lambda x: (x.key, x)),
                                      item.__mapper__.relationships)))
        for key in item.jsonapi_relationships_exclude:
            if key not in relationships.keys():
                continue
            del relationships[key]
        for key in item.jsonapi_relationships_include:
            relationships[key] = getattr(item, key)
        # Iterate key/value pairs: iterating the dict directly yields only
        # keys and made the 2-tuple unpack fail for any real override.
        for key, value in item.jsonapi_relationships_override.items():
            relationships[key] = getattr(item, value)
        if include is not None:
            include = self.parse_include(include)
        obj['links'] = {}
        linked = {}
        for key, relationship in relationships.items():
            dump_this = True
            link_key = self.inflector(key)
            if hasattr(relationship, 'mapper'):
                # Real SQLAlchemy relationship: derive the key from the
                # related mapper's class.
                mapper = relationship.mapper.class_
                linked_key = self.inflector(getattr(mapper, 'jsonapi_key',
                                                    mapper.__tablename__))
            else:
                # Pseudo-relationship built via as_relationship().
                linked_key = self.inflector(relationship.linked_key)
            if relationship.direction == MANYTOONE:
                # Replace the raw FK column(s) with a link entry.
                for column in relationship.local_columns:
                    if isinstance(column, str):
                        col_name = self.inflector(column)
                    else:
                        col_name = self.inflector(column.name)
                    if col_name in obj.keys():
                        obj['links'][link_key] = self.convert(item,
                                                              obj[col_name])
                        del obj[col_name]
            if include is not None:
                if link_key not in include.keys():
                    continue
                local_include = include[link_key]
                if None in include[link_key]:
                    local_include.remove(None)
                else:
                    # Requested only nested resources, not this one.
                    dump_this = False
            else:
                local_include = None
            if depth > 0 or (include is not None and
                             local_include is not None):
                if callable(relationship):
                    related = relationship()
                else:
                    related = getattr(item, relationship.key)
                if relationship.direction == MANYTOONE:
                    if isinstance(related, JSONAPIMixin):
                        if not related.jsonapi_can_view():
                            continue
                        if dump_this and linked_key not in linked.keys():
                            linked[linked_key] = {}
                        r_obj, r_lnk = self.dump_object(related, depth - 1,
                                                        fields, sort,
                                                        local_include)
                        linked.update(r_lnk)
                        if dump_this:
                            linked[linked_key][str(r_obj['id'])] = r_obj
                else:
                    if sort is not None and linked_key in sort.keys():
                        related = self.sort_query(mapper, related, sort)
                    if link_key not in obj['links'].keys():
                        obj['links'][link_key] = []
                    for local_item in list(related):
                        if not isinstance(local_item, JSONAPIMixin):
                            continue
                        if not local_item.jsonapi_can_view():
                            continue
                        if dump_this and linked_key not in linked.keys():
                            linked[linked_key] = {}
                        obj['links'][link_key].append(str(local_item.id))
                        r_obj, r_lnk = self.dump_object(local_item, depth - 1,
                                                        fields, sort,
                                                        local_include)
                        linked.update(r_lnk)
                        if dump_this:
                            linked[linked_key][str(r_obj['id'])] = r_obj
        return obj, linked

    def dump_object(self, item, depth, fields, sort, include):
        """
        Quick, simple way of coordinating a dump.

        Arguments:
        - item: Instance of a SQLAlchemy model
        - depth: Integer of how deep relationships should be queried
        - fields: Dictionary of fields to be returned, keyed by linked_keys
        - sort: Dictionary of fields to sort by, keyed by linked_keys
        - include: List of resources to side-load by link_keys.
        """
        obj = self.dump_column_data(item, fields)
        return self.dump_relationship_data(item, obj, depth, fields, sort,
                                           include)

    def serialize(self, to_serialize, depth=1, fields=None, sort=None,
                  include=None):
        """
        Perform the serialization to dictionary in JSON API format.

        Arguments:
        - to_serialize: The query, collection, or instance to serialize.
        - depth: How deep to side-load relationships. If include is provided,
                 this will be overridden
        - fields: Dictionary of fields to be returned keyed by linked_keys or
                  a list of fields for the current instance
        - sort: Dictionary of fields to sort by keyed by linked_keys or a list
                of fields to sort by for the current instance
        - include: List of resources to side-load by link_keys.
        """
        api_key = self.get_api_key(self.model)
        to_return = {api_key: [], 'linked': {}, 'meta': {}}
        linked = dict()
        if isinstance(to_serialize, JSONAPIMixin):
            # A single instance: wrap in a list and unwrap at the end.
            is_single = True
            to_serialize = [to_serialize]
        else:
            is_single = False
        # Normalize list-form fields/sort into the dict-keyed form.
        if isinstance(fields, list):
            fields = {api_key: fields}
        if isinstance(sort, list):
            sort = {api_key: sort}
        if not is_single:
            to_serialize = self.sort_query(self.model, to_serialize, sort)
        for item in to_serialize:
            if not item.jsonapi_can_view():
                continue
            dumped = self.dump_object(item, depth, fields, sort, include)
            if dumped is None:
                continue
            obj, new_linked = dumped
            to_return[api_key].append(obj)
            for key in new_linked.keys():
                if key not in linked.keys():
                    linked[key] = dict()
                linked[key].update(new_linked[key])
        for key in linked.keys():
            to_return['linked'][key] = list(linked[key].values())
        if is_single:
            to_return[api_key] = to_return[api_key][0]
        return to_return
| mit |
Nexenta/cinder | cinder/volume/drivers/coprhd/iscsi.py | 6 | 6543 | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Driver for EMC CoprHD iSCSI volumes."""
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.coprhd import common as coprhd_common
LOG = logging.getLogger(__name__)
@interface.volumedriver
class EMCCoprHDISCSIDriver(driver.ISCSIDriver):
    """CoprHD iSCSI Driver.

    Thin protocol-specific wrapper: every operation delegates to the
    shared EMCCoprHDDriverCommon helper created in __init__.
    """

    VERSION = "3.0.0.0"

    # ThirdPartySystems wiki page name
    CI_WIKI_NAME = "EMC_CoprHD_CI"

    def __init__(self, *args, **kwargs):
        super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs)
        # Protocol-independent CoprHD logic lives in this common driver.
        self.common = self._get_common_driver()

    def _get_common_driver(self):
        # 'iSCSI' selects the transport used for exports/attachments.
        return coprhd_common.EMCCoprHDDriverCommon(
            protocol='iSCSI',
            default_backend_name=self.__class__.__name__,
            configuration=self.configuration)

    def check_for_setup_error(self):
        """Validate backend configuration; raises on misconfiguration."""
        self.common.check_for_setup_error()

    def create_volume(self, volume):
        """Creates a Volume."""
        self.common.create_volume(volume, self)
        # Tag the backend volume with the originating volume type.
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned Volume."""
        self.common.create_cloned_volume(volume, src_vref)
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self.common.create_volume_from_snapshot(snapshot, volume)
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def extend_volume(self, volume, new_size):
        """Expands the size of the volume."""
        self.common.expand_volume(volume, new_size)

    def delete_volume(self, volume):
        """Deletes a volume."""
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        self.common.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.common.delete_snapshot(snapshot)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        # Intentionally a no-op for this driver.
        pass

    def create_export(self, context, volume, connector=None):
        """Driver entry point to get the export info for a new volume."""
        # Intentionally a no-op for this driver.
        pass

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        # Intentionally a no-op for this driver.
        pass

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        return self.common.create_consistencygroup(context, group)

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group."""
        return self.common.delete_consistencygroup(context, group, volumes)

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        """Updates volumes in consistency group."""
        return self.common.update_consistencygroup(group, add_volumes,
                                                   remove_volumes)

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot."""
        return self.common.create_cgsnapshot(cgsnapshot, snapshots)

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        return self.common.delete_cgsnapshot(cgsnapshot, snapshots)

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        # Intentionally a no-op for this driver.
        pass

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info."""
        initiator_ports = []
        initiator_ports.append(connector['initiator'])
        itls = self.common.initialize_connection(volume,
                                                 'iSCSI',
                                                 initiator_ports,
                                                 connector['host'])
        properties = {}
        properties['target_discovered'] = False
        properties['volume_id'] = volume['id']
        # Only the first initiator/target/LUN mapping is reported back.
        if itls:
            properties['target_iqn'] = itls[0]['target']['port']
            properties['target_portal'] = '%s:%s' % (
                itls[0]['target']['ip_address'],
                itls[0]['target']['tcp_port'])
            properties['target_lun'] = itls[0]['hlu']
        # provider_auth is a space-separated "method username secret" string.
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret
        LOG.debug("ISCSI properties: %s", properties)
        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        init_ports = []
        init_ports.append(connector['initiator'])
        self.common.terminate_connection(volume,
                                         'iSCSI',
                                         init_ports,
                                         connector['host'])

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, update the stats first.
        """
        if refresh:
            self.update_volume_stats()
        return self._stats

    def update_volume_stats(self):
        """Retrieve stats info from virtual pool/virtual array."""
        LOG.debug("Updating volume stats")
        self._stats = self.common.update_volume_stats()

    def retype(self, ctxt, volume, new_type, diff, host):
        """Change the volume type."""
        return self.common.retype(ctxt, volume, new_type, diff, host)
| apache-2.0 |
ojengwa/oh-mainline | vendor/packages/kombu/kombu/clocks.py | 35 | 4494 | """
kombu.clocks
============
Logical Clocks and Synchronization.
"""
from __future__ import absolute_import
from threading import Lock
from itertools import islice
from operator import itemgetter
from .five import zip
__all__ = ['LamportClock', 'timetuple']
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
class timetuple(tuple):
    """Tuple of event clock information.

    Can be used as part of a heap to keep events ordered.

    :param clock: Event clock value.
    :param timestamp: Event UNIX timestamp value.
    :param id: Event host id (e.g. ``hostname:pid``).
    :param obj: Optional obj to associate with this event.

    """
    __slots__ = ()

    def __new__(cls, clock, timestamp, id, obj=None):
        return tuple.__new__(cls, (clock, timestamp, id, obj))

    def __repr__(self):
        return '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'.format(*self)

    def __getnewargs__(self):
        return tuple(self)

    def __lt__(self, other):
        # Field layout: 0 = clock, 1 = timestamp, 2 = process id.
        try:
            our_clock, their_clock = self[0], other[0]
            if our_clock and their_clock:
                # Both sides carry a logical clock: compare clocks, and
                # break ties on the lower process id.
                if our_clock == their_clock:
                    return self[2] < other[2]
                return our_clock < their_clock
            # Missing clock value(s): fall back to the wall-clock timestamp.
            return self[1] < other[1]
        except IndexError:
            return NotImplemented

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return not other < self

    def __ge__(self, other):
        return not self < other

    @property
    def clock(self):
        return self[0]

    @property
    def timestamp(self):
        return self[1]

    @property
    def id(self):
        return self[2]

    @property
    def obj(self):
        return self[3]
class LamportClock(object):
"""Lamport's logical clock.
From Wikipedia:
A Lamport logical clock is a monotonically incrementing software counter
maintained in each process. It follows some simple rules:
* A process increments its counter before each event in that process;
* When a process sends a message, it includes its counter value with
the message;
* On receiving a message, the receiver process sets its counter to be
greater than the maximum of its own value and the received value
before it considers the message received.
Conceptually, this logical clock can be thought of as a clock that only
has meaning in relation to messages moving between processes. When a
process receives a message, it resynchronizes its logical clock with
the sender.
.. seealso::
* `Lamport timestamps`_
* `Lamports distributed mutex`_
.. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps
.. _`Lamports distributed mutex`: http://bit.ly/p99ybE
*Usage*
When sending a message use :meth:`forward` to increment the clock,
when receiving a message use :meth:`adjust` to sync with
the time stamp of the incoming message.
"""
#: The clocks current value.
value = 0
def __init__(self, initial_value=0, Lock=Lock):
self.value = initial_value
self.mutex = Lock()
def adjust(self, other):
with self.mutex:
value = self.value = max(self.value, other) + 1
return value
def forward(self):
with self.mutex:
self.value += 1
return self.value
def sort_heap(self, h):
"""List of tuples containing at least two elements, representing
an event, where the first element is the event's scalar clock value,
and the second element is the id of the process (usually
``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])``
The list must already be sorted, which is why we refer to it as a
heap.
The tuple will not be unpacked, so more than two elements can be
present.
Will return the latest event.
"""
if h[0][0] == h[1][0]:
same = []
for PN in zip(h, islice(h, 1, None)):
if PN[0][0] != PN[1][0]:
break # Prev and Next's clocks differ
same.append(PN[0])
# return first item sorted by process id
return sorted(same, key=lambda event: event[1])[0]
# clock values unique, return first item
return h[0]
def __str__(self):
return str(self.value)
def __repr__(self):
return '<LamportClock: {0.value}>'.format(self)
| agpl-3.0 |
mou4e/zirconium | third_party/closure_compiler/error_filter.py | 52 | 5197 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implement filtering out closure compiler errors due to incorrect type-
checking on promise-based return types.
The compiler's type-checker doesn't correctly unwrap Promised return values
prior to type-checking them. There are a couple of scenarios where this occurs,
examples can be found below in the code that deals with each specific scenario.
This filtering code applies a set of matchers to the errors that the compiler
emits. Each matcher fits a known pattern for compiler errors arising from the
issue described above. If any of the matchers matches an error, that error is
filtered out of the error list.
Note that this is just a coarse filter. It doesn't, for example, check that the
unwrapped promise type actually matches the type accepted by the next callback
in the Promise chain. Doing so would be the correct way to fix this problem,
but that fix belongs in the compiler.
"""
import re
class PromiseErrorFilter:
  """Runs checks to filter out promise chain errors."""

  def __init__(self):
    # Each pattern recognizes one known shape of spurious Promise error.
    self._allowed_error_patterns = [
        ChainedPromisePattern(),
        ReturnedPromisePattern()
    ]

  def filter(self, error_list):
    """Filters out errors matching any of the allowed patterns.

    Args:
        error_list: A list of errors from the closure compiler.

    Return:
        A list of errors, with spurious Promise type errors removed.
    """
    kept = []
    for error in error_list:
      if not self._should_ignore(error):
        kept.append(error)
    return kept

  def _should_ignore(self, error):
    """Check the given error against all the filters. An error should be
    ignored if it is a match for any of the allowed message patterns.

    Args:
        error: A single entry from the closure compiler error list.

    Return:
        True if the error should be ignored, False otherwise.
    """
    return any(pattern.match(error)
               for pattern in self._allowed_error_patterns)
class ErrorPattern:
  """A matcher for compiler error messages. This matches compiler type errors,
  which look like:
    # ERROR - <some error message>
    # found   : <some type expression>
    # required: <some type expression>
  The message and type expressions are customizable.
  """
  def __init__(self, msg, found_pattern, required_pattern):
    # A string literal that is compared to the first line of the error.
    self._error_msg = msg
    # A regex for matching the found type. Raw strings keep the \s escapes
    # from being (mis)interpreted as string-literal escape sequences.
    self._found_line_regex = re.compile(r"found\s*:\s*" + found_pattern)
    # A regex for matching the required type.
    self._required_line_regex = re.compile(r"required:\s*" + required_pattern)

  def match(self, error):
    """Return True if |error| matches this pattern's message and type lines."""
    error_lines = error.split('\n')
    # A matching error needs the message line plus found/required lines;
    # guard so truncated errors don't raise IndexError below.
    if len(error_lines) < 3:
      return False
    # Match the error message to see if this pattern applies to the given error.
    # If the error message matches, then compare the found and required lines.
    if self._error_msg not in error_lines[0]:
      return False
    return bool(self._found_line_regex.match(error_lines[1]) and
                self._required_line_regex.match(error_lines[2]))
class ChainedPromisePattern(ErrorPattern):
  """Matcher for spurious errors arising from chained promises. Example code:
    Promise.resolve()
        .then(
            /** @return {!Promise<string>} */
            function() { return Promise.resolve('foo'); })
        .then(
            /** @param {string} s */
            function(s) { console.log(s); });
  The compiler will emit an error that looks like
    ERROR - actual parameter 1 of Promise.prototype.then does not match formal
    parameter
    found   : function (string): undefined
    required: (function (Promise<string>): ?|null|undefined)
  """
  def __init__(self):
    # Matches the initial error message.
    msg = ("ERROR - actual parameter 1 of Promise.prototype.then "
           "does not match formal parameter")
    # Raw strings so the regex escapes (\s, \() survive literally.
    # Examples:
    #  - function (string): Promise<string>
    #  - function ((SomeType|null)): SomeOtherType
    found_pattern = r"function\s*\(.*\):\s*.*"
    # Examples:
    #  - (function(Promise<string>): ?|null|undefined)
    required_pattern = r"\(function\s*\(Promise<.*>\):\s*.*\)"
    ErrorPattern.__init__(self, msg, found_pattern, required_pattern)
class ReturnedPromisePattern(ErrorPattern):
  """Matcher for spurious errors arising from Promised return values. Example
  code:
    /** @return {!Promise<string>} */
    var getStringAsync = function() {
      /** @return {!Promise<string>} */
      var generateString = function() {return Promise.resolve('foo');};
      return Promise.resolve().then(generateString);
    };
  The compiler will emit an error that looks like
    ERROR - inconsistent return type
    found   : Promise<Promise<string>>
    required: Promise<string>
  """
  def __init__(self):
    # Matches the initial error message.
    msg = "ERROR - inconsistent return type"
    # Raw strings so the regex metacharacters survive literally.
    # Example:
    #  - Promise<Promise<string>>
    found_pattern = r"Promise<Promise<[^<>]*>"
    # Example:
    #  - Promise<string>
    required_pattern = r"Promise<[^<>]*>"
    ErrorPattern.__init__(self, msg, found_pattern, required_pattern)
| bsd-3-clause |
benklaasen/namebench | nb_third_party/simplejson/ordered_dict.py | 1039 | 3370 | """Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
# Python 2.4 has no builtin all(); define a fallback only when it is missing.
try:
    all
except NameError:
    def all(iterable):
        for value in iterable:
            if not value:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dict subclass that remembers insertion order.

    Order is tracked with a circular doubly linked list of
    ``[key, prev, next]`` nodes stored in ``self.__map``; ``self.__end``
    is the list's sentinel node. The dict itself stores key -> value.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        # Only initialize the linked list the first time __init__ runs;
        # re-running __init__ on an existing instance must not drop items.
        try:
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a new node in just before the sentinel (i.e. at the end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO if *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: the linked-list internals are not picklable
        # (self-referential lists), so temporarily strip them and rebuild
        # from the ordered item list instead.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # Derive the bulk of the mapping API from DictMixin so everything is
    # routed through the order-aware __setitem__/__delitem__/__iter__.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Comparison with another OrderedDict is order-sensitive;
        # comparison with a plain dict is order-insensitive.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in  zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| apache-2.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/pacbio-dextractor/package.py | 3 | 2299 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PacbioDextractor(MakefilePackage):
    """The Dextractor and Compression Command Library. This is a special
       fork required by some pacbio utilities."""

    homepage = "https://github.com/PacificBiosciences/DEXTRACTOR"
    url = "https://github.com/PacificBiosciences/DEXTRACTOR"

    version('2016-08-09',
            git='https://github.com/PacificBiosciences/DEXTRACTOR.git',
            commit='89726800346d0bed15d98dcc577f4c7733aab4b1')

    depends_on('hdf5')
    depends_on('gmake', type='build')

    def edit(self, spec, prefix):
        """Point the Makefiles at Spack's HDF5 and the install prefix."""
        mkdirp(prefix.bin)
        makefile = FileFilter('Makefile')
        # Raw strings so the \s regex escapes are passed through literally.
        makefile.filter(r'PATH_HDF5\s*=\s*/sw/apps/hdf5/current',
                        'PATH_HDF5 = ' + spec['hdf5'].prefix)
        # Fixed from 'PATH_HDF5\*s=\s*...' -- a \s*/\*s transposition typo
        # that could never match a Makefile assignment; drop the alternate
        # hardcoded HDF5 path as intended.
        makefile.filter(r'PATH_HDF5\s*=\s*/usr/local/hdf5', '')
        makefile.filter(r'DEST_DIR\s*=\s*~/bin', 'DEST_DIR = ' + prefix.bin)
        gmf = FileFilter('GNUmakefile')
        # Install by plain copy instead of rsync (rsync may be unavailable).
        gmf.filter(r'rsync\s*-av\s*\$\{ALL\}\s*\$\{PREFIX\}/bin',
                   'cp ${ALL} ' + prefix.bin)
| lgpl-2.1 |
dr-guangtou/KungPao | kungpao/isophote/helper.py | 1 | 5156 | """Helper functions for isophote analysis."""
import os
import platform
import subprocess
import numpy as np
from matplotlib.patches import Ellipse
import kungpao
__all__ = ['fits_to_pl', 'iraf_commands', 'fix_pa_profile', 'isophote_to_ellip',
'save_isophote_output', 'remove_index_from_output']
def fits_to_pl(ximage, fits, output=None, verbose=False):
    """Convert a FITS image into the IRAF .pl format.

    Parameters
    ----------
    ximage : string
        Location of the x_images.e executable file.
    fits : string
        Input FITS file name.
    output : string, optional
        Output .pl file name; when None, "<input>.fits.pl" is used.
    verbose : bool, optional
        Print extra information. Default: False.

    Returns
    -------
    output : string
        Name of the .pl file that was written.
    """
    # The executable may be a symlink into the kungpao distribution.
    if not (os.path.isfile(ximage) or os.path.islink(ximage)):
        raise FileNotFoundError("Can not find x_images.e: {}".format(ximage))
    if not os.path.isfile(fits):
        raise FileNotFoundError("Can not find input FITS image: {}".format(fits))

    # Derive the default output name from the input FITS file.
    pl_file = output if output is not None else fits.replace('.fits', '.fits.pl')

    # Clear any stale output before running imcopy.
    if os.path.isfile(pl_file):
        if verbose:
            print("# Output file exists! Will remove {}".format(pl_file))
        os.remove(pl_file)

    command = "{} imcopy input={} output={} verbose=no".format(
        ximage, fits.strip(), pl_file.strip())
    os.system(command)

    return pl_file
def iraf_commands():
    """Locate the executable files for IRAF functions.

    Returns
    -------
    tuple of string
        Paths to x_isophote.e, x_ttools.e and x_images.e bundled
        with the kungpao package for the current platform.
    """
    system = platform.system()
    if system == 'Darwin':
        subdir = 'macosx'
    elif system == 'Linux':
        subdir = 'linux'
    else:
        raise ValueError(
            'Wrong platform: only support MacOSX or Linux now')

    iraf_dir = os.path.join(os.path.dirname(kungpao.__file__), 'iraf', subdir)
    return (os.path.join(iraf_dir, 'x_isophote.e'),
            os.path.join(iraf_dir, 'x_ttools.e'),
            os.path.join(iraf_dir, 'x_images.e'))
def fix_pa_profile(ellipse_output, pa_col='pa', delta_pa=75.0):
    """Correct the position angle profile for large jumps.

    Whenever the PA changes by more than `delta_pa` between two adjacent
    radial bins, 180 degrees is added or subtracted to keep the profile
    continuous.  The comparison uses the already-corrected previous value,
    so corrections propagate outward.

    Parameters
    ----------
    ellipse_output : astropy.table
        Output table summarizing the result from `ellipse`.
    pa_col : string, optional
        Name of the position angle column. Default: pa
    delta_pa : float, optional
        Largest PA difference allowed for two adjacent radial bins.
        Default=75.

    Return
    ------
    ellipse_output with updated position angle column.
    """
    angles = ellipse_output[pa_col]
    for idx in range(1, len(angles)):
        step = angles[idx] - angles[idx - 1]
        if step >= delta_pa:
            angles[idx] = angles[idx] - 180.0
        elif step <= -delta_pa:
            angles[idx] = angles[idx] + 180.0

    ellipse_output[pa_col] = angles
    return ellipse_output
def isophote_to_ellip(ellipse_output, x_pad=0.0, y_pad=0.0):
    """Convert ellipse results into matplotlib ellipses for visualization.

    Parameters
    ----------
    ellipse_output : astropy.table
        Output table summarizing the result from `ellipse`.
    x_pad, y_pad : float, optional
        Offsets subtracted from the isophote centers.

    Return
    ------
    ell_list : list
        List of Matplotlib elliptical patches for making plot.
    """
    centers_x = ellipse_output['x0'] - x_pad
    centers_y = ellipse_output['y0'] - y_pad
    angles = ellipse_output['pa']
    # `sma` is a semi-major axis, so the full width/height is 2 * sma.
    major = ellipse_output['sma'] * 2.0
    minor = major * (1.0 - ellipse_output['ell'])

    patches = []
    for i in range(centers_x.shape[0]):
        patches.append(
            Ellipse(xy=np.array([centers_x[i], centers_y[i]]),
                    width=np.array(minor[i]),
                    height=np.array(major[i]),
                    angle=np.array(angles[i])))
    return patches
def save_isophote_output(ellip_output, prefix=None, ellip_config=None, location=''):
    """Save the Ellipse output to a compressed numpy archive.

    Parameters
    ----------
    ellip_output : astropy.table
        Output table for the isophote analysis.
    prefix : string, optional
        Prefix of the output file. Default: 'ellip_output'.
    ellip_config : dict, optional
        Configuration parameters for the isophote analysis.
    location : string, optional
        Directory to keep the output.

    Returns
    -------
    output_file : string
        Name of the output numpy record.
    """
    file_prefix = 'ellip_output' if prefix is None else prefix
    output_file = os.path.join(location, file_prefix + ".npz")

    # Bundle both the result table and the configuration in one '.npz'.
    np.savez(output_file, output=ellip_output, config=ellip_config)

    return output_file
def remove_index_from_output(output_tab, replace='NaN'):
    """Replace the INDEF values in an Ellipse output table, in place.

    Parameters
    ----------
    output_tab : string
        Name of the table file to clean up.
    replace : string, optional
        Replacement string for every 'INDEF'. Default: 'NaN'.

    Returns
    -------
    output_tab : string
        Name of the cleaned table file.

    Raises
    ------
    FileNotFoundError
        If `output_tab` does not exist.
    """
    if not os.path.exists(output_tab):
        # BUG FIX: the original raised FileExistsError for a *missing*
        # file; FileNotFoundError is the correct exception here.
        raise FileNotFoundError(
            'Can not find the input catalog: {}'.format(output_tab))

    # In-place edit; '-i_back' keeps a backup with the '_back' suffix.
    subprocess.call(['sed', '-i_back', 's/INDEF/' + replace + '/g', output_tab])

    # Remove the backup file. `sed -i_back` appends the suffix to the
    # full name ('file.tab_back'); the original code only looked for
    # 'file_back.tab', leaving the backup behind — check both spellings.
    for backup in (output_tab + '_back',
                   output_tab.replace('.tab', '_back.tab')):
        if os.path.isfile(backup):
            os.remove(backup)

    return output_tab
| gpl-3.0 |
anupcshan/buddyfs | kad_server/buddynode.py | 1 | 2546 | """
This will be the fundamental part of the buddy daemon.
Starts a Kademlia node and implements functionality for Node ID verification.
"""
from entangled.kademlia.node import Node
from entangled.kademlia.datastore import SQLiteDataStore
import cPickle as pickle
import hashlib
import logging
import os
import settings
import time
import twisted
logger = logging.getLogger(__name__)
class BuddyNode(Node):
    """Kademlia node with a few helper functions for BuddyFS."""

    # Process-wide singleton managed by get_node().
    node = None

    @classmethod
    def get_node(cls, start_port, known_ip=None, known_port=None):
        """Return the singleton BuddyNode, creating it on first use.

        Parameters: `start_port` is the local UDP port for the DHT;
        `known_ip`/`known_port` optionally name a bootstrap peer.
        NOTE(review): once the singleton exists, later calls ignore all
        arguments — even a different `start_port`.
        """
        if BuddyNode.node is not None:
            return BuddyNode.node

        dbpath = settings.DBPATH + '/buddydht-%s.db' % start_port
        datastore = SQLiteDataStore(dbFile=dbpath)
        logger.info('Starting buddy-daemon on port %d', start_port)
        BuddyNode.node = BuddyNode(None, start_port, datastore)

        if known_ip is None or known_port is None:
            # No bootstrap peer given: start a brand-new network.
            BuddyNode.node.joinNetwork([])
        else:
            BuddyNode.node.joinNetwork([(known_ip, known_port)])
            logger.debug('Bootstrap with node %s:%s', known_ip, known_port)
        return BuddyNode.node

    def __init__(self, nodeid, udpPort, dataStore, routingTable=None, networkProtocol=None):
        """Create the node; loads/generates a persistent id when nodeid is None."""
        if nodeid is None:
            nodeid = self.get_node_id()
        super(BuddyNode, self).__init__(nodeid, udpPort, dataStore, routingTable, networkProtocol)
        logger.debug('Singleton node created')
        BuddyNode.node = self
        return

    def get_node_id(self):
        """Return the node id persisted in '.nodeid', creating one if missing.

        The id is cached on disk so the node keeps a stable identity
        across restarts.
        """
        if os.path.isfile('.nodeid'):
            logger.debug('NodeID file exists')
            # BUG FIX: the original never closed this file handle.
            with open('.nodeid', 'r') as handle:
                stored = handle.read()
            if stored != '':
                logger.debug('Reusing NodeID %s', stored)
                return stored

        # Create a new node id and store it in the .nodeid file.
        nodeid = self._generateID()
        logger.debug('New NodeID generated : %s', nodeid)
        # BUG FIX: context manager instead of a leaked handle, and no
        # longer shadows the (Python 2) builtin name 'file'.
        with open('.nodeid', 'w+') as handle:
            handle.write(nodeid)
        return nodeid

    def get_root(self, pubkey):
        """Look up the root inode reference stored under 'root_<pubkey>'."""
        # NOTE(review): this datastore is created but never used; kept in
        # case opening the db file is a deliberate side effect — confirm.
        datastore = SQLiteDataStore(dbFile=settings.DBPATH + '/buddydht.db')
        key = hashlib.sha1('root_' + pubkey).digest()
        return self.iterativeFindValue(key)

    def set_root(self, pubkey, root_inode):
        """Publish the root inode reference under 'root_<pubkey>'."""
        # NOTE(review): unused datastore, same as get_root() — confirm.
        datastore = SQLiteDataStore(dbFile=settings.DBPATH + '/buddydht.db')
        key = hashlib.sha1('root_' + pubkey).digest()
        self.iterativeStore(key, root_inode, self.get_node_id(), 0)
| mit |
byshen/pyspider | pyspider/libs/url.py | 68 | 3814 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-09 14:39:57
import mimetypes
import six
import shlex
from six.moves.urllib.parse import urlparse, urlunparse
from requests.models import RequestEncodingMixin
def get_content_type(filename):
    """Guess the MIME type of *filename*, defaulting to a generic binary type."""
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    return 'application/octet-stream'
_encode_params = RequestEncodingMixin._encode_params
def _encode_multipart_formdata(fields, files):
    # Delegate multipart/form-data encoding to requests' internal helper.
    # _encode_files returns (body, content_type); re-order the pair to
    # (content_type, body) for this module's callers.
    body, content_type = RequestEncodingMixin._encode_files(files, fields)
    return content_type, body
def _build_url(url, _params):
    """Build the actual URL to use."""

    # Support for unicode domain names and paths.
    scheme, netloc, path, params, query, fragment = urlparse(url)
    # IDNA-encode the host so non-ASCII domain names become punycode.
    netloc = netloc.encode('idna').decode('utf-8')
    if not path:
        path = '/'

    if six.PY2:
        # On Python 2, urlunparse cannot mix str and unicode components,
        # so normalize every piece down to UTF-8 byte strings first.
        if isinstance(scheme, six.text_type):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, six.text_type):
            netloc = netloc.encode('utf-8')
        if isinstance(path, six.text_type):
            path = path.encode('utf-8')
        if isinstance(params, six.text_type):
            params = params.encode('utf-8')
        if isinstance(query, six.text_type):
            query = query.encode('utf-8')
        if isinstance(fragment, six.text_type):
            fragment = fragment.encode('utf-8')

    # Append the extra parameters to any query string already present.
    enc_params = _encode_params(_params)
    if enc_params:
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params

    url = (urlunparse([scheme, netloc, path, params, query, fragment]))
    return url
def quote_chinese(url, encodeing="utf-8"):
    """Quote non-ascii characters"""
    # Unicode input: encode to bytes first, then recurse to quote the bytes.
    if isinstance(url, six.text_type):
        return quote_chinese(url.encode(encodeing))
    if six.PY3:
        # On Python 3 iterating bytes yields ints; percent-escape >= 0x80.
        res = [six.int2byte(b).decode('latin-1') if b < 128 else '%%%02X' % b for b in url]
    else:
        # On Python 2 iterating a str yields 1-char strings.
        res = [b if ord(b) < 128 else '%%%02X' % ord(b) for b in url]
    return "".join(res)
def curl_to_arguments(curl):
    """Parse a ``curl ...`` command line into pyspider fetch arguments.

    Parameters
    ----------
    curl : str
        Full curl command line (including the leading 'curl').

    Returns
    -------
    dict
        With key 'urls' (list of URLs) and, when present, 'headers',
        'data', 'method' and 'use_gzip'.

    Raises
    ------
    TypeError
        For unknown options, a dangling option, or a missing URL.
    """
    kwargs = {}
    headers = {}
    command = None
    urls = []
    current_opt = None

    for part in shlex.split(curl):
        if command is None:
            # first token is the curl executable itself
            command = part
        elif not part.startswith('-') and not current_opt:
            # bare token with no pending option: it is a URL
            urls.append(part)
        elif current_opt is None and part.startswith('-'):
            # a boolean flag, or an option that expects a value next
            if part == '--compressed':
                kwargs['use_gzip'] = True
            else:
                current_opt = part
        else:
            # value for the pending option
            if current_opt is None:
                raise TypeError('Unknow curl argument: %s' % part)
            elif current_opt in ('-H', '--header'):
                key_value = part.split(':', 1)
                if len(key_value) == 2:
                    key, value = key_value
                    headers[key.strip()] = value.strip()
            elif current_opt in ('-d', '--data'):
                kwargs['data'] = part
            elif current_opt in ('--data-binary',):
                # BUG FIX: must be a tuple. The original wrote
                # `in ('--data-binary')`, a *string* membership test that
                # wrongly matched substrings such as '-b' or '-a'.
                if part[0] == '$':
                    # strip curl's $'...' ANSI-quoting sigil
                    part = part[1:]
                kwargs['data'] = part
            elif current_opt in ('-X', '--request'):
                kwargs['method'] = part
            else:
                raise TypeError('Unknow curl option: %s' % current_opt)
            current_opt = None

    if not urls:
        raise TypeError('curl: no URL specified!')
    if current_opt:
        # an option at the end of the line never got its value
        raise TypeError('Unknow curl option: %s' % current_opt)

    kwargs['urls'] = urls
    if headers:
        kwargs['headers'] = headers

    return kwargs
| apache-2.0 |
CyanogenMod/android_kernel_samsung_msm8930-common | scripts/gcc-wrapper.py | 234 | 4095 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
"f_qdss.c:586",
"mipi_tc358764_dsi2lvds.c:746",
"dynamic_debug.h:75",
"hci_conn.c:407",
"f_qdss.c:740",
"mipi_novatek.c:569",
"swab.h:34",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc. The messages we care about have a filename, and a warning"""
    # warning_re groups: (1) leading path, (2) "file.ext:line", (3) "col:".
    # Group 2 is the key matched against the allowed_warnings whitelist.
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        # Abort the wrapper so the build fails on the new warning.
        sys.exit(1)
def run_gcc():
    """Run the real compiler, scanning its stderr for forbidden warnings.

    Returns the compiler's exit status, or an errno if it could not be
    launched at all.
    """
    args = sys.argv[1:]
    # Look for -o
    # Remember the output object so interpret_warning() can delete it.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): `compiler` is assigned but never used below — confirm
    # whether args[0] was meant to be replaced by it when re-invoking.
    compiler = sys.argv[0]
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        # Echo each stderr line and check it against the warning whitelist.
        for line in proc.stderr:
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        # Launch failed; give a friendly hint when the binary is missing.
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
bigswitch/snac-nox | src/utilities/findtrailingcommas.py | 15 | 1265 | #!/usr/bin/env python
import os, re
trail_newline_re = re.compile('(.*,\s*[\}\]])', re.M)
slash_comment = re.compile('//.*$', re.M)
comment = re.compile('/\*.*?\*/', re.S)
regexps = re.compile(r'(?<!\\)/.*?(?<!\\)/', re.M)
string1 = re.compile(r"(?<!\\)'.*?(?<!\\)'", re.M)
string2 = re.compile(r'(?<!\\)".*?(?<!\\)"', re.M)
def main():
    """Walk the current tree and report trailing commas in .js files.

    Prints each match as '.relative/path: <snippet>'; comments, regex
    literals and string bodies are stripped first to avoid false positives.
    """
    cwd = os.getcwd()
    cwd_len = len(cwd)
    for root, dirs, files in os.walk(cwd):
        # Prune VCS metadata directories in place so os.walk skips them.
        if 'CVS' in dirs:
            dirs.remove('CVS')
        if '.svn' in dirs:
            dirs.remove('.svn')
        if '.git' in dirs:
            dirs.remove('.git')
        for file in files:
            if file[-3:] != '.js':
                continue
            fn = os.path.join(root, file)
            contents = open(fn).read()
            # Blank out constructs whose commas don't count: // and /* */
            # comments, /regex/ literals, and both quote styles of strings.
            contents = slash_comment.sub('', contents)
            contents = comment.sub('', contents)
            contents = regexps.sub('//', contents)
            contents = string1.sub("''", contents)
            contents = string2.sub('""', contents)
            m = trail_newline_re.search(contents)
            # NOTE(review): the search is executed twice; the second call
            # duplicates `m` above — presumably a leftover, confirm.
            if trail_newline_re.search(contents):
                for g in m.groups():
                    print '.%s: %r' % (fn[cwd_len:],g)
if __name__ == '__main__':
main()
| gpl-3.0 |
vebin/Wox | PythonHome/Lib/site-packages/requests/packages/urllib3/exceptions.py | 330 | 3364 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        # Keep a reference to the originating pool for callers to inspect.
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes.
        # The pool is presumably not picklable, so rebuild the exception
        # with None placeholders (the pool context is lost on unpickle).
        return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class ConnectionError(HTTPError):
"Raised when a normal connection fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
## Leaf Exceptions
class MaxRetryError(RequestError):
    "Raised when the maximum number of retries is exceeded."

    def __init__(self, pool, url, reason=None):
        # `reason` is the underlying exception, or None when the retries
        # were exhausted by redirects rather than errors.
        self.reason = reason

        message = "Max retries exceeded with url: %s" % url
        if reason:
            message += " (Caused by %s: %s)" % (type(reason), reason)
        else:
            message += " (Caused by redirect)"

        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationParseError(ValueError, HTTPError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
| mit |
XuQiufeng/kernel_msm | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and emit them as
# space-separated "hexindex=value" pairs for the adsl_config attribute.
i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: we're done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: finish the line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
analurandis/Tur | backend/venv/Lib/site-packages/Cheetah/Servlet.py | 16 | 1261 | #!/usr/bin/env python
'''
Provides an abstract Servlet baseclass for Cheetah's Template class
'''
import sys
import os.path
class Servlet(object):
    """
    This class is an abstract baseclass for Cheetah.Template.Template.
    """

    # Per-request state; populated by the hosting framework.
    transaction = None
    application = None
    request = None
    session = None

    def respond(self, trans=None):
        # Abstract entry point; Cheetah-generated subclasses override this.
        raise NotImplementedError("""\
couldn't find the template's main method. If you are using #extends
without #implements, try adding '#implements respond' to your template
definition.""")

    def sleep(self, transaction):
        # NOTE(review): `object` has no sleep(); this super() call assumes a
        # cooperative base (e.g. WebKit's Servlet) earlier in the MRO when
        # mixed into Template — confirm before reuse outside that context.
        super(Servlet, self).sleep(transaction)
        # Drop per-request references so the servlet can be pooled/reused.
        self.session = None
        self.request = None
        self._request = None
        self.response = None
        self.transaction = None

    def shutdown(self):
        # Hook for subclasses; nothing to clean up by default.
        pass

    def serverSidePath(self, path=None,
                       normpath=os.path.normpath,
                       abspath=os.path.abspath
                       ):
        # Resolve `path` (or, failing that, the template's own source file
        # `_filePath`) to a normalized absolute server-side path.
        if path:
            return normpath(abspath(path.replace("\\", '/')))
        elif hasattr(self, '_filePath') and self._filePath:
            return normpath(abspath(self._filePath))
        else:
            return None
# vim: shiftwidth=4 tabstop=4 expandtab
| mit |
kosgroup/odoo | addons/report/tests/test_reports.py | 24 | 1241 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import odoo
import odoo.tests
_logger = logging.getLogger(__name__)
@odoo.tests.common.at_install(False)
@odoo.tests.common.post_install(True)
class TestReports(odoo.tests.TransactionCase):
    def test_reports(self):
        """Render every generic QWeb report against up to 10 records."""
        domain = [('report_type', 'like', 'qweb')]
        for report in self.env['ir.actions.report.xml'].search(domain):
            report_model = 'report.%s' % report.report_name
            try:
                self.env[report_model]
            except KeyError:
                # Only test the generic reports here
                # (a KeyError means no dedicated report model exists).
                _logger.info("testing report %s", report.report_name)
                report_model = self.env[report.model]
                report_records = report_model.search([], limit=10)
                if not report_records:
                    _logger.info("no record found skipping report %s", report.report_name)
                if not report.multi:
                    # Single-record reports only get the first record.
                    report_records = report_records[:1]

                # Test report generation
                self.env['report'].get_html(report_records.ids, report.report_name)
            else:
                # A dedicated report model exists: not generic, skip it.
                continue
alshedivat/tensorflow | tensorflow/contrib/grid_rnn/__init__.py | 179 | 1060 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GridRNN cells
## This package provides classes for GridRNN
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
| apache-2.0 |
tzaffi/git-in-practice-repo | book/lib/python2.7/site-packages/django/core/management/commands/dbshell.py | 329 | 1243 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """Management command that opens the configured database's CLI client."""

    help = ("Runs the command-line client for specified database, or the "
        "default database if none is provided.")

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
                'open a shell. Defaults to the "default" database.'),
    )

    # No model validation needed just to spawn the db shell.
    requires_model_validation = False

    def handle(self, **options):
        # Resolve the connection for the requested alias and hand off to
        # the backend-specific client (psql, mysql, sqlite3, ...).
        connection = connections[options.get('database')]
        try:
            connection.client.runshell()
        except OSError:
            # Note that we're assuming OSError means that the client program
            # isn't installed. There's a possibility OSError would be raised
            # for some other reason, in which case this error message would be
            # inaccurate. Still, this message catches the common case.
            raise CommandError('You appear not to have the %r program installed or on your path.' % \
                connection.client.executable_name)
| mit |
fernandog/Medusa | lib/shutil_custom/__init__.py | 36 | 1355 | import os
import platform
import stat
try:
from shutil import SpecialFileError, Error
except:
from shutil import Error
from shutil import _samefile
def copyfile_custom(src, dst):
    """Copy data from src to dst using low-level, large-buffer reads.

    Parameters
    ----------
    src, dst : str
        Source and destination file paths.

    Raises
    ------
    Error
        If src and dst are the same file.
    SpecialFileError
        If either path is a named pipe (falls back to Error when the
        SpecialFileError import was unavailable).
    """
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))

    for fn in [src, dst]:
        try:
            st = os.stat(fn)
        except OSError:
            # File most likely does not exist
            pass
        else:
            # XXX What about other special files? (sockets, devices...)
            if stat.S_ISFIFO(st.st_mode):
                try:
                    raise SpecialFileError("`%s` is a named pipe" % fn)
                except NameError:
                    # SpecialFileError could not be imported at module load.
                    raise Error("`%s` is a named pipe" % fn)

    try:
        # Windows needs O_BINARY to suppress newline translation.
        O_BINARY = os.O_BINARY
    except AttributeError:
        O_BINARY = 0

    READ_FLAGS = os.O_RDONLY | O_BINARY
    WRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_TRUNC | O_BINARY
    BUFFER_SIZE = 128 * 1024

    fin = fout = None
    try:
        fin = os.open(src, READ_FLAGS)
        fout = os.open(dst, WRITE_FLAGS)
        # BUG FIX: the original used iter(..., "") as the sentinel, but
        # os.read() returns *bytes* on Python 3, so EOF (b"") never matched
        # and the loop spun forever. Test for an empty chunk instead.
        while True:
            chunk = os.read(fin, BUFFER_SIZE)
            if not chunk:
                break
            os.write(fout, chunk)
    finally:
        # BUG FIX: guard against fds that were never opened (the original
        # could hit NameError/close(None) when os.open itself failed).
        for fd in (fin, fout):
            if fd is not None:
                try:
                    os.close(fd)
                except OSError:
                    pass
| gpl-3.0 |
Qwertyon666/Necro | tools/mapmerge/map_conflict_fixer.py | 61 | 4423 | import map_helpers
import sys
import os
import time
def main(relative_root):
    """Interactively resolve git merge conflicts in .dmm map files.

    `relative_root` is prepended to each conflicted path when writing the
    '<name>.fixed.dmm' results. Requires git on PATH and an in-progress
    merge (reads HEAD, MERGE_HEAD and their merge base).
    """
    # Sanity check that git is callable before prompting the user.
    git_version = map_helpers.run_shell_command("git version")
    if not git_version:
        print("ERROR: Failed to run git. Make sure it is installed and in your PATH.")
        return False
    print("--- DISCLAIMER ---")
    print("This script is in a testing phase. Verify all the results yourself to make sure you got what you expected. Make sure to read the readme to learn how to use this.")
    input("Press Enter to GO\n")
    # List unmerged files and keep only the .dmm maps.
    file_conflicts = map_helpers.run_shell_command("git diff --name-only --diff-filter=U").split("\n")
    map_conflicts = [path for path in file_conflicts if path[len(path)-3::] == "dmm"]
    for i in range(0, len(map_conflicts)):
        print("[{}]: {}".format(i, map_conflicts[i]))
    selection = input("Choose maps you want to fix (example: 1,3-5,12):\n")
    selection = selection.replace(" ", "")
    selection = selection.split(",")
    #shamelessly copied from mapmerger cli
    # Expand entries like "3" and "3-5" into a flat list of valid indices.
    valid_indices = list()
    for m in selection:
        index_range = m.split("-")
        if len(index_range) == 1:
            index = map_helpers.string_to_num(index_range[0])
            if index >= 0 and index < len(map_conflicts):
                valid_indices.append(index)
        elif len(index_range) == 2:
            index0 = map_helpers.string_to_num(index_range[0])
            index1 = map_helpers.string_to_num(index_range[1])
            if index0 >= 0 and index0 <= index1 and index1 < len(map_conflicts):
                valid_indices.extend(range(index0, index1 + 1))
    if not len(valid_indices):
        print("No map selected, exiting.")
        sys.exit()
    print("Attempting to fix the following maps:")
    for i in valid_indices:
        print(map_conflicts[i])
    marker = None
    priority = 0
    # Choose between dictionary-level fixing and full-map marking.
    print("\nFixing modes:")
    print("[{}]: Dictionary conflict fixing mode".format(map_helpers.MAP_FIX_DICTIONARY))
    print("[{}]: Full map conflict fixing mode".format(map_helpers.MAP_FIX_FULL))
    mode = map_helpers.string_to_num(input("Select fixing mode [Dictionary]: "))
    if mode != map_helpers.MAP_FIX_FULL:
        mode = map_helpers.MAP_FIX_DICTIONARY
        print("DICTIONARY mode selected.")
    else:
        # FULL mode tags conflicting tiles with a placeholder object path.
        marker = input("FULL mode selected. Input a marker [/obj/effect/debugging/marker]: ")
        if not marker:
            marker = "/obj/effect/debugging/marker"
        print("Marker selected: {}".format(marker))
    print("\nVersion priorities:")
    print("[{}]: Your version".format(map_helpers.MAP_FIX_PRIORITY_OURS))
    print("[{}]: Their version".format(map_helpers.MAP_FIX_PRIORITY_THEIRS))
    priority = map_helpers.string_to_num(input("Select priority [Yours]: "))
    if priority != map_helpers.MAP_FIX_PRIORITY_THEIRS:
        priority = map_helpers.MAP_FIX_PRIORITY_OURS
        print("Your version will be prioritized.")
    else:
        print("Their version will be prioritized.")
    # Progress-message wording depends on the selected mode.
    ed = "FIXED" if mode == map_helpers.MAP_FIX_DICTIONARY else "MARKED"
    ing = "FIXING" if mode == map_helpers.MAP_FIX_DICTIONARY else "MARKING"
    print("\nMaps will be converted to TGM.")
    print("Writing maps to 'file_path/file_name.fixed.dmm'. Please verify the results before commiting.")
    if mode == map_helpers.MAP_FIX_FULL:
        print("After editing the marked maps, run them through the map merger!")
    input("Press Enter to start.")
    print(".")
    time.sleep(0.3)
    print(".")
    for i in valid_indices:
        path = map_conflicts[i]
        print("{}: {}".format(ing, path))
        # Pull the three versions of the map: ours (HEAD), theirs
        # (MERGE_HEAD) and their common ancestor (the merge base).
        ours_map_raw_text = map_helpers.run_shell_command("git show HEAD:{}".format(path))
        theirs_map_raw_text = map_helpers.run_shell_command("git show MERGE_HEAD:{}".format(path))
        common_ancestor_hash = map_helpers.run_shell_command("git merge-base HEAD MERGE_HEAD").strip()
        base_map_raw_text = map_helpers.run_shell_command("git show {}:{}".format(common_ancestor_hash, path))
        ours_map = map_helpers.parse_map(ours_map_raw_text)
        theirs_map = map_helpers.parse_map(theirs_map_raw_text)
        base_map = map_helpers.parse_map(base_map_raw_text)
        # Three-way merge; writes '<path>.fixed.dmm' on success.
        if map_helpers.fix_map_git_conflicts(base_map, ours_map, theirs_map, mode, marker, priority, relative_root+path):
            print("{}: {}".format(ed, path))
    print(".")
| agpl-3.0 |
sdecoder/CMDS-HDFS | common/build/contrib/hod/hodlib/Common/miniHTMLParser.py | 182 | 1402 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import urllib, urlparse, re
from HTMLParser import HTMLParser
class miniHTMLParser( HTMLPSER if False else HTMLParser ):
    # NOTE(review): the two queues below are *class-level* lists, so every
    # instance shares the same state; fine for a single crawler — confirm.
    viewedQueue = []
    instQueue = []

    def setBaseUrl(self, url):
        # Base URL used to resolve relative links found in pages.
        self.baseUrl = url

    def getNextLink( self ):
        # FIFO: return the oldest unvisited link, or None when drained.
        if self.instQueue == []:
            return None
        else:
            return self.instQueue.pop(0)

    def handle_starttag( self, tag, attrs ):
        if tag == 'a':
            # Resolve the first attribute's value against the base URL.
            # NOTE(review): assumes attrs[0] is href — confirm; another
            # attribute listed before href would be followed instead.
            newstr = urlparse.urljoin(self.baseUrl, str(attrs[0][1]))
            if re.search('mailto', newstr) != None:
                return
            # Enqueue each link at most once.
            if (newstr in self.viewedQueue) == False:
                self.instQueue.append( newstr )
                self.viewedQueue.append( newstr )
| apache-2.0 |
niteoweb/libcloud | libcloud/compute/drivers/ciscoccs.py | 11 | 2001 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cisco CCS Driver
"""
from libcloud.compute.providers import Provider
from libcloud.common.dimensiondata import (DimensionDataConnection,
API_ENDPOINTS)
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver
DEFAULT_REGION = 'cisco-na'
class CiscoCCSNodeDriver(DimensionDataNodeDriver):
    """
    Cisco CCS node driver, based on Dimension Data driver
    """
    # Region record resolved from API_ENDPOINTS in __init__.
    selected_region = None
    connectionCls = DimensionDataConnection
    name = 'CiscoCCS'
    website = 'http://www.cisco.com/'
    type = Provider.CISCOCCS
    features = {'create_node': ['password']}
    api_version = 1.0

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=DEFAULT_REGION, **kwargs):
        # Validate the region up front so a typo fails fast with a clear
        # error instead of surfacing later during a request.
        if region not in API_ENDPOINTS:
            raise ValueError('Invalid region: %s' % (region))

        self.selected_region = API_ENDPOINTS[region]

        # Everything else is inherited verbatim from the Dimension Data
        # driver; only the endpoint/region differs for Cisco CCS.
        super(CiscoCCSNodeDriver, self).__init__(
            key=key,
            secret=secret,
            secure=secure,
            host=host,
            port=port,
            api_version=api_version,
            region=region,
            **kwargs)
| apache-2.0 |
zmike/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 95 | 4179 | import json, os, urllib, urlparse
def redirect(url, response):
    # Emit a raw 301 by hand (required headers disabled so we fully
    # control the header set); the CORS header lets cross-origin test
    # harnesses follow the redirect.
    response.add_required_headers = False
    response.writer.write_status(301)
    response.writer.write_header("access-control-allow-origin", "*")
    response.writer.write_header("location", url)
    response.writer.end_headers()
    response.writer.write("")
def create_redirect_url(request, swap_scheme = False):
    """Build the destination URL for redirecting this request.

    When *swap_scheme* is set, the scheme is flipped (https becomes http
    and anything else becomes https) and the netloc is re-pointed at the
    server port configured for the new scheme. The "redirection" query
    parameter is always stripped so the redirected request cannot loop.
    """
    parsed = urlparse.urlsplit(request.url)
    scheme = parsed.scheme
    netloc = parsed.netloc
    if swap_scheme:
        scheme = "http" if parsed.scheme == "https" else "https"
        hostname = parsed.netloc.split(':')[0]
        port = request.server.config["ports"][scheme][0]
        netloc = ":".join([hostname, str(port)])

    # Remove "redirection" from query to avoid redirect loops.
    query = dict(urlparse.parse_qsl(parsed.query))
    assert "redirection" in query
    del query["redirection"]

    return urlparse.urlunsplit(urlparse.SplitResult(
        scheme = scheme,
        netloc = netloc,
        path = parsed.path,
        query = urllib.urlencode(query),
        fragment = None))
def _purge_response_body(request, content_type):
    """Return the placeholder payload served after a purge for *content_type*.

    Binary subresources are read from under request.doc_root; unknown types
    fall back to a plain-text marker.
    """
    subresources = {
        "image/png": ("images", "smiley.png"),
        "audio/mpeg": ("media", "sound_5.oga"),
        "video/mp4": ("media", "movie_5.mp4"),
        "application/javascript": ("mixed-content", "generic", "worker.js"),
    }
    if content_type not in subresources:
        return "/* purged */"
    # Use a context manager so the file handle is closed promptly.
    with open(os.path.join(request.doc_root, *subresources[content_type]), "rb") as f:
        return f.read()

def main(request, response):
    """Stash-backed endpoint used by mixed-content tests.

    Supports optional redirection (via the "redirection" query parameter),
    and three stash actions: "put" (record a value), "purge" (take the value
    and serve a placeholder resource), and "take" (report "allowed" if the
    key was never stored, "blocked" otherwise).
    """
    if "redirection" in request.GET:
        redirection = request.GET["redirection"]
        if redirection == "no-redirect":
            pass
        elif redirection == "keep-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=False), response)
            return
        elif redirection == "swap-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=True), response)
            return
        else:
            raise ValueError("Invalid redirect type: %s" % redirection)

    content_type = "text/plain"
    response_data = ""

    if "action" in request.GET:
        action = request.GET["action"]

        if "content_type" in request.GET:
            content_type = request.GET["content_type"]

        key = request.GET["key"]
        stash = request.server.stash
        # BUGFIX: the default expression must be indexed, not the result of
        # .get() -- the old form truncated an explicit "path" parameter to
        # its first character. Default to the request URL minus the query.
        path = request.GET.get("path", request.url.split('?')[0])

        if action == "put":
            value = request.GET["value"]
            # take() first so the put starts from a clean slot
            stash.take(key=key, path=path)
            stash.put(key=key, value=value, path=path)
            response_data = json.dumps({"status": "success", "result": key})
        elif action == "purge":
            value = stash.take(key=key, path=path)
            response_data = _purge_response_body(request, content_type)
        elif action == "take":
            value = stash.take(key=key, path=path)
            if value is None:
                status = "allowed"
            else:
                status = "blocked"
            response_data = json.dumps({"status": status, "result": value})

    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", "no-cache; must-revalidate")
    response.writer.end_headers()
    response.writer.write(response_data)
| mpl-2.0 |
HuaweiSwitch/ansible | lib/ansible/plugins/strategy/__init__.py | 4 | 43473 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_text
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.utils.vars import combine_vars
from ansible.vars.manager import strip_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
class StrategySentinel:
    """Marker type placed on the results queue to stop the results thread."""
    pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
    '''
    A simple object to make pass the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # Mirror the module-level plugin loader singletons as instance
        # attributes so the whole set can be pickled/sent to workers as one
        # object.
        self.action_loader = action_loader
        self.connection_loader = connection_loader
        self.filter_loader = filter_loader
        self.test_loader = test_loader
        self.lookup_loader = lookup_loader
        self.module_loader = module_loader
# single sentinel instance shared by cleanup() and the results thread
_sentinel = StrategySentinel()


def results_thread_main(strategy):
    """Background loop draining the final queue into the strategy's deque.

    Exits when the sentinel arrives or the queue's pipe breaks; a spurious
    Empty simply retries.
    """
    while True:
        try:
            item = strategy._final_q.get()
        except (IOError, EOFError):
            break
        except Queue.Empty:
            continue
        if isinstance(item, StrategySentinel):
            break
        # the Condition wraps a plain Lock; use it as a context manager
        with strategy._results_lock:
            strategy._results.append(item)
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
    def __init__(self, tqm):
        """
        Cache the parts of the TaskQueueManager this strategy needs and
        start the background thread that drains worker results.

        :param tqm: the TaskQueueManager driving the current play
        """
        self._tqm = tqm
        self._inventory = tqm.get_inventory()
        self._workers = tqm.get_workers()
        self._notified_handlers = tqm._notified_handlers
        self._listening_handlers = tqm._listening_handlers
        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q
        self._step = getattr(tqm._options, 'step', False)
        self._diff = getattr(tqm._options, 'diff', False)

        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display

        # internal counters
        self._pending_results = 0  # tasks queued but not yet processed
        self._cur_worker = 0       # next worker slot to try

        # this dictionary is used to keep track of hosts that have
        # outstanding tasks still in queue
        self._blocked_hosts = dict()

        self._results = deque()
        self._results_lock = threading.Condition(threading.Lock())

        # create the result processing thread for reading results in the background;
        # started last so it only ever sees a fully initialized strategy object
        self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
        self._results_thread.daemon = True
        self._results_thread.start()
    def cleanup(self):
        # wake the results thread with the sentinel so its loop exits, then
        # wait for it to finish
        self._final_q.put(_sentinel)
        self._results_thread.join()
    def run(self, iterator, play_context, result=0):
        """
        Finalize a strategy run: advance every host to its last task, run
        notified handlers, and fold the handler outcome plus the final host
        states into one of the tqm RUN_* result codes.
        """
        # execute one more pass through the iterator without peeking, to
        # make sure that all of the hosts are advanced to their final task.
        # This should be safe, as everything should be ITERATING_COMPLETE by
        # this point, though the strategy may not advance the hosts itself.
        [iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]

        # save the failed/unreachable hosts, as the run_handlers()
        # method will clear that information during its execution
        failed_hosts = iterator.get_failed_hosts()
        unreachable_hosts = self._tqm._unreachable_hosts.keys()

        display.debug("running handlers")
        handler_result = self.run_handlers(iterator, play_context)
        # run_handlers() may return a bool (False == hard error) or a
        # RUN_* bitmask; fold either into the accumulated result
        if isinstance(handler_result, bool) and not handler_result:
            result |= self._tqm.RUN_ERROR
        elif not handler_result:
            result |= handler_result

        # now update with the hosts (if any) that failed or were
        # unreachable during the handler execution phase
        failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
        unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())

        # return the appropriate code, depending on the status hosts after the run
        if not isinstance(result, bool) and result != self._tqm.RUN_OK:
            return result
        elif len(unreachable_hosts) > 0:
            return self._tqm.RUN_UNREACHABLE_HOSTS
        elif len(failed_hosts) > 0:
            return self._tqm.RUN_FAILED_HOSTS
        else:
            return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''

        display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))

        # Add a write lock for tasks.
        # Maybe this should be added somewhere further up the call stack but
        # this is the earliest in the code where we have task (1) extracted
        # into its own variable and (2) there's only a single code path
        # leading to the module being run. This is called by three
        # functions: __init__.py::_do_handler_run(), linear.py::run(), and
        # free.py::run() so we'd have to add to all three to do it there.
        # The next common higher level is __init__.py::run() and that has
        # tasks inside of play_iterator so we'd have to extract them to do it
        # there.
        if task.action not in action_write_locks.action_write_locks:
            display.debug('Creating lock for %s' % task.action)
            action_write_locks.action_write_locks[task.action] = Lock()

        # and then queue the new task
        try:
            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            # round-robin over the worker slots, starting from where we left
            # off, until a free (or dead) slot accepts this task
            queued = False
            starting_worker = self._cur_worker
            while True:
                (worker_prc, rslt_q) = self._workers[self._cur_worker]
                if worker_prc is None or not worker_prc.is_alive():
                    worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
                    self._workers[self._cur_worker][0] = worker_prc
                    worker_prc.start()
                    display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
                    queued = True
                self._cur_worker += 1
                if self._cur_worker >= len(self._workers):
                    self._cur_worker = 0
                if queued:
                    break
                elif self._cur_worker == starting_worker:
                    # scanned every slot without queueing; yield briefly
                    # before trying again
                    time.sleep(0.0001)

            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            display.debug("got an error while queuing: %s" % e)
            return
        display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
    def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).

        :param iterator: the play iterator whose state is updated per result
        :param one_pass: process at most one result before returning
        :param max_passes: optional cap on the number of results processed
        :returns: the list of TaskResult objects that were processed
        '''

        ret_results = []

        def get_original_host(host_name):
            # Map a (possibly serialized) host name back to the master
            # inventory's Host object.
            #FIXME: this should not need x2 _inventory
            host_name = to_text(host_name)
            if host_name in self._inventory.hosts:
                return self._inventory.hosts[host_name]
            else:
                return self._inventory.get_host(host_name)

        def search_handler_blocks_by_name(handler_name, handler_blocks):
            # Find a handler whose (templated) name matches handler_name.
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_task.name:
                        handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler_task)
                        templar = Templar(loader=self._loader, variables=handler_vars)
                        try:
                            # first we check with the full result of get_name(), which may
                            # include the role name (if the handler is from a role). If that
                            # is not found, we resort to the simple name field, which doesn't
                            # have anything extra added to it.
                            target_handler_name = templar.template(handler_task.name)
                            if target_handler_name == handler_name:
                                return handler_task
                            else:
                                target_handler_name = templar.template(handler_task.get_name())
                                if target_handler_name == handler_name:
                                    return handler_task
                        except (UndefinedError, AnsibleUndefinedVariable):
                            # We skip this handler due to the fact that it may be using
                            # a variable in the name that was conditionally included via
                            # set_fact or some other method, and we don't want to error
                            # out unnecessarily
                            continue
            return None

        def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
            # Exact lookup of a handler task by its internal uuid.
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_uuid == handler_task._uuid:
                        return handler_task
            return None

        def parent_handler_match(target_handler, handler_name):
            # Walk up the include chain of target_handler looking for an
            # include whose (templated) name matches handler_name.
            if target_handler:
                if isinstance(target_handler, (TaskInclude, IncludeRole)):
                    try:
                        handler_vars = self._variable_manager.get_vars(play=iterator._play, task=target_handler)
                        templar = Templar(loader=self._loader, variables=handler_vars)
                        target_handler_name = templar.template(target_handler.name)
                        if target_handler_name == handler_name:
                            return True
                        else:
                            target_handler_name = templar.template(target_handler.get_name())
                            if target_handler_name == handler_name:
                                return True
                    except (UndefinedError, AnsibleUndefinedVariable):
                        pass
                return parent_handler_match(target_handler._parent, handler_name)
            else:
                return False

        cur_pass = 0
        while True:
            # pop one result under the lock; an empty deque ends the loop
            try:
                self._results_lock.acquire()
                task_result = self._results.pop()
            except IndexError:
                break
            finally:
                self._results_lock.release()

            # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
            original_host = get_original_host(task_result._host)
            found_task = iterator.get_original_task(original_host, task_result._task)
            original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
            original_task._parent = found_task._parent
            original_task.from_attrs(task_result._task_fields)
            task_result._host = original_host
            task_result._task = original_task

            # get the correct loop var for use later
            if original_task.loop_control:
                loop_var = original_task.loop_control.loop_var or 'item'
            else:
                loop_var = 'item'

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    if 'diff' in task_result._result:
                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff', task_result)
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            # store a registered result (minus internal keys) as a
            # non-persistent fact for every host the task applies to
            if original_task.register:
                host_list = self.get_task_hosts(iterator, original_host, original_task)

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                ignore_errors = original_task.ignore_errors
                if not ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # increment the failed count for this host
                    self._tqm._stats.increment('failures', original_host.name)

                    # grab the current state and if we're iterating on the rescue portion
                    # of a block then we save the failed task in a special var for use
                    # within the rescue/always
                    state, _ = iterator.get_next_task_for_host(original_host, peek=True)

                    if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
                        self._tqm._failed_hosts[original_host.name] = True

                    if state and state.run_state == iterator.ITERATING_RESCUE:
                        self._variable_manager.set_nonpersistent_facts(
                            original_host,
                            dict(
                                ansible_failed_task=original_task.serialize(),
                                ansible_failed_result=task_result._result,
                            ),
                        )
                else:
                    # failure is ignored: count the task as ok/changed instead
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)
                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                iterator._play._removed_hosts.append(original_host.name)
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [ task_result._result ]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                found = False
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
                                if target_handler is not None:
                                    found = True
                                    if original_host not in self._notified_handlers[target_handler._uuid]:
                                        self._notified_handlers[target_handler._uuid].append(original_host)
                                        # FIXME: should this be a callback?
                                        display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                else:
                                    # As there may be more than one handler with the notified name as the
                                    # parent, so we just keep track of whether or not we found one at all
                                    for target_handler_uuid in self._notified_handlers:
                                        target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
                                        if target_handler and parent_handler_match(target_handler, handler_name):
                                            found = True
                                            if original_host not in self._notified_handlers[target_handler._uuid]:
                                                self._notified_handlers[target_handler._uuid].append(original_host)
                                                display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))

                                # handlers can also subscribe ("listen") to a
                                # notification name; notify those too
                                if handler_name in self._listening_handlers:
                                    for listening_handler_uuid in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
                                        if listening_handler is not None:
                                            found = True
                                        else:
                                            continue
                                        if original_host not in self._notified_handlers[listening_handler._uuid]:
                                            self._notified_handlers[listening_handler._uuid].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler.get_name(),))

                                # and if none were found, then we raise an error
                                if not found:
                                    msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
                                           "handlers list" % handler_name)
                                    if C.ERROR_ON_MISSING_HANDLER:
                                        raise AnsibleError(msg)
                                    else:
                                        display.warning(msg)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    if 'ansible_facts' in result_item:
                        # if delegated fact and we are delegating facts, we need to change target host for them
                        if original_task.delegate_to is not None and original_task.delegate_facts:
                            host_list = self.get_delegated_hosts(result_item, original_task)
                        else:
                            host_list = self.get_task_hosts(iterator, original_host, original_task)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually referring too here, which may
                                # be a host that is not really in inventory at all
                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            for target_host in host_list:
                                # set_fact results are non-persistent; real
                                # facts are stored on the host
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                    if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:

                        if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
                            host_list = self.get_task_hosts(iterator, original_host, original_task)
                        else:
                            host_list = [None]

                        data = result_item['ansible_stats']['data']
                        aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
                        for myhost in host_list:
                            for k in data.keys():
                                if aggregate:
                                    self._tqm._stats.update_custom_stats(k, data[k], myhost)
                                else:
                                    self._tqm._stats.set_custom_stats(k, data[k], myhost)

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action not in ['include', 'include_role']:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran: #TODO: and original_task.action != 'include_role':?
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

            if one_pass or max_passes is not None and (cur_pass+1) >= max_passes:
                break

            cur_pass += 1

        return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, iterator):
'''
Helper function to add a new host to inventory based on a task result.
'''
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if not host_name in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host.vars = combine_vars(new_host.get_vars(), host_info.get('host_vars', dict()))
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
self._inventory.add_group(group_name)
new_group = self._inventory.groups[group_name]
new_group.add_host(self._inventory.hosts[host_name])
# clear pattern caching completely since it's unpredictable what patterns may have referenced the group
self._inventory.clear_pattern_cache()
# reconcile inventory, ensures inventory rules are followed
self._inventory.reconcile_inventory()
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts[host.name]
group_name = result_item.get('add_group')
if group_name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(group_name)
changed = True
group = self._inventory.groups[group_name]
if real_host.name not in group.get_hosts():
group.add_host(real_host)
changed = True
if group_name not in host.get_groups():
real_host.add_group(group)
changed = True
if changed:
self._inventory.clear_pattern_cache()
self._inventory.reconcile_inventory()
return changed
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.

        :param included_file: IncludedFile describing the file, args and hosts
        :param iterator: the play iterator (used for the play context and to
            mark hosts failed on load errors)
        :param is_handler: parse the loaded blocks as handler blocks
        :returns: the list of parsed blocks, or [] when loading failed or the
            file was empty
        '''
        display.debug("loading included file: %s" % included_file._filename)
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError("included task files must contain a list of tasks")

            # work on a copy of the include task so the original's vars are
            # not polluted by the include args
            ti_copy = included_file._task.copy()
            temp_vars = ti_copy.vars.copy()
            temp_vars.update(included_file._args)
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = included_file._task.vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = tags.split(',')
            if len(tags) > 0:
                if len(included_file._task.tags) > 0:
                    raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                             "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                                             obj=included_file._task._ds)
                display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                included_file._task.tags = tags

            ti_copy.vars = temp_vars

            block_list = load_list_of_blocks(
                data,
                play=iterator._play,
                parent_block=None,
                task_include=ti_copy,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader,
                variable_manager=self._variable_manager,
            )

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        display.debug("done processing included file")
        return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler._uuid in self._notified_handlers and len(self._notified_handlers[handler._uuid]):
result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
if not result:
break
return result
    def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
        """
        Queue one handler on all of its notified hosts, wait for the results,
        recurse into any files the handler included, and finally clear the
        handler's notification list. Returns False on a hard error, otherwise
        the (truthy) result of the run.
        """

        # FIXME: need to use iterator.get_failed_hosts() instead?
        #if not len(self.get_hosts_remaining(iterator._play)):
        #    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
        #    result = False
        #    break

        # temporarily swap in the display name for the start-of-task callback
        saved_name = handler.name
        handler.name = handler_name
        self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
        handler.name = saved_name

        if notified_hosts is None:
            notified_hosts = self._notified_handlers[handler._uuid]

        run_once = False
        try:
            action = action_loader.get(handler.action, class_only=True)
            if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                run_once = True
        except KeyError:
            # we don't care here, because the action may simply not have a
            # corresponding action plugin
            pass

        host_results = []
        for host in notified_hosts:
            # skip hosts the handler already ran on, and failed hosts unless
            # force_handlers is in effect
            if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
                task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler)
                self.add_tqm_variables(task_vars, play=iterator._play)
                self._queue_task(host, handler, task_vars, play_context)
                if run_once:
                    break

        # collect the results from the handler run
        host_results = self._wait_on_pending_results(iterator)

        try:
            included_files = IncludedFile.process_include_results(
                host_results,
                self._tqm,
                iterator=iterator,
                inventory=self._inventory,
                loader=self._loader,
                variable_manager=self._variable_manager
            )
        except AnsibleError as e:
            return False

        result = True
        if len(included_files) > 0:
            for included_file in included_files:
                try:
                    new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
                    # for every task in each block brought in by the include, add the list
                    # of hosts which included the file to the notified_handlers dict
                    for block in new_blocks:
                        iterator._play.handlers.append(block)
                        iterator.cache_block_tasks(block)
                        for task in block.block:
                            # recurse: each included task is itself run as a
                            # handler on the hosts that included the file
                            result = self._do_handler_run(
                                handler=task,
                                handler_name=None,
                                iterator=iterator,
                                play_context=play_context,
                                notified_hosts=included_file._hosts[:],
                            )
                            if not result:
                                break
                except AnsibleError as e:
                    for host in included_file._hosts:
                        iterator.mark_host_failed(host)
                        self._tqm._failed_hosts[host.name] = True
                    display.warning(str(e))
                    continue

        # wipe the notification list
        self._notified_handlers[handler._uuid] = []
        display.debug("done running handlers, result is: %s" % result)
        return result
def _take_step(self, task, host=None):
ret=False
msg=u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y','yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _execute_meta(self, task, play_context, iterator, target_host):
    """Execute a 'meta' task (noop, flush_handlers, refresh_inventory,
    clear_facts, clear_host_errors, end_play, reset_connection) and return
    a single-element list containing the TaskResult for target_host.

    Raises AnsibleError for an unknown meta action.
    """
    # meta tasks store their args in the _raw_params field of args,
    # since they do not use k=v pairs, so get that
    meta_action = task.args.get('_raw_params')

    # FIXME(s):
    # * raise an error or show a warning when a conditional is used
    #   on a meta task that doesn't support them

    def _evaluate_conditional(h):
        # Evaluate the task's `when:` clause against host h's variables.
        all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)
        return task.evaluate_conditional(templar, all_vars)

    skipped = False
    msg = ''
    if meta_action == 'noop':
        # FIXME: issue a callback for the noop here?
        msg = "noop"
    elif meta_action == 'flush_handlers':
        self.run_handlers(iterator, play_context)
        msg = "ran handlers"
    elif meta_action == 'refresh_inventory':
        self._inventory.refresh_inventory()
        msg = "inventory successfully refreshed"
    elif meta_action == 'clear_facts':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._variable_manager.clear_facts(host)
            msg = "facts cleared"
        else:
            skipped = True
    elif meta_action == 'clear_host_errors':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._tqm._failed_hosts.pop(host.name, False)
                self._tqm._unreachable_hosts.pop(host.name, False)
                iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
            msg = "cleared host errors"
        else:
            skipped = True
    elif meta_action == 'end_play':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                # Idiom fix: was `not host.name in ...`.  Unreachable hosts
                # keep their current state instead of being marked complete.
                if host.name not in self._tqm._unreachable_hosts:
                    iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
            msg = "ending play"
    elif meta_action == 'reset_connection':
        connection = connection_loader.get(play_context.connection, play_context, os.devnull)
        if connection:
            connection.reset()
            msg = 'reset connection'
        else:
            msg = 'no connection, nothing to reset'
    else:
        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

    result = {'msg': msg}
    if skipped:
        result['skipped'] = True
    else:
        # Meta actions never report a change on the target.
        result['changed'] = False

    display.vv("META: %s" % msg)
    return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
    """Return the hosts of this iterator's play that are still reachable."""
    unreachable = self._tqm._unreachable_hosts
    return [
        candidate
        for candidate in self._inventory.get_hosts(iterator._play.hosts,
                                                   order=iterator._play.order)
        if candidate.name not in unreachable
    ]
| gpl-3.0 |
# (dataset metadata row repaired; original file: tombtc/dashman lib/ecdsa/keys.py)
import binascii
from . import ecdsa
from . import der
from . import rfc6979
from .curves import NIST192p, find_curve
from .util import string_to_number, number_to_string, randrange
from .util import sigencode_string, sigdecode_string
from .util import oid_ecPublicKey, encoded_oid_ecPublicKey
from .six import PY3, b
from hashlib import sha1
class BadSignatureError(Exception):
    """Raised when a signature does not verify against the public key."""
    pass
class BadDigestError(Exception):
    """Raised when a digest is longer than the curve's order allows."""
    pass
class VerifyingKey:
    """ECDSA public (verifying) key.

    Instances are created through the ``from_*`` classmethods; the plain
    constructor is deliberately unusable.  A key can be serialized to a
    raw string, DER, or PEM, and used to verify signatures over data or
    pre-computed digests.
    """
    def __init__(self, _error__please_use_generate=None):
        # Guard: force construction through the classmethod factories.
        if not _error__please_use_generate:
            raise TypeError("Please use SigningKey.generate() to construct me")
    @classmethod
    def from_public_point(klass, point, curve=NIST192p, hashfunc=sha1):
        """Build a VerifyingKey from an elliptic-curve *point* on *curve*.

        hashfunc becomes the default hash used by verify().
        """
        self = klass(_error__please_use_generate=True)
        self.curve = curve
        self.default_hashfunc = hashfunc
        self.pubkey = ecdsa.Public_key(curve.generator, point)
        self.pubkey.order = curve.order
        return self
    @classmethod
    def from_string(klass, string, curve=NIST192p, hashfunc=sha1,
                    validate_point=True):
        """Build a key from the raw x||y byte string produced by to_string().

        NOTE(review): length/point validation uses ``assert``, which is
        stripped under ``python -O``.
        """
        order = curve.order
        assert len(string) == curve.verifying_key_length, \
               (len(string), curve.verifying_key_length)
        # The string is the fixed-width big-endian x coordinate followed
        # by the y coordinate.
        xs = string[:curve.baselen]
        ys = string[curve.baselen:]
        assert len(xs) == curve.baselen, (len(xs), curve.baselen)
        assert len(ys) == curve.baselen, (len(ys), curve.baselen)
        x = string_to_number(xs)
        y = string_to_number(ys)
        if validate_point:
            assert ecdsa.point_is_valid(curve.generator, x, y)
        from . import ellipticcurve
        point = ellipticcurve.Point(curve.curve, x, y, order)
        return klass.from_public_point(point, curve, hashfunc)
    @classmethod
    def from_pem(klass, string):
        """Build a key from a PEM-armored public key."""
        return klass.from_der(der.unpem(string))
    @classmethod
    def from_der(klass, string):
        """Build a key from a DER-encoded SubjectPublicKeyInfo structure."""
        # [[oid_ecPublicKey,oid_curve], point_str_bitstring]
        s1,empty = der.remove_sequence(string)
        if empty != b(""):
            raise der.UnexpectedDER("trailing junk after DER pubkey: %s" %
                                    binascii.hexlify(empty))
        s2,point_str_bitstring = der.remove_sequence(s1)
        # s2 = oid_ecPublicKey,oid_curve
        oid_pk, rest = der.remove_object(s2)
        oid_curve, empty = der.remove_object(rest)
        if empty != b(""):
            raise der.UnexpectedDER("trailing junk after DER pubkey objects: %s" %
                                    binascii.hexlify(empty))
        assert oid_pk == oid_ecPublicKey, (oid_pk, oid_ecPublicKey)
        curve = find_curve(oid_curve)
        point_str, empty = der.remove_bitstring(point_str_bitstring)
        if empty != b(""):
            raise der.UnexpectedDER("trailing junk after pubkey pointstring: %s" %
                                    binascii.hexlify(empty))
        # \x00 pads the bitstring; \x04 marks an uncompressed point.
        assert point_str.startswith(b("\x00\x04"))
        return klass.from_string(point_str[2:], curve)
    def to_string(self):
        # VerifyingKey.from_string(vk.to_string()) == vk as long as the
        # curves are the same: the curve itself is not included in the
        # serialized form
        order = self.pubkey.order
        x_str = number_to_string(self.pubkey.point.x(), order)
        y_str = number_to_string(self.pubkey.point.y(), order)
        return x_str + y_str
    def to_pem(self):
        """Return the key as a PEM-armored "PUBLIC KEY" block."""
        return der.topem(self.to_der(), "PUBLIC KEY")
    def to_der(self):
        """Return the key as DER-encoded SubjectPublicKeyInfo."""
        order = self.pubkey.order
        x_str = number_to_string(self.pubkey.point.x(), order)
        y_str = number_to_string(self.pubkey.point.y(), order)
        # Uncompressed-point encoding, wrapped in a bitstring.
        point_str = b("\x00\x04") + x_str + y_str
        return der.encode_sequence(der.encode_sequence(encoded_oid_ecPublicKey,
                                                       self.curve.encoded_oid),
                                   der.encode_bitstring(point_str))
    def verify(self, signature, data, hashfunc=None, sigdecode=sigdecode_string):
        """Hash *data* (default hash from construction) and verify *signature*."""
        hashfunc = hashfunc or self.default_hashfunc
        digest = hashfunc(data).digest()
        return self.verify_digest(signature, digest, sigdecode)
    def verify_digest(self, signature, digest, sigdecode=sigdecode_string):
        """Verify *signature* over a pre-computed *digest*.

        Returns True on success; raises BadSignatureError on failure and
        BadDigestError when the digest is longer than the curve allows.
        """
        if len(digest) > self.curve.baselen:
            raise BadDigestError("this curve (%s) is too short "
                                 "for your digest (%d)" % (self.curve.name,
                                                           8*len(digest)))
        number = string_to_number(digest)
        r, s = sigdecode(signature, self.pubkey.order)
        sig = ecdsa.Signature(r, s)
        if self.pubkey.verifies(number, sig):
            return True
        raise BadSignatureError
class SigningKey:
    """ECDSA private (signing) key.

    Instances are created through generate(), from_secret_exponent(),
    from_string(), from_pem() or from_der(); the plain constructor is
    deliberately unusable.  Supports random-k and RFC 6979 deterministic
    signing, plus raw/DER/PEM serialization.
    """
    def __init__(self, _error__please_use_generate=None):
        # Guard: force construction through the classmethod factories.
        if not _error__please_use_generate:
            raise TypeError("Please use SigningKey.generate() to construct me")
    @classmethod
    def generate(klass, curve=NIST192p, entropy=None, hashfunc=sha1):
        """Create a fresh random key on *curve* (entropy defaults to the
        util module's RNG)."""
        secexp = randrange(curve.order, entropy)
        return klass.from_secret_exponent(secexp, curve, hashfunc)
    # to create a signing key from a short (arbitrary-length) seed, convert
    # that seed into an integer with something like
    # secexp=util.randrange_from_seed__X(seed, curve.order), and then pass
    # that integer into SigningKey.from_secret_exponent(secexp, curve)
    @classmethod
    def from_secret_exponent(klass, secexp, curve=NIST192p, hashfunc=sha1):
        """Build a key from the integer secret exponent
        (must satisfy 1 <= secexp < curve.order)."""
        self = klass(_error__please_use_generate=True)
        self.curve = curve
        self.default_hashfunc = hashfunc
        self.baselen = curve.baselen
        n = curve.order
        # NOTE(review): range check is an assert, stripped under -O.
        assert 1 <= secexp < n
        # Derive the matching public point and keep the public half handy.
        pubkey_point = curve.generator*secexp
        pubkey = ecdsa.Public_key(curve.generator, pubkey_point)
        pubkey.order = n
        self.verifying_key = VerifyingKey.from_public_point(pubkey_point, curve,
                                                            hashfunc)
        self.privkey = ecdsa.Private_key(pubkey, secexp)
        self.privkey.order = n
        return self
    @classmethod
    def from_string(klass, string, curve=NIST192p, hashfunc=sha1):
        """Build a key from a fixed-length big-endian secret-exponent string."""
        assert len(string) == curve.baselen, (len(string), curve.baselen)
        secexp = string_to_number(string)
        return klass.from_secret_exponent(secexp, curve, hashfunc)
    @classmethod
    def from_pem(klass, string, hashfunc=sha1):
        # the privkey pem file has two sections: "EC PARAMETERS" and "EC
        # PRIVATE KEY". The first is redundant.
        if PY3 and isinstance(string, str):
            string = string.encode()
        privkey_pem = string[string.index(b("-----BEGIN EC PRIVATE KEY-----")):]
        return klass.from_der(der.unpem(privkey_pem), hashfunc)
    @classmethod
    def from_der(klass, string, hashfunc=sha1):
        """Parse a DER-encoded EC private key (RFC 5915-style layout)."""
        # SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
        #      cont[1],bitstring])
        s, empty = der.remove_sequence(string)
        if empty != b(""):
            raise der.UnexpectedDER("trailing junk after DER privkey: %s" %
                                    binascii.hexlify(empty))
        one, s = der.remove_integer(s)
        if one != 1:
            raise der.UnexpectedDER("expected '1' at start of DER privkey,"
                                    " got %d" % one)
        privkey_str, s = der.remove_octet_string(s)
        tag, curve_oid_str, s = der.remove_constructed(s)
        if tag != 0:
            raise der.UnexpectedDER("expected tag 0 in DER privkey,"
                                    " got %d" % tag)
        curve_oid, empty = der.remove_object(curve_oid_str)
        if empty != b(""):
            raise der.UnexpectedDER("trailing junk after DER privkey "
                                    "curve_oid: %s" % binascii.hexlify(empty))
        curve = find_curve(curve_oid)
        # we don't actually care about the following fields
        #
        #tag, pubkey_bitstring, s = der.remove_constructed(s)
        #if tag != 1:
        #    raise der.UnexpectedDER("expected tag 1 in DER privkey, got %d"
        #                            % tag)
        #pubkey_str = der.remove_bitstring(pubkey_bitstring)
        #if empty != "":
        #    raise der.UnexpectedDER("trailing junk after DER privkey "
        #                            "pubkeystr: %s" % binascii.hexlify(empty))
        # our from_string method likes fixed-length privkey strings
        if len(privkey_str) < curve.baselen:
            privkey_str = b("\x00")*(curve.baselen-len(privkey_str)) + privkey_str
        return klass.from_string(privkey_str, curve, hashfunc)
    def to_string(self):
        """Serialize the secret exponent as a fixed-length byte string."""
        secexp = self.privkey.secret_multiplier
        s = number_to_string(secexp, self.privkey.order)
        return s
    def to_pem(self):
        """Return the key as a PEM-armored "EC PRIVATE KEY" block."""
        # TODO: "BEGIN ECPARAMETERS"
        return der.topem(self.to_der(), "EC PRIVATE KEY")
    def to_der(self):
        """Return the key as a DER-encoded EC private key."""
        # SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
        #      cont[1],bitstring])
        # The embedded public key uses the uncompressed-point encoding.
        encoded_vk = b("\x00\x04") + self.get_verifying_key().to_string()
        return der.encode_sequence(der.encode_integer(1),
                                   der.encode_octet_string(self.to_string()),
                                   der.encode_constructed(0, self.curve.encoded_oid),
                                   der.encode_constructed(1, der.encode_bitstring(encoded_vk)),
                                   )
    def get_verifying_key(self):
        """Return the matching public VerifyingKey."""
        return self.verifying_key
    def sign_deterministic(self, data, hashfunc=None, sigencode=sigencode_string):
        """Sign *data* using an RFC 6979 deterministic 'k' (no RNG needed)."""
        hashfunc = hashfunc or self.default_hashfunc
        digest = hashfunc(data).digest()
        return self.sign_digest_deterministic(digest, hashfunc=hashfunc, sigencode=sigencode)
    def sign_digest_deterministic(self, digest, hashfunc=None, sigencode=sigencode_string):
        """
        Calculates 'k' from data itself, removing the need for strong
        random generator and producing deterministic (reproducible) signatures.
        See RFC 6979 for more details.
        """
        secexp = self.privkey.secret_multiplier
        k = rfc6979.generate_k(
            self.curve.generator.order(), secexp, hashfunc, digest)
        return self.sign_digest(digest, sigencode=sigencode, k=k)
    def sign(self, data, entropy=None, hashfunc=None, sigencode=sigencode_string, k=None):
        """
        hashfunc= should behave like hashlib.sha1 . The output length of the
        hash (in bytes) must not be longer than the length of the curve order
        (rounded up to the nearest byte), so using SHA256 with nist256p is
        ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event
        of a hash output larger than the curve order, the hash will
        effectively be wrapped mod n).
        Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode,
        or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256.
        """
        hashfunc = hashfunc or self.default_hashfunc
        h = hashfunc(data).digest()
        return self.sign_digest(h, entropy, sigencode, k)
    def sign_digest(self, digest, entropy=None, sigencode=sigencode_string, k=None):
        """Sign a pre-computed *digest*; raises BadDigestError when it is
        longer than the curve's order allows."""
        if len(digest) > self.curve.baselen:
            raise BadDigestError("this curve (%s) is too short "
                                 "for your digest (%d)" % (self.curve.name,
                                                           8*len(digest)))
        number = string_to_number(digest)
        r, s = self.sign_number(number, entropy, k)
        return sigencode(r, s, self.privkey.order)
    def sign_number(self, number, entropy=None, k=None):
        """Sign the integer *number* and return the (r, s) pair."""
        # returns a pair of numbers
        order = self.privkey.order
        # privkey.sign() may raise RuntimeError in the amazingly unlikely
        # (2**-192) event that r=0 or s=0, because that would leak the key.
        # We could re-try with a different 'k', but we couldn't test that
        # code, so I choose to allow the signature to fail instead.
        # If k is set, it is used directly. In other cases
        # it is generated using entropy function
        if k is not None:
            _k = k
        else:
            _k = randrange(order, entropy)
        assert 1 <= _k < order
        sig = self.privkey.sign(number, _k)
        return sig.r, sig.s
| mit |
# (dataset metadata row repaired; original file: Lib/test/regrtest.py, Brython 3.1.1)
#! /usr/bin/python3.3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equals to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide a
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
    # Absolutize package locations of already-imported modules so that
    # imports keep resolving after the harness chdirs to a temporary dir.
    if hasattr(module, '__path__'):
        module.__path__ = [os.path.abspath(path) for path in module.__path__]
    if hasattr(module, '__file__'):
        module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
    try:
        import resource
    except ImportError:
        pass
    else:
        soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
        # Raise the soft stack limit to 2 MiB (but never above the hard cap).
        newsoft = min(hard, max(soft, 1024*2048))
        resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1            # test ran and succeeded
FAILED = 0            # test ran and failed
ENV_CHANGED = -1      # test altered the execution environment
SKIPPED = -2          # test was skipped
RESOURCE_DENIED = -3  # test needs a resource not enabled via -u
INTERRUPTED = -4      # run interrupted (e.g. KeyboardInterrupt)
CHILD_ERROR = -5      # error in a child process
from test import support
# Resources selectable with -u/--use (see module docstring).
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
                  'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
# Temporary directory used for single-step state files (pynexttest).
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
    """Report a command-line error on stderr and exit with status 2.

    msg -- the error text (typically a getopt.error) shown to the user.
    """
    for line in (msg, "Use --help for usage"):
        print(line, file=sys.stderr)
    sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=0, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match=', 'next='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = 1
elif o == '--next':
single = int(a)
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_tracebacks_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_tracebacks_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single != 1 or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
first_selected = selected[0]
index_selected = alltests.index(first_selected)
if index_selected + single > len(alltests):
single = len(alltests) - index_selected
selected = alltests[index_selected:index_selected+single]
try:
next_single_test = alltests[index_selected+single]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
# NOTE: STDTESTS and NOTTESTS are the default values for findtests()'
# stdtests/nottests parameters; STDTESTS always run first and in this order.
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    # A module qualifies when its name starts with "test_", it is either a
    # .py file or an extensionless entry (a package directory), and it is
    # neither a standard test (already listed first) nor explicitly excluded.
    excluded = set(stdtests) | nottests
    split_names = (os.path.splitext(entry)
                   for entry in os.listdir(findtestdir(testdir)))
    discovered = [root for root, suffix in split_names
                  if root.startswith("test_")
                  and suffix in (".py", "")
                  and root not in excluded]
    return stdtests + sorted(discovered)
# A plain generator would not be safe here: several worker threads call
# next() on the shared iterator concurrently.
class MultiprocessTests(object):
    """A thread-safe iterator over tests for multiprocess mode."""
    def __init__(self, tests):
        # The lock serializes access to the wrapped iterator.
        self.lock = threading.Lock()
        self.tests = tests
        self.interrupted = False
    def __iter__(self):
        return self
    def __next__(self):
        with self.lock:
            if not self.interrupted:
                return next(self.tests)
        # Signal exhaustion once a KeyboardInterrupt has been recorded.
        raise StopIteration('tests interrupted')
def replace_stdout():
    """Set stdout encoder error handler to backslashreplace (as stderr error
    handler) to avoid UnicodeEncodeError when printing a traceback"""
    import atexit
    stdout = sys.stdout
    # Re-open the same OS-level file descriptor with a forgiving error
    # handler; closefd=False leaves ownership of the fd with the original
    # stream object so closing the wrapper does not close real stdout.
    sys.stdout = open(stdout.fileno(), 'w',
        encoding=stdout.encoding,
        errors="backslashreplace",
        closefd=False,
        newline='\n')
    def restore_stdout():
        # At interpreter shutdown: flush/close the wrapper, then put the
        # original stream object back.
        sys.stdout.close()
        sys.stdout = stdout
    atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
            huntrleaks=False, debug=False, use_resources=None,
            output_on_failure=False, failfast=False, match_tests=None,
            timeout=None):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    use_resources -- if not None, installed as support.use_resources
    output_on_failure -- if true, capture output and display it on failure
    failfast -- if true, abort the test run on the first failure
    match_tests -- if not None, only run matching test methods
    timeout -- dump the traceback and exit if a test takes more than
               timeout seconds

    Returns one of the test result constants:
        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution
                         environment
        FAILED           test failed
        PASSED           test passed
    """
    if use_resources is not None:
        support.use_resources = use_resources
    use_timeout = (timeout is not None)
    if use_timeout:
        # Watchdog: dump all thread tracebacks and exit if the test hangs.
        faulthandler.dump_tracebacks_later(timeout, exit=True)
    try:
        support.match_tests = match_tests
        if failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True
            # Reuse the same instance to all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (eg. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                # Capture everything the test prints; replay it on stderr
                # only when the test actually failed.
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(test, verbose, quiet, huntrleaks,
                                       debug, display_failure=False)
                if result[0] == FAILED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = verbose  # Tell tests to be moderately quiet
            result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
                                   display_failure=not verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_tracebacks_later()
        cleanup_test_droppings(test, verbose)
# Function attribute holding the shared capture buffer (see above).
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
    """Save bits of the test environment and restore them at block exit.
    with saved_test_environment(testname, verbose, quiet):
        #stuff
    Unless quiet is True, a warning is printed to stderr if any of
    the saved items was changed by the test. The attribute 'changed'
    is initially False, but is set to True if a change is detected.
    If verbose is more than 1, the before and after state of changed
    items is also printed.
    """
    # Class-level default; __exit__ sets an instance attribute to True when
    # any saved resource differs after the test ran.
    changed = False
    def __init__(self, testname, verbose=0, quiet=False):
        self.testname = testname
        self.verbose = verbose
        self.quiet = quiet
    # To add things to save and restore, add a name XXX to the resources list
    # and add corresponding get_XXX/restore_XXX functions. get_XXX should
    # return the value to be saved and compared against a second call to the
    # get function when test execution completes. restore_XXX should accept
    # the saved value and restore the resource using it. It will be called if
    # and only if a change in the value is detected.
    #
    # Note: XXX will have any '.' replaced with '_' characters when determining
    # the corresponding method names.
    resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
                 'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
                 'warnings.filters', 'asyncore.socket_map',
                 'logging._handlers', 'logging._handlerList', 'sys.gettrace',
                 'sys.warnoptions', 'threading._dangling',
                 'multiprocessing.process._dangling',
                 'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
                 'support.TESTFN',
                )
    # Mutable-container getters return (id(obj), obj, copy-of-obj) so that
    # both rebinding (a new object) and in-place mutation are detected.
    def get_sys_argv(self):
        return id(sys.argv), sys.argv, sys.argv[:]
    def restore_sys_argv(self, saved_argv):
        sys.argv = saved_argv[1]
        sys.argv[:] = saved_argv[2]
    def get_cwd(self):
        return os.getcwd()
    def restore_cwd(self, saved_cwd):
        os.chdir(saved_cwd)
    def get_sys_stdout(self):
        return sys.stdout
    def restore_sys_stdout(self, saved_stdout):
        sys.stdout = saved_stdout
    def get_sys_stderr(self):
        return sys.stderr
    def restore_sys_stderr(self, saved_stderr):
        sys.stderr = saved_stderr
    def get_sys_stdin(self):
        return sys.stdin
    def restore_sys_stdin(self, saved_stdin):
        sys.stdin = saved_stdin
    def get_os_environ(self):
        return id(os.environ), os.environ, dict(os.environ)
    def restore_os_environ(self, saved_environ):
        os.environ = saved_environ[1]
        os.environ.clear()
        os.environ.update(saved_environ[2])
    def get_sys_path(self):
        return id(sys.path), sys.path, sys.path[:]
    def restore_sys_path(self, saved_path):
        sys.path = saved_path[1]
        sys.path[:] = saved_path[2]
    def get_sys_path_hooks(self):
        return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
    def restore_sys_path_hooks(self, saved_hooks):
        sys.path_hooks = saved_hooks[1]
        sys.path_hooks[:] = saved_hooks[2]
    def get_sys_gettrace(self):
        return sys.gettrace()
    def restore_sys_gettrace(self, trace_fxn):
        sys.settrace(trace_fxn)
    def get___import__(self):
        return builtins.__import__
    def restore___import__(self, import_):
        builtins.__import__ = import_
    def get_warnings_filters(self):
        return id(warnings.filters), warnings.filters, warnings.filters[:]
    def restore_warnings_filters(self, saved_filters):
        warnings.filters = saved_filters[1]
        warnings.filters[:] = saved_filters[2]
    def get_asyncore_socket_map(self):
        asyncore = sys.modules.get('asyncore')
        # XXX Making a copy keeps objects alive until __exit__ gets called.
        return asyncore and asyncore.socket_map.copy() or {}
    def restore_asyncore_socket_map(self, saved_map):
        asyncore = sys.modules.get('asyncore')
        if asyncore is not None:
            asyncore.close_all(ignore_all=True)
            asyncore.socket_map.update(saved_map)
    # NOTE(review): the shutil getters below are not listed in `resources`,
    # so they are currently never called -- confirm whether
    # 'shutil.archive_formats' / 'shutil.unpack_formats' should be added.
    def get_shutil_archive_formats(self):
        # we could call get_archives_formats() but that only returns the
        # registry keys; we want to check the values too (the functions that
        # are registered)
        return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
    def restore_shutil_archive_formats(self, saved):
        shutil._ARCHIVE_FORMATS = saved[0]
        shutil._ARCHIVE_FORMATS.clear()
        shutil._ARCHIVE_FORMATS.update(saved[1])
    def get_shutil_unpack_formats(self):
        return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
    def restore_shutil_unpack_formats(self, saved):
        shutil._UNPACK_FORMATS = saved[0]
        shutil._UNPACK_FORMATS.clear()
        shutil._UNPACK_FORMATS.update(saved[1])
    def get_logging__handlers(self):
        # _handlers is a WeakValueDictionary
        return id(logging._handlers), logging._handlers, logging._handlers.copy()
    def restore_logging__handlers(self, saved_handlers):
        # Can't easily revert the logging state
        pass
    def get_logging__handlerList(self):
        # _handlerList is a list of weakrefs to handlers
        return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
    def restore_logging__handlerList(self, saved_handlerList):
        # Can't easily revert the logging state
        pass
    def get_sys_warnoptions(self):
        return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
    def restore_sys_warnoptions(self, saved_options):
        sys.warnoptions = saved_options[1]
        sys.warnoptions[:] = saved_options[2]
    # Controlling dangling references to Thread objects can make it easier
    # to track reference leaks.
    def get_threading__dangling(self):
        if not threading:
            return None
        # This copies the weakrefs without making any strong reference
        return threading._dangling.copy()
    def restore_threading__dangling(self, saved):
        if not threading:
            return
        threading._dangling.clear()
        threading._dangling.update(saved)
    # Same for Process objects
    def get_multiprocessing_process__dangling(self):
        if not multiprocessing:
            return None
        # This copies the weakrefs without making any strong reference
        return multiprocessing.process._dangling.copy()
    def restore_multiprocessing_process__dangling(self, saved):
        if not multiprocessing:
            return
        multiprocessing.process._dangling.clear()
        multiprocessing.process._dangling.update(saved)
    def get_sysconfig__CONFIG_VARS(self):
        # make sure the dict is initialized
        sysconfig.get_config_var('prefix')
        return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
                dict(sysconfig._CONFIG_VARS))
    def restore_sysconfig__CONFIG_VARS(self, saved):
        sysconfig._CONFIG_VARS = saved[1]
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(saved[2])
    def get_sysconfig__INSTALL_SCHEMES(self):
        return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
                sysconfig._INSTALL_SCHEMES.copy())
    def restore_sysconfig__INSTALL_SCHEMES(self, saved):
        sysconfig._INSTALL_SCHEMES = saved[1]
        sysconfig._INSTALL_SCHEMES.clear()
        sysconfig._INSTALL_SCHEMES.update(saved[2])
    def get_support_TESTFN(self):
        # Saved state is only the *kind* of thing TESTFN currently is:
        # 'f' (file), 'd' (directory) or None (absent).
        if os.path.isfile(support.TESTFN):
            result = 'f'
        elif os.path.isdir(support.TESTFN):
            result = 'd'
        else:
            result = None
        return result
    def restore_support_TESTFN(self, saved_value):
        # Only cleans up something the test *created*; it cannot bring back
        # a TESTFN the test deleted.
        if saved_value is None:
            if os.path.isfile(support.TESTFN):
                os.unlink(support.TESTFN)
            elif os.path.isdir(support.TESTFN):
                shutil.rmtree(support.TESTFN)
    def resource_info(self):
        # Yield (name, getter, restorer) triples derived from `resources`.
        for name in self.resources:
            method_suffix = name.replace('.', '_')
            get_name = 'get_' + method_suffix
            restore_name = 'restore_' + method_suffix
            yield name, getattr(self, get_name), getattr(self, restore_name)
    def __enter__(self):
        self.saved_values = dict((name, get()) for name, get, restore
                                 in self.resource_info())
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        saved_values = self.saved_values
        del self.saved_values
        for name, get, restore in self.resource_info():
            current = get()
            original = saved_values.pop(name)
            # Check for changes to the resource's value
            if current != original:
                self.changed = True
                restore(original)
                if not self.quiet:
                    print("Warning -- {} was modified by {}".format(
                                 name, self.testname),
                                 file=sys.stderr)
                if self.verbose > 1:
                    print("  Before: {}\n  After:  {} ".format(
                              original, current),
                              file=sys.stderr)
        # Never suppress an exception raised by the test body.
        return False
def runtest_inner(test, verbose, quiet,
                  huntrleaks=False, debug=False, display_failure=True):
    """Import and execute one test module inside a saved_test_environment.

    Returns a (result-constant, test_time) pair; see runtest() for the
    meaning of the result constants.
    """
    support.unload(test)
    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        if test.startswith('test.'):
            abstest = test
        else:
            # Always import it from the test package
            abstest = 'test.' + test
        with saved_test_environment(test, verbose, quiet) as environment:
            start_time = time.time()
            the_package = __import__(abstest, globals(), locals(), [])
            the_module = getattr(the_package, test)
            # If the test has a test_main, that will run the appropriate
            # tests. If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                tests = unittest.TestLoader().loadTestsFromModule(the_module)
                test_runner = lambda: support.run_unittest(tests)
            test_runner()
            if huntrleaks:
                # Re-run the test repeatedly, watching total refcounts.
                refleak = dash_R(the_module, test, test_runner,
                                 huntrleaks)
            test_time = time.time() - start_time
    except support.ResourceDenied as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return SKIPPED, test_time
    except KeyboardInterrupt:
        # Propagate so the caller can record an interrupted run.
        raise
    except support.TestFailed as msg:
        if display_failure:
            print("test", test, "failed --", msg, file=sys.stderr)
        else:
            print("test", test, "failed", file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    except:
        # Any other exception counts as a crash; report the traceback.
        msg = traceback.format_exc()
        print("test", test, "crashed --", msg, file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    else:
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
    """Best-effort removal of files/directories commonly left behind by a test."""
    import shutil
    import stat
    import gc
    # Kill any dangling references to open files etc. first. This also
    # flushes ResourceWarnings that would otherwise fire during the next
    # test run, and possibly produce failures.
    gc.collect()
    # Tests shouldn't leave anything behind, but a failing test can't always
    # tidy up after itself. The consequences can be especially nasty on
    # Windows, where a file left open can't be deleted by name -- at minimum
    # we name the offending test.
    for leftover in (support.TESTFN,
                     "db_home",
                    ):
        if not os.path.exists(leftover):
            continue
        if os.path.isdir(leftover):
            kind, remove = "directory", shutil.rmtree
        elif os.path.isfile(leftover):
            kind, remove = "file", os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % leftover)
        if verbose:
            print("%r left behind %s %r" % (testname, kind, leftover))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if hasattr(os, 'chmod'):
                os.chmod(leftover, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            remove(leftover)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, leftover, msg)),
                  file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
    """Run a test multiple times, looking for reference leaks.

    the_module -- imported module object for the test
    test -- test name, used in the leak report
    indirect_test -- callable that runs the test; if falsy, the module is
                     re-imported for each run instead
    huntrleaks -- (nwarmup, ntracked, fname) triple from the -R option

    Returns:
        False if the test didn't leak references; True if we detected refleaks.
    """
    # This code is hackish and inelegant, but it seems to do the job.
    import copyreg
    import collections.abc
    if not hasattr(sys, 'gettotalrefcount'):
        raise Exception("Tracking reference leaks requires a debug build "
                        "of Python")
    # Save current values for dash_R_cleanup() to restore.
    fs = warnings.filters[:]
    ps = copyreg.dispatch_table.copy()
    pic = sys.path_importer_cache.copy()
    try:
        import zipimport
    except ImportError:
        zdc = None # Run unmodified on platforms without zipimport support
    else:
        zdc = zipimport._zip_directory_cache.copy()
    # Snapshot every ABC's registry so cleanup can reset them between runs.
    abcs = {}
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            abcs[obj] = obj._abc_registry.copy()
    if indirect_test:
        def run_the_test():
            indirect_test()
    else:
        def run_the_test():
            del sys.modules[the_module.__name__]
            exec('import ' + the_module.__name__)
    deltas = []
    nwarmup, ntracked, fname = huntrleaks
    fname = os.path.join(support.SAVEDCWD, fname)
    repcount = nwarmup + ntracked
    # Progress header: one digit column per repetition.
    print("beginning", repcount, "repetitions", file=sys.stderr)
    print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
    sys.stderr.flush()
    dash_R_cleanup(fs, ps, pic, zdc, abcs)
    for i in range(repcount):
        rc_before = sys.gettotalrefcount()
        run_the_test()
        sys.stderr.write('.')
        sys.stderr.flush()
        # Clean caches before measuring so only genuine leaks are counted.
        dash_R_cleanup(fs, ps, pic, zdc, abcs)
        rc_after = sys.gettotalrefcount()
        # Warm-up runs are ignored; they populate one-time caches.
        if i >= nwarmup:
            deltas.append(rc_after - rc_before)
    print(file=sys.stderr)
    if any(deltas):
        msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
        print(msg, file=sys.stderr)
        sys.stderr.flush()
        # Also append the report to the -R log file.
        with open(fname, "a") as refrep:
            print(msg, file=refrep)
            refrep.flush()
        return True
    return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    """Restore saved global state and flush interpreter caches between -R runs.

    fs/ps/pic/zdc/abcs are the snapshots taken by dash_R(): warnings filters,
    copyreg dispatch table, sys.path_importer_cache, zipimport directory
    cache (or None), and per-ABC registry copies.
    """
    import gc, copyreg
    import _strptime, linecache
    import urllib.parse, urllib.request, mimetypes, doctest
    import struct, filecmp, collections.abc
    from distutils.dir_util import _path_created
    from weakref import WeakSet
    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__
    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)
    # clear type cache
    sys._clear_type_cache()
    # Clear ABC registries, restoring previously saved ABC registries.
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()
    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()
    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urllib.parse.clear_cache()
    urllib.request.urlcleanup()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None
    try:
        import ctypes
    except ImportError:
        # Don't worry about resetting the cache if ctypes is not supported
        pass
    else:
        ctypes._reset_cache()
    # Collect cyclic trash.
    gc.collect()
def warm_caches():
    """Pre-populate interpreter-internal object caches.

    Creating these objects up front keeps lazily cached singletons (1-byte
    bytes slices, 1-char strings, small ints) from being mistaken for
    reference leaks by the -R machinery.
    """
    # char cache: one-byte slices of a bytes object
    table = bytes(range(256))
    for pos in range(256):
        table[pos:pos + 1]
    # unicode cache: every latin-1 single character
    [chr(code) for code in range(256)]
    # int cache: the interned small-integer range
    list(range(-5, 257))
def findtestdir(path=None):
    """Return *path* if truthy, else the directory containing this script."""
    if path:
        return path
    # __file__ may have an empty dirname when run from the current directory.
    return os.path.dirname(__file__) or os.curdir
def removepy(names):
    """Strip a trailing '.py' extension from each entry of *names*, in place.

    A falsy *names* (None or empty) is left untouched.
    """
    if not names:
        return
    for pos, entry in enumerate(names):
        stem, ext = os.path.splitext(entry)
        if ext == '.py':
            names[pos] = stem
def count(n, word):
    """Return '<n> <word>', naively pluralized with 's' when n != 1."""
    suffix = "" if n == 1 else "s"
    return "%d %s%s" % (n, word, suffix)
def printlist(x, width=70, indent=4):
    """Print the elements of iterable x to stdout.
    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """
    from textwrap import fill
    margin = ' ' * indent
    # 'x' may be a '--random' list or a set(); sort for a stable display.
    body = ' '.join(str(item) for item in sorted(x))
    print(fill(body, width, initial_indent=margin, subsequent_indent=margin))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
    """Compute the set of test names expected to be skipped on this platform."""
    def __init__(self):
        import os.path
        from test import test_timeout
        # self.valid stays False when sys.platform matches no _expectations
        # entry; isvalid()/getexpected() reflect that.
        self.valid = False
        expected = None
        for item in _expectations:
            if sys.platform.startswith(item[0]):
                expected = item[1]
                break
        if expected is not None:
            self.expected = set(expected.split())
            # These are broken tests, for now skipped on every platform.
            # XXX Fix these!
            self.expected.add('test_nis')
            # expected to be skipped on every platform, even Linux
            if not os.path.supports_unicode_filenames:
                self.expected.add('test_pep277')
            # doctest, profile and cProfile tests fail when the codec for the
            # fs encoding isn't built in because PyUnicode_Decode() adds two
            # calls into Python.
            encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
            if sys.getfilesystemencoding().lower() not in encs:
                self.expected.add('test_profile')
                self.expected.add('test_cProfile')
                self.expected.add('test_doctest')
            if test_timeout.skip_expected:
                self.expected.add('test_timeout')
            if sys.platform != "win32":
                # test_sqlite is only reliable on Windows where the library
                # is distributed with Python
                WIN_ONLY = {"test_unicode_file", "test_winreg",
                            "test_winsound", "test_startfile",
                            "test_sqlite", "test_msilib"}
                self.expected |= WIN_ONLY
            # NOTE(review): redundant -- 'test_nis' was already added
            # unconditionally above; harmless since self.expected is a set.
            if sys.platform != 'sunos5':
                self.expected.add('test_nis')
            if support.python_is_optimized():
                self.expected.add("test_gdb")
            self.valid = True
    def isvalid(self):
        "Return true iff _ExpectedSkips knows about the current platform."
        return self.valid
    def getexpected(self):
        """Return set of test names we expect to skip on current platform.
        self.isvalid() must be true.
        """
        assert self.isvalid()
        return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
    # Remove regrtest.py's own directory from the module search path. Despite
    # the elimination of implicit relative imports, this is still needed to
    # ensure that submodules of the test package do not inappropriately appear
    # as top-level modules even when people (or buildbots!) invoke regrtest.py
    # directly instead of using the -m switch
    mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
    i = len(sys.path)
    # NOTE(review): i starts at len(sys.path) and is decremented before
    # indexing, so the final iteration (i == 0 -> index -1) probes the last
    # entry a second time; harmless, but confirm this is intentional.
    while i >= 0:
        i -= 1
        if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
            del sys.path[i]
    # findtestdir() gets the dirname out of __file__, so we have to make it
    # absolute before changing the working directory.
    # For example __file__ may be relative when running trace or profile.
    # See issue #9323.
    __file__ = os.path.abspath(__file__)
    # sanity check
    assert __file__ == os.path.abspath(sys.argv[0])
    TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
    # Run the tests in a context manager that temporary changes the CWD to a
    # temporary and writable directory. If it's not possible to create or
    # change the CWD, the original CWD will be used. The original CWD is
    # available from support.SAVEDCWD.
    with support.temp_cwd(TESTCWD, quiet=True):
        main()
| gpl-3.0 |
AnishShah/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py | 25 | 3055 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InlineBijectorTest(test.TestCase):
  """Tests correctness of the inline constructed bijector."""

  def testBijector(self):
    # Build an Inline bijector from the exp/log callables and check that it
    # matches the behavior of the built-in Exp bijector.
    with self.cached_session():
      exp = Exp()
      inline = Inline(
          forward_fn=math_ops.exp,
          inverse_fn=math_ops.log,
          inverse_log_det_jacobian_fn=lambda y: -math_ops.log(y),
          forward_log_det_jacobian_fn=lambda x: x,
          forward_min_event_ndims=0,
          name="exp")
      self.assertEqual(exp.name, inline.name)
      x = [[[1., 2.], [3., 4.], [5., 6.]]]
      y = np.exp(x)
      self.assertAllClose(y, inline.forward(x).eval())
      self.assertAllClose(x, inline.inverse(y).eval())
      # ildj of exp is -sum(log(y)) reduced over the event dimension.
      self.assertAllClose(
          -np.sum(np.log(y), axis=-1),
          inline.inverse_log_det_jacobian(y, event_ndims=1).eval())
      # fldj must be the negation of ildj evaluated at the forward image.
      self.assertAllClose(
          -inline.inverse_log_det_jacobian(y, event_ndims=1).eval(),
          inline.forward_log_det_jacobian(x, event_ndims=1).eval())

  def testShapeGetters(self):
    # Exercise only the shape-mapping callables (no forward/inverse math):
    # this bijector appends a trailing size-1 dimension on forward and
    # strips it on inverse.
    with self.cached_session():
      bijector = Inline(
          forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
          forward_event_shape_fn=lambda x: x.as_list() + [1],
          inverse_event_shape_tensor_fn=lambda x: x[:-1],
          inverse_event_shape_fn=lambda x: x[:-1],
          forward_min_event_ndims=0,
          name="shape_only")
      x = tensor_shape.TensorShape([1, 2, 3])
      y = tensor_shape.TensorShape([1, 2, 3, 1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())
# Run the test cases when this file is executed directly (not imported).
if __name__ == "__main__":
  test.main()
| apache-2.0 |
100325128/VulnerabilitiesClassifier | classifier/OSTypes.py | 1 | 5892 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generic (version-less) OS family labels.
GENERIC_WINDOWS="Windows"
GENERIC_MAC_OS_X="Mac OS X"
GENERIC_SOLARIS="Solaris"
GENERIC_SUN_OS="sun os"
GENERIC_IPHONE_OS="iphone OS"
GENERIC_ANDROID="Android"
GENERIC_WINDOWS_PHONE="Windows Phone"
GENERIC_BLACKBERRY_OS="BlackBerry OS"
GENERIC_SYMBIAN_OS="Symbian OS"

# Desktop operating system versions.
MS_DOS="MS-DOS"
WINDOWS_NT="Windows NT"
WINDOWS_95="Windows 95"
WINDOWS_98="Windows 98"
WINDOWS_2000="Windows 2000"
WINDOWS_XP="Windows XP"
WINDOWS_VISTA="Windows Vista"
WINDOWS_7="Windows 7"
WINDOWS_8="Windows 8"
WINDOWS_10="Windows 10"
MAC_OS_X_10_0="Mac OS X 10.0"
MAC_OS_X_10_1="Mac OS X 10.1"
MAC_OS_X_10_2="Mac OS X 10.2"
MAC_OS_X_10_3="Mac OS X 10.3"
MAC_OS_X_10_4="Mac OS X 10.4"
MAC_OS_X_10_5="Mac OS X 10.5"
MAC_OS_X_10_6="Mac OS X 10.6"
MAC_OS_X_10_7="Mac OS X 10.7"
MAC_OS_X_10_8="Mac OS X 10.8"
MAC_OS_X_10_9="Mac OS X 10.9"
MAC_OS_X_10_10="Mac OS X 10.10"
MAC_OS_X_10_11="Mac OS X 10.11"
SOLARIS_2_0="Solaris 2.0"
SOLARIS_2_1="Solaris 2.1"
SOLARIS_2_2="Solaris 2.2"
SOLARIS_2_3="Solaris 2.3"
SOLARIS_2_4="Solaris 2.4"
SOLARIS_2_5="Solaris 2.5"
SOLARIS_2_5_1="Solaris 2.5.1"
SOLARIS_2_6="Solaris 2.6"
SOLARIS_7="Solaris 7"
SOLARIS_8="Solaris 8"
SOLARIS_9="Solaris 9"
SOLARIS_10="Solaris 10"
SOLARIS_11="Solaris 11"
LINUX="Linux"
#mobile
IPHONE_OS_1_0="iphone OS 1.0"
IPHONE_OS_1_1="iphone OS 1.1"
IPHONE_OS_1_1_1="iphone OS 1.1.1"
IPHONE_OS_2_0="iphone OS 2.0"
IPHONE_OS_2_1="iphone OS 2.1"
IPHONE_OS_2_2="iphone OS 2.2"
IPHONE_OS_3_0="iphone OS 3.0"
IPHONE_OS_3_1="iphone OS 3.1"
IPHONE_OS_3_2="iphone OS 3.2"
IPHONE_OS_4="iphone OS 4"
IPHONE_OS_4_1="iphone OS 4.1"
IPHONE_OS_4_2="iphone OS 4.2"
IPHONE_OS_4_2_10="iphone OS 4.2.10"
IPHONE_OS_4_3="iphone OS 4.3"
IPHONE_OS_5_0="iphone OS 5.0"
IPHONE_OS_5_1="iphone OS 5.1"
IPHONE_OS_6="iphone OS 6"
IPHONE_OS_7="iphone OS 7"
IPHONE_OS_8="iphone OS 8"
IPHONE_OS_9="iphone OS 9"
ANDROID_1_0="Android 1.0"
ANDROID_1_1="Android 1.1"
ANDROID_1_5="Android 1.5"
ANDROID_1_6="Android 1.6"
ANDROID_2_0="Android 2.0"
ANDROID_2_1="Android 2.1"
ANDROID_2_2="Android 2.2"
ANDROID_2_3="Android 2.3"
ANDROID_3="Android 3"
ANDROID_4_0="Android 4.0"
ANDROID_4_1="Android 4.1"
ANDROID_4_2="Android 4.2"
ANDROID_4_3="Android 4.3"
ANDROID_4_4="Android 4.4"
ANDROID_5_0="Android 5.0"
ANDROID_6_0="Android 6.0"
WINDOWS_PHONE_7="Windows Phone 7"
WINDOWS_PHONE_8="Windows Phone 8"
WINDOWS_PHONE_8_1="Windows Phone 8.1"
BLACKBERRY_OS_1_0="BlackBerry OS 1.0"
BLACKBERRY_OS_3_6="BlackBerry OS 3.6"
BLACKBERRY_OS_4_5="BlackBerry OS 4.5"
BLACKBERRY_OS_4_6="BlackBerry OS 4.6"
BLACKBERRY_OS_5_0="BlackBerry OS 5.0"
BLACKBERRY_OS_6_0="BlackBerry OS 6.0"
BLACKBERRY_OS_7_0="BlackBerry OS 7.0"
BLACKBERRY_OS_7_1="BlackBerry OS 7.1"
BLACKBERRY_OS_10_0="BlackBerry OS 10.0"
BLACKBERRY_OS_10_1="BlackBerry OS 10.1"
BLACKBERRY_OS_10_2="BlackBerry OS 10.2"
BLACKBERRY_OS_10_3="BlackBerry OS 10.3"
SYMBIAN_OS_6_0="Symbian OS 6.0"
SYMBIAN_OS_6_1="Symbian OS 6.1"
SYMBIAN_OS_7_0="Symbian OS 7.0"
SYMBIAN_OS_8_0="Symbian OS 8.0"
SYMBIAN_OS_8_1="Symbian OS 8.1"
SYMBIAN_OS_9_1="Symbian OS 9.1"
SYMBIAN_OS_9_2="Symbian OS 9.2"
SYMBIAN_OS_9_3="Symbian OS 9.3"
SYMBIAN_OS_9_4="Symbian OS 9.4"
SYMBIAN_OS_9_5="Symbian OS 9.5"
SYMBIAN_OS_10_1="Symbian OS 10.1"
#equivalence
# SunOS kernel version strings -> marketing Solaris names.
LIST_SUNOS={"sunos 5.0":SOLARIS_2_0, "sunos 5.1":SOLARIS_2_1, "sunos 5.2":SOLARIS_2_2, "sunos 5.3":SOLARIS_2_3, "sunos 5.4":SOLARIS_2_4,
"sunos 5.5":SOLARIS_2_5, "sunos 5.5.1":SOLARIS_2_5_1, "sunos 5.6":SOLARIS_2_6, "sunos 5.7":SOLARIS_7, "sunos 5.8":SOLARIS_8, "sunos 5.9":SOLARIS_9 ,
"sunos 5.10":SOLARIS_10, "sunos 5.11":SOLARIS_11}
# Desktop OS name -> classifier feature index (indexes start at 6; values
# 0-5 are presumably reserved elsewhere -- confirm against the classifier).
OS_DESKTOP_INDEXES = {MS_DOS:6, WINDOWS_NT:7, WINDOWS_95:8, WINDOWS_98:9, WINDOWS_2000:10,
WINDOWS_XP:11, WINDOWS_VISTA:12, WINDOWS_7:13, WINDOWS_8:14,WINDOWS_10:15,
MAC_OS_X_10_0:16, MAC_OS_X_10_1:17, MAC_OS_X_10_2:18, MAC_OS_X_10_3:19, MAC_OS_X_10_4:20,
MAC_OS_X_10_5:21, MAC_OS_X_10_6:22, MAC_OS_X_10_7:23, MAC_OS_X_10_8:24, MAC_OS_X_10_9:25,
MAC_OS_X_10_10:26, MAC_OS_X_10_11:27, SOLARIS_2_0:28, SOLARIS_2_1:29, SOLARIS_2_2:30,
SOLARIS_2_3:31, SOLARIS_2_4:32, SOLARIS_2_5:33, SOLARIS_2_5_1:34, SOLARIS_2_6:35,
SOLARIS_7:36, SOLARIS_8:37, SOLARIS_9:38, SOLARIS_10:39, SOLARIS_11:40,
LINUX:41}
# Mobile OS name -> classifier feature index.
# NOTE(review): ANDROID_2_0 and ANDROID_2_1 both map to index 30, so those
# two versions share a column while every other entry is unique -- this looks
# like a copy-paste slip (ANDROID_2_1 would be 31 and the rest shifted by
# one). Confirm before renumbering: changing indexes would invalidate any
# data/models built with the current mapping.
OS_MOBILE_INDEXES = {IPHONE_OS_1_0:6, IPHONE_OS_1_1:7, IPHONE_OS_1_1_1:8, IPHONE_OS_2_0:9, IPHONE_OS_2_1:10,
IPHONE_OS_2_2:11, IPHONE_OS_3_0:12, IPHONE_OS_3_1:13, IPHONE_OS_3_2:14, IPHONE_OS_4:15,
IPHONE_OS_4_1:16, IPHONE_OS_4_2:17, IPHONE_OS_4_2_10:18, IPHONE_OS_4_3:19, IPHONE_OS_5_0:20,
IPHONE_OS_5_1:21, IPHONE_OS_6:22, IPHONE_OS_7:23, IPHONE_OS_8:24, IPHONE_OS_9:25,
ANDROID_1_0:26, ANDROID_1_1:27, ANDROID_1_5:28, ANDROID_1_6:29, ANDROID_2_0:30, ANDROID_2_1:30,
ANDROID_2_2:31, ANDROID_2_3:32, ANDROID_3:33, ANDROID_4_0:34, ANDROID_4_1:35,
ANDROID_4_2:36, ANDROID_4_3:37, ANDROID_4_4:38, ANDROID_5_0:39, ANDROID_6_0:40,
WINDOWS_PHONE_7:41, WINDOWS_PHONE_8:42, WINDOWS_PHONE_8_1:43, BLACKBERRY_OS_1_0:44, BLACKBERRY_OS_3_6:45,
BLACKBERRY_OS_4_5:46, BLACKBERRY_OS_4_6:47, BLACKBERRY_OS_5_0:48, BLACKBERRY_OS_6_0:49, BLACKBERRY_OS_7_0:50,
BLACKBERRY_OS_7_1:51, BLACKBERRY_OS_10_0:52, BLACKBERRY_OS_10_1:53, BLACKBERRY_OS_10_2:54, BLACKBERRY_OS_10_3:55,
SYMBIAN_OS_6_0:56, SYMBIAN_OS_6_1:57, SYMBIAN_OS_7_0:58, SYMBIAN_OS_8_0:59, SYMBIAN_OS_8_1:60,
SYMBIAN_OS_9_1:61, SYMBIAN_OS_9_2:62, SYMBIAN_OS_9_3:63, SYMBIAN_OS_9_4:64, SYMBIAN_OS_9_5:65,
SYMBIAN_OS_10_1:66}
| gpl-3.0 |
librae8226/linux-2.6.30.4 | scripts/rt-tester/rt-tester.py | 904 | 5366 | #!/usr/bin/env python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0        # suppress progress() output when nonzero (-q)
test = 0         # syntax-check mode: print sysfs paths instead of using them (-t)
comments = 0     # 0: skip comments, 1: print from the first command on, 2: printing (-c)
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Script mnemonic -> numeric opcode written to the kernel rt-mutex tester's
# per-thread sysfs command file.
cmd_opcodes = {
    "schedother": "1",
    "schedfifo": "2",
    "lock": "3",
    "locknowait": "4",
    "lockint": "5",
    "lockintnowait": "6",
    "lockcont": "7",
    "unlock": "8",
    "lockbkl": "9",
    "unlockbkl": "10",
    "signal": "11",
    "resetevent": "98",
    "reset": "99",
}

# Check mnemonic -> [status-field letter, comparison, expected value].
# A None expected value means the value is taken from the test-script line;
# the "M" (mutex state) entries carry a fixed expected digit.
test_opcodes = {
    "prioeq": ["P", "eq", None],
    "priolt": ["P", "lt", None],
    "priogt": ["P", "gt", None],
    "nprioeq": ["N", "eq", None],
    "npriolt": ["N", "lt", None],
    "npriogt": ["N", "gt", None],
    "unlocked": ["M", "eq", 0],
    "trylock": ["M", "eq", 1],
    "blocked": ["M", "eq", 2],
    "blockedwake": ["M", "eq", 3],
    "locked": ["M", "eq", 4],
    "opcodeeq": ["O", "eq", None],
    "opcodelt": ["O", "lt", None],
    "opcodegt": ["O", "gt", None],
    "eventeq": ["E", "eq", None],
    "eventlt": ["E", "lt", None],
    "eventgt": ["E", "gt", None],
}
# Print usage information
def usage():
    # NOTE: Python 2 print statements -- this script targets Python 2.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # 'str' shadows the builtin, kept for byte-compatibility with upstream.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test opcode.

    val: raw status value string read from the sysfs status line
    top: [field-letter, comparison, expected] entry from test_opcodes
    arg: the argument column of the current test-script line
    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state field: 'arg' selects which decimal digit to inspect.
        # Relies on Python 2 integer division ('/' truncates for ints).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: translate a mnemonic to its numeric opcode if known,
        # otherwise treat 'arg' as a literal number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns
# Each non-comment line has four colon-separated columns:
#   cmd : opcode : thread-id : data
# cmd "t"/"w" checks (or waits on) a status value, "c" issues a command.
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line: echo it only once comment printing is active.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
        progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            # "w" polls until the condition holds; "t" checks exactly once.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Malformed line or sysfs failure: report with the line number.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
ahmetcemturan/SFACT | fabmetheus_utilities/geometry/geometry_utilities/boolean_geometry.py | 8 | 8191 | """
This page is in the table of contents.
The xml.py script is an import translator plugin to get a carving from an Art of Illusion xml file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then picking the XML choice. This will bring up the XML file chooser window, choose a place to save the file then click "OK". Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported, this plugin will ignore the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of Illusion scene and they will all be carved, then fabricated together.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities.evaluate_elements import setting
from fabmetheus_utilities.geometry.geometry_utilities import boolean_solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import settings
from fabmetheus_utilities import xml_simple_writer
import math
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getEmptyZLoops(archivableObjects, importRadius, shouldPrintWarning, z, zoneArrangement):
    'Get loops at empty z level.'
    adjustedZ = zoneArrangement.getEmptyZ(z)
    objectLoopsList = boolean_solid.getVisibleObjectLoopsList(
        importRadius, evaluate.getVisibleObjects(archivableObjects), adjustedZ)
    loops = euclidean.getConcatenatedList(objectLoopsList)
    # Non-intersecting loops can be returned as-is; otherwise fall back to
    # the (slower) union of the per-object loop lists.
    if not euclidean.isLoopListIntersecting(loops):
        return loops
    loops = boolean_solid.getLoopsUnion(importRadius, objectLoopsList)
    if shouldPrintWarning:
        print('Warning, the triangle mesh slice intersects itself in getExtruderPaths in boolean_geometry.')
        print('Something will still be printed, but there is no guarantee that it will be the correct shape.')
        print('Once the gcode is saved, you should check over the layer with a z of:')
        print(z)
    return loops
def getLoopLayers(archivableObjects, importRadius, layerHeight, maximumZ, shouldPrintWarning, z, zoneArrangement):
    'Get loop layers.'
    loopLayers = []
    currentZ = z
    # Slice upward one layer at a time until the top of the model is passed.
    while currentZ <= maximumZ:
        loopLayer = triangle_mesh.getLoopLayerAppend(loopLayers, currentZ)
        loopLayer.loops = getEmptyZLoops(archivableObjects, importRadius, True, currentZ, zoneArrangement)
        currentZ += layerHeight
    return loopLayers
def getMinimumZ(geometryObject):
    'Get the minimum of the minimum z of the archivableObjects and the object.'
    booleanGeometry = BooleanGeometry()
    booleanGeometry.archivableObjects = geometryObject.archivableObjects
    booleanGeometry.importRadius = setting.getImportRadius(geometryObject.elementNode)
    booleanGeometry.layerHeight = setting.getLayerHeight(geometryObject.elementNode)
    archivableMinimumZ = booleanGeometry.getMinimumZ()
    geometryMinimumZ = geometryObject.getMinimumZ()
    # Either side may be None when it has no visible vertexes; fall back to
    # the other. (Fixed: identity comparison with None instead of '== None'.)
    if archivableMinimumZ is None:
        return geometryMinimumZ
    if geometryMinimumZ is None:
        return archivableMinimumZ
    return min(archivableMinimumZ, geometryMinimumZ)
class BooleanGeometry:
    'A boolean geometry scene.'

    def __init__(self):
        'Add empty lists.'
        self.archivableObjects = []
        self.belowLoops = []
        self.importRadius = 0.6
        self.layerHeight = 0.4
        self.loopLayers = []

    def __repr__(self):
        'Get the string representation of this carving.'
        elementNode = None
        if len(self.archivableObjects) > 0:
            elementNode = self.archivableObjects[0].elementNode
        output = xml_simple_writer.getBeginGeometryXMLOutput(elementNode)
        self.addXML( 1, output )
        return xml_simple_writer.getEndGeometryXMLString(output)

    def addXML(self, depth, output):
        'Add xml for this object.'
        xml_simple_writer.addXMLFromObjects( depth, self.archivableObjects, output )

    def getCarveBoundaryLayers(self):
        'Get the boundary layers.'
        # Fixed: identity comparison with None instead of '== None'.
        if self.getMinimumZ() is None:
            return []
        z = self.minimumZ + 0.5 * self.layerHeight
        self.loopLayers = getLoopLayers(self.archivableObjects, self.importRadius, self.layerHeight, self.maximumZ, True, z, self.zoneArrangement)
        # Track the bounding corners of every loop point across all layers.
        self.cornerMaximum = Vector3(-912345678.0, -912345678.0, -912345678.0)
        self.cornerMinimum = Vector3(912345678.0, 912345678.0, 912345678.0)
        for loopLayer in self.loopLayers:
            for loop in loopLayer.loops:
                for point in loop:
                    pointVector3 = Vector3(point.real, point.imag, loopLayer.z)
                    self.cornerMaximum.maximize(pointVector3)
                    self.cornerMinimum.minimize(pointVector3)
        self.cornerMaximum.z += self.halfHeight
        self.cornerMinimum.z -= self.halfHeight
        # Trim trailing empty layers from the top of the stack.
        for loopLayerIndex in xrange(len(self.loopLayers) -1, -1, -1):
            loopLayer = self.loopLayers[loopLayerIndex]
            if len(loopLayer.loops) > 0:
                return self.loopLayers[: loopLayerIndex + 1]
        return []

    def getCarveCornerMaximum(self):
        'Get the corner maximum of the vertexes.'
        return self.cornerMaximum

    def getCarveCornerMinimum(self):
        'Get the corner minimum of the vertexes.'
        return self.cornerMinimum

    def getCarveLayerHeight(self):
        'Get the layer height.'
        return self.layerHeight

    def getFabmetheusXML(self):
        'Return the fabmetheus XML.'
        if len(self.archivableObjects) > 0:
            return self.archivableObjects[0].elementNode.getOwnerDocument().getOriginalRoot()
        return None

    def getInterpretationSuffix(self):
        'Return the suffix for a boolean carving.'
        return 'xml'

    def getMatrix4X4(self):
        'Get the matrix4X4.'
        return None

    def getMatrixChainTetragrid(self):
        'Get the matrix chain tetragrid.'
        return None

    def getMinimumZ(self):
        'Get the minimum z.'
        vertexes = []
        for visibleObject in evaluate.getVisibleObjects(self.archivableObjects):
            vertexes += visibleObject.getTransformedVertexes()
        if len(vertexes) < 1:
            return None
        self.maximumZ = -912345678.0
        self.minimumZ = 912345678.0
        for vertex in vertexes:
            self.maximumZ = max(self.maximumZ, vertex.z)
            self.minimumZ = min(self.minimumZ, vertex.z)
        self.zoneArrangement = triangle_mesh.ZoneArrangement(self.layerHeight, vertexes)
        self.halfHeight = 0.5 * self.layerHeight
        self.setActualMinimumZ()
        return self.minimumZ

    def getNumberOfEmptyZLoops(self, z):
        'Get number of empty z loops.'
        return len(getEmptyZLoops(self.archivableObjects, self.importRadius, False, z, self.zoneArrangement))

    def setActualMinimumZ(self):
        'Get the actual minimum z at the lowest rotated boundary layer.'
        halfHeightOverMyriad = 0.0001 * self.halfHeight
        while self.minimumZ < self.maximumZ:
            if self.getNumberOfEmptyZLoops(self.minimumZ + halfHeightOverMyriad) > 0:
                if self.getNumberOfEmptyZLoops(self.minimumZ - halfHeightOverMyriad) < 1:
                    return
                # Binary search downward/upward for the exact boundary z.
                increment = -self.halfHeight
                while abs(increment) > halfHeightOverMyriad:
                    self.minimumZ += increment
                    increment = 0.5 * abs(increment)
                    if self.getNumberOfEmptyZLoops(self.minimumZ) > 0:
                        increment = -increment
                self.minimumZ = round(self.minimumZ, -int(round(math.log10(halfHeightOverMyriad) + 1.5)))
                return
            self.minimumZ += self.layerHeight

    def setCarveImportRadius( self, importRadius ):
        'Set the import radius.'
        self.importRadius = importRadius

    def setCarveIsCorrectMesh( self, isCorrectMesh ):
        'Set the is correct mesh flag.'
        self.isCorrectMesh = isCorrectMesh

    def setCarveLayerHeight( self, layerHeight ):
        'Set the layer height.'
        self.layerHeight = layerHeight
| agpl-3.0 |
varunarya10/boto | boto/contrib/ymlmessage.py | 70 | 1880 | # Copyright (c) 2006,2007 Chris Moyer
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module was contributed by Chris Moyer. It provides a subclass of the
SQS Message class that supports YAML as the body of the message.
This module requires the yaml module.
"""
from boto.sqs.message import Message
import yaml
class YAMLMessage(Message):
    """
    The YAMLMessage class provides a YAML compatible message. Encoding and
    decoding are handled automatically.

    Access this message data like such:

    m.data = [ 1, 2, 3]
    m.data[0] # Returns 1

    This depends on the PyYAML package
    """

    def __init__(self, queue=None, body='', xml_attrs=None):
        # xml_attrs is accepted for interface compatibility but unused here.
        self.data = None
        super(YAMLMessage, self).__init__(queue, body)

    def set_body(self, body):
        # SECURITY: yaml.load() without an explicit Loader can construct
        # arbitrary Python objects, and SQS message bodies are external
        # input. yaml.safe_load(body) would be safer -- review before
        # changing, since it would break messages relying on python tags.
        self.data = yaml.load(body)

    def get_body(self):
        return yaml.dump(self.data)
| mit |
pantonov/serna-free | serna/dist/plugins/dita/publishing/DitaPublisher.py | 5 | 11181 | ##
## Copyright(c) 2009 Syntext, Inc. All Rights Reserved.
## Contact: info@syntext.com, http://www.syntext.com
##
## This file is part of Syntext Serna XML Editor.
##
## COMMERCIAL USAGE
## Licensees holding valid Syntext Serna commercial licenses may use this file
## in accordance with the Syntext Serna Commercial License Agreement provided
## with the software, or, alternatively, in accordance with the terms contained
## in a written agreement between you and Syntext, Inc.
##
## GNU GENERAL PUBLIC LICENSE USAGE
## Alternatively, this file may be used under the terms of the GNU General
## Public License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the packaging
## of this file. In addition, as a special exception, Syntext, Inc. gives you
## certain additional rights, which are described in the Syntext, Inc. GPL
## Exception for Syntext Serna Free Edition, included in the file
## GPL_EXCEPTION.txt in this package.
##
## You should have received a copy of appropriate licenses along with this
## package. If not, see <http://www.syntext.com/legal/>. If you are unsure
## which license is appropriate for your use, please contact the sales
## department at sales@syntext.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
from Publisher import *
from Transformer import *
from PublisherUtils import *
from utils import *
from SimplePublisher import SimplePublisherCreator
from dialogs.DitaOtOptionsDialog import get_dita_options
from publishing import PublishingPlugin
try:
from SernaApi import *
except:
from XSernaApi.SernaConfig import SernaConfig
pass
import tempfile
__all__ = ['register_creators']
def get_win32_free_drive():
    """Return the first unused Windows drive letter, scanning z: down to d:.

    Returns None when every letter in that range is already taken.
    """
    for code in range(ord('z'), ord('d') - 1, -1):
        candidate = '%s:' % chr(code)
        if not os.path.exists(candidate + r'\.'):
            return candidate
    return None
from subprocess import call
class DitaTempDir(Value):
    """Temporary directory for DITA-OT runs.

    On win32 the directory's parent is additionally mapped to a free drive
    letter with 'subst' (keeps paths short for the toolkit); the mapping is
    undone in cleanup().
    """

    def __init__(self):
        Value.__init__(self)
        # Drive letter used by the win32 'subst' mapping, if any.
        self._tempDrive = None
        if 'win32' == sys.platform:
            tempDir = tempfile.mkdtemp()
            tempDrive = get_win32_free_drive()
            if tempDrive:
                tempDirParent, tempDirBase = os.path.split(tempDir)
                call('subst %s "%s"' % (tempDrive, tempDirParent),
                     creationflags=0x08000000)
                self._tempDir = '%s\\%s' % (tempDrive, tempDirBase)
                if not os.path.exists(self._tempDir):
                    # subst did not take effect: fall back to the real path.
                    self._tempDir = tempDir
                self._parentDir = tempDirParent
                self._tempDrive = tempDrive
            else:
                # BUGFIX: _tempDir was never assigned on this path before,
                # so get() and cleanup() raised AttributeError when no free
                # drive letter was available.
                self._tempDir = tempDir
                self._parentDir = os.path.dirname(tempDir)
                self._absTempDir = tempDir
        else:
            self._absTempDir = self._tempDir = tempfile.mkdtemp()
            self._parentDir = os.path.dirname(self._absTempDir)

    def get(self):
        'Return the (possibly subst-mapped) temporary directory path.'
        return self._tempDir

    def parent(self):
        'Return the parent directory of the temporary directory.'
        return self._parentDir

    def __del__(self):
        self.cleanup()

    def cleanup(self):
        'Remove the temporary tree and undo the win32 drive mapping.'
        from shutil import rmtree
        if self._tempDir and os.path.exists(self._tempDir) and \
           self._tempDir != os.path.dirname(self._tempDir):
            rmtree(self._tempDir, True)
        if 'win32' == sys.platform and self._tempDrive and \
           os.path.exists(self._tempDrive + r'\.'):
            call('subst /d %s' % self._tempDrive, creationflags=0x08000000)
class DitaOtTransformer(Transformer):
    """Transformer that runs a DITA Open Toolkit Ant build on a DITA source."""

    def __init__(self, **kwargs):
        Transformer.__init__(self, **kwargs)
        if self._ditaOtDir is None:
            # Fall back to the DITA-OT location from the Serna configuration.
            dotDir = SernaConfig.getProperty("vars/dita_ot_dir")
            self._ditaOtDir = unicode(dotDir)

    def _prepare(self, srcUri, dstUri):
        # Set up the Java CLASSPATH, an XML catalog manager file and the Ant
        # property dictionary needed by the DITA-OT build.
        self._tempDir = DitaTempDir()
        ditaJars = "dost;resolver;fop;avalon-framework-cvs-20020806;batik;" \
            "xalan;xercesImpl;xml-apis;icu4j".split(';')
        ditaLibDir = os.path.join(self._ditaOtDir, 'lib')
        classPath = [ os.path.join(ditaLibDir, '%s.jar' % x) for x in ditaJars ]
        classPath.append(ditaLibDir)
        self._env = { 'CLASSPATH': os.pathsep.join(classPath) }
        if 'win32' == sys.platform:
            ditaOtDirUrl = "file:///" + self._ditaOtDir
        else:
            ditaOtDirUrl = "file://" + self._ditaOtDir
        xmlCatalog = ditaOtDirUrl.replace('\\', '/') + "/catalog-dita.xml"
        self._catMgrProps = os.path.join(self._tempDir.get(),
                                         "CatalogManager.properties")
        # NOTE(review): the file handle is never closed explicitly; relies on
        # CPython refcounting to flush and close immediately.
        open(self._catMgrProps, "w+").write("""
catalogs=%s
relative-catalogs=no
prefer=public
static-catalog=yes
""".strip() % xmlCatalog)
        antArgsDict = {
            'args.target': 'init',
            'args.input': self.srcUri,
            'output.dir': os.path.dirname(self.dstUri),
            'dita.temp.dir': self._tempDir.get(),
            'args.logdir': self._tempDir.get()
        }
        if self._antArgsDict:
            # '$name' values are indirections: substitute the transformer
            # attribute of that name when present.
            for k, v in self._antArgsDict.iteritems():
                if v.startswith('$'):
                    name = v[1:]
                    if name in self:
                        self._antArgsDict[k] = self[name]
            antArgsDict.update(self._antArgsDict)
        self._antArgsDict = antArgsDict

    def _transform(self, srcUri, dstUri):
        # Assemble the Ant command line (-D properties, extra options,
        # makefile and target) and launch it asynchronously.
        self._prepare(srcUri, dstUri)
        args = []
        for name, value in self._antArgsDict.iteritems():
            args.append('-D%s=%s' % (name, value))
        if self._antOptions:
            opts = self._antOptions.split()
            if opts:
                args.extend(opts)
        if self._args:
            self._args.extend(args)
        else:
            self._args = args
        self._args.extend(['-f', self._antMakefile])
        self._args.append(self._antTarget)
        self._scriptRunner = ScriptRunner(self)
        antScript = find_script(unicode(self._antScript))
        wd = os.path.dirname(self.srcUri)
        self._scriptRunner.run(antScript, self._args, wd=wd, env=self._env)
        return True

    def _scriptFinished(self, exitCode, exitStatus):
        # Remove the temporary directory once the Ant run has ended.
        if self._tempDir:
            self._tempDir.cleanup()
        return False
class DitaPublisher(Publisher):
    'Common base for DITA publishers; wires in the DITA-OT options dialog.'

    def __init__(self, **kwargs):
        Publisher.__init__(self, **kwargs)

    def hasAdvancedOptions(self):
        # Every DITA publisher exposes the advanced options dialog.
        return True

    def fillAdvancedOptions(self, widget):
        accepted, dlg = get_dita_options(self, widget)
        if not accepted:
            return
        # Copy the accepted dialog values back onto the publisher; the
        # dialog property names lack the leading underscore.
        for attrName in ('_antScript', '_antMakefile', '_antOptions'):
            value = dlg.getProperty(attrName[1:])
            if value:
                self[attrName] = value
class HtmlDitaOtPublisher(DitaPublisher):
    'Publishes DITA sources to XHTML with the DITA Open Toolkit.'

    def __init__(self, **kwargs):
        # NOTE: calls Publisher.__init__ directly rather than
        # DitaPublisher.__init__; both currently do the same thing.
        Publisher.__init__(self, **kwargs)
        self.extension = 'html'

    def _fillAdvancedOptions(self, dlg):
        dlg.setLabel('antMakefile', 'DITA to XHTML Ant makefile')
        title = 'Publishing settings: DITA to XHTML with DITA Open Toolkit'
        dlg.setWindowTitle(title)

    def _publish(self, dsi, dstUri):
        # The TOC file is named after the source document's basename.
        base, ext = os.path.splitext(os.path.basename(self.srcUri))
        args = ['-Dargs.transtype=xhtml', '-Dargs.xhtml.toc=%s' % base]
        kwargs = {
            '_ditaOtDir': self._ditaOtDir,
            '_antTarget': 'dita2xhtml',
            '_args': args,
            'exts': [ext, '.html'],
            'caller': self
        }
        # Forward every Ant-related publisher attribute to the transformer.
        for name, value in self.attrs().iteritems():
            if name.startswith('_ant'):
                kwargs[name] = value
        self._dotHtmlTrans = DitaOtTransformer(**kwargs)
        self._dotHtmlTrans.transform(self.srcUri, dstUri)
class FoDitaOtPublisher(DitaPublisher):
    'Publishes DITA to PDF: DITA-OT emits XSL-FO, then an FO processor renders it.'

    def __init__(self, **kwargs):
        Publisher.__init__(self, **kwargs)
        # The second stage (FO -> PDF) is delegated to a separate transformer
        # obtained from the factory; its script/extension drive this publisher.
        self._fromFoTrans = self.transFactory.make(self.fromFoTrans)
        self.script = self._fromFoTrans.script
        self.extension = self._fromFoTrans.exts[1]

    def _fillAdvancedOptions(self, dlg):
        dlg.setLabel('antMakefile', 'DITA to XSL-FO Ant makefile')
        title = 'Publishing settings: DITA to PDF with DITA Open Toolkit and %s' % self._fromFoTrans.name
        dlg.setWindowTitle(title)

    def _publish(self, dsi, dstUri):
        # NOTE(review): 'base' is unused on this path; only 'ext' matters.
        base, ext = os.path.splitext(os.path.basename(self.srcUri))
        kwargs = {
            '_ditaOtDir': self._ditaOtDir,
            '_antTarget': 'dita2fo',
            '_defineOutput': 0, '_defineInput': 0,
            'exts': [ext, '.fo'],
            'caller': self
        }
        for name, value in self.attrs().iteritems():
            if name.startswith('_ant'):
                kwargs[name] = value
        toFoTrans = DitaOtTransformer(**kwargs)
        # '$dstUri' is resolved by the transformer against its attributes.
        toFoTrans._antArgsDict = { 'output.fo': '$dstUri' }
        # Chain the two stages: DITA -> FO (DITA-OT), then FO -> PDF.
        self._trans = ChainedTransformer([toFoTrans, self._fromFoTrans],
                                         name='Dita2%s' % self.fromFoTrans,
                                         caller=self)
        self._trans.transform(self.srcUri, self.dstUri)

    def _cancel(self, kill):
        if self._trans:
            self._trans.cancel(kill)
class DitaOTPublisherCreator(SimplePublisherCreator):
    'Creates DITA publishers for the output formats supported by the local DITA-OT.'

    def __init__(self, **kwargs):
        SimplePublisherCreator.__init__(self, 'Dita', **kwargs)
        # Resolve DITA-OT and its bundled Ant locations once, up front.
        params = Value()
        self._ditaOtDir = unicode(SernaConfig.getProperty("vars/dita_ot_dir"))
        params._ditaOtDir = self._ditaOtDir
        params._antHome = os.path.join(params._ditaOtDir, 'tools', 'ant')
        params._antScript = os.path.join(params._antHome, 'bin', 'ant')
        for k in params.attrs().keys():
            params[k] = os.path.normpath(params[k])
        self._ditaParams = params

    def _make(self, **kwargs):
        # 'tag' selects the output format. FO-based tags (self._foTags is
        # presumably populated by the SimplePublisherCreator base -- confirm)
        # go through the two-stage PDF publisher.
        tag = kwargs.get('tag', None)
        if tag:
            kwargs.update(self._ditaParams.attrs())
            if tag in self._foTags:
                antMakefile = os.path.join(self._ditaOtDir, "build_with_fo.xml")
                return FoDitaOtPublisher(fromFoTrans=self._foTags[tag],
                                         transFactory=self._transFactory,
                                         _antMakefile=antMakefile,
                                         **kwargs)
            elif tag.startswith('HTML'):
                antMakefile = os.path.join(self._ditaOtDir, "build.xml")
                return HtmlDitaOtPublisher(_antMakefile=antMakefile, **kwargs)
        return None

    def _getTags(self, dsi):
        # Only documents whose category mentions DITA get tags from here.
        category = unicode(dsi.getProperty("category").getString())
        if not 'DITA' in category:
            return []
        tags = ['HTML']
        foTransformers = self._foTags.keys()
        tags.extend(foTransformers)
        return tags
def register_creators(pluginDir):
    'Register the DITA-OT publisher creator with the global publishers registry.'
    registry = PublishingPlugin.PublishingPlugin.getPublishersRegistry()
    if not registry:
        return
    creator = DitaOTPublisherCreator(pluginDir=pluginDir)
    registry.registerPublisherCreator(creator)
| gpl-3.0 |
uclouvain/osis | education_group/views/mini_training/general_information_read.py | 1 | 4564 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import functools
from django.shortcuts import redirect
from django.urls import reverse
from base.business.education_groups import general_information_sections
from base.models.enums.publication_contact_type import PublicationContactType
from education_group.views import serializers
from education_group.views.mini_training.common_read import MiniTrainingRead, Tab
class MiniTrainingReadGeneralInformation(MiniTrainingRead):
    """Read-only "general information" tab of a mini-training.

    Assembles the editable sections, publication links and the various
    contact lists shown on the general information page.
    """
    template_name = "education_group_app/mini_training/general_informations_read.html"
    active_tab = Tab.GENERAL_INFO

    def get(self, request, *args, **kwargs):
        # Some mini-training types have no general-information tab; fall
        # back to the identification tab in that case.
        if not self.have_general_information_tab():
            return redirect(reverse('mini_training_identification', kwargs=self.kwargs))
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        return {
            **super().get_context_data(**kwargs),
            "sections": self.get_sections(),
            "update_label_url": self.get_update_label_url(),
            "publish_url": reverse('publish_general_information', args=[
                self.node_identity.year,
                self.node_identity.code
            ]) +
            "?path={}".format(self.get_path()),
            "can_edit_information":
                self.request.user.has_perm(
                    "base.change_minitraining_pedagogyinformation",
                    self.get_education_group_version().offer
                ),
            "show_contacts": self.can_have_contacts(),
            "entity_contact": self.get_entity_contact(),
            "academic_responsibles": self.get_academic_responsibles(),
            "other_academic_responsibles": self.get_other_academic_responsibles(),
            "jury_members": self.get_jury_members(),
            "other_contacts": self.get_other_contacts()
        }

    def get_update_label_url(self):
        # Edit URL for the pedagogy labels, preserving the current tree path.
        return reverse(
            'mini_training_general_information_update',
            args=[self.get_mini_training().year, self.get_mini_training().code]
        ) + "?path={}".format(self.get_path())

    def get_sections(self):
        return serializers.general_information.get_sections(self.get_group(), self.request.LANGUAGE_CODE)

    def can_have_contacts(self):
        # The contacts section only exists for offer types that declare it
        # in their "specific" section list.
        return general_information_sections.CONTACTS in \
            general_information_sections.SECTIONS_PER_OFFER_TYPE[self.get_group().type.name]['specific']

    def get_entity_contact(self):
        # The publication contact entity version may be None; fall back to
        # None rather than raising.
        return getattr(
            self.get_education_group_version().offer.publication_contact_entity_version,
            'verbose_title',
            None
        )

    def get_academic_responsibles(self):
        return self._get_contacts().get(PublicationContactType.ACADEMIC_RESPONSIBLE.name) or []

    def get_other_academic_responsibles(self):
        return self._get_contacts().get(PublicationContactType.OTHER_ACADEMIC_RESPONSIBLE.name) or []

    def get_jury_members(self):
        return self._get_contacts().get(PublicationContactType.JURY_MEMBER.name) or []

    def get_other_contacts(self):
        return self._get_contacts().get(PublicationContactType.OTHER_CONTACT.name) or []

    def _get_contacts(self):
        # Memoized per instance rather than with functools.lru_cache: an
        # lru_cache on a method keys the cache on `self` and keeps every
        # view instance alive for the lifetime of the process
        # (flake8-bugbear B019). Django builds one view instance per
        # request, so a plain attribute cache is both correct and leak-free.
        if not hasattr(self, '_contacts_cached'):
            self._contacts_cached = serializers.general_information.get_contacts(self.get_group())
        return self._contacts_cached
| agpl-3.0 |
shsingh/ansible | lib/ansible/modules/net_tools/ip_netns.py | 45 | 4040 | #!/usr/bin/python
# (c) 2017, Arie Bregman <abregman@redhat.com>
#
# This file is a module for Ansible that interacts with Network Manager
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ip_netns
version_added: 2.5
author: "Arie Bregman (@bregman-arie)"
short_description: Manage network namespaces
requirements: [ ip ]
description:
- Create or delete network namespaces using the ip command.
options:
name:
required: false
description:
- Name of the namespace
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the namespace should exist
'''
EXAMPLES = '''
# Create a namespace named mario
- name: Create a namespace named mario
namespace:
name: mario
state: present
- name: Delete a namespace named luigi
namespace:
name: luigi
state: absent
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
class Namespace(object):
    """Interface to ``ip netns`` network namespace management."""

    def __init__(self, module):
        self.module = module
        self.name = module.params['name']
        self.state = module.params['state']

    def _netns(self, command):
        '''Run an ``ip netns`` sub-command; returns (rc, stdout, stderr).'''
        return self.module.run_command(['ip', 'netns'] + command)

    def exists(self):
        '''Check if the namespace already exists.

        ``ip netns list`` prints one namespace per line, optionally followed
        by an ``(id: N)`` suffix, so we compare against the first token of
        each line. A plain substring test would wrongly match e.g. "mario"
        against "mario2".
        '''
        rc, out, err = self.module.run_command('ip netns list')
        if rc != 0:
            self.module.fail_json(msg=to_text(err))
        names = [line.split()[0] for line in out.splitlines() if line.strip()]
        return self.name in names

    def add(self):
        '''Create the network namespace.'''
        rc, out, err = self._netns(['add', self.name])
        if rc != 0:
            self.module.fail_json(msg=err)

    def delete(self):
        '''Delete the network namespace.'''
        rc, out, err = self._netns(['del', self.name])
        if rc != 0:
            self.module.fail_json(msg=err)

    def check(self):
        '''Run check mode: report whether a change *would* be made.

        Mirrors run(): something changes only when the desired state
        differs from the current one. (The previous version also reported
        changed=True for state=present when the namespace already existed.)
        '''
        exists = self.exists()
        changed = ((self.state == 'present' and not exists) or
                   (self.state == 'absent' and exists))
        self.module.exit_json(changed=changed)

    def run(self):
        '''Apply the requested state, reporting whether anything changed.'''
        changed = False
        if self.state == 'absent':
            if self.exists():
                self.delete()
                changed = True
        elif self.state == 'present':
            if not self.exists():
                self.add()
                changed = True

        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the AnsibleModule and dispatch on mode."""
    module = AnsibleModule(
        argument_spec={
            'name': {'default': None},
            'state': {'default': 'present', 'choices': ['present', 'absent']},
        },
        supports_check_mode=True,
    )

    namespace = Namespace(module)
    # In check mode only report what would change; otherwise apply it.
    if module.check_mode:
        namespace.check()
    else:
        namespace.run()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ypwalter/evennia | evennia/web/views.py | 7 | 2908 |
"""
This file contains the generic, assorted views that don't fall under one of
the other applications. Views are django's way of processing e.g. html
templates on the fly.
"""
from django.contrib.admin.sites import site
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from evennia import SESSION_HANDLER
from evennia.objects.models import ObjectDB
from evennia.players.models import PlayerDB
_BASE_CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
def page_index(request):
    """Render the front page with a snapshot of game statistics."""
    # Number of recently connected players shown on the front page.
    # TODO: Move this to either SQL or settings.py based configuration.
    recent_limit = 4

    # Most recently connected players (QuerySet slice).
    recent_users = PlayerDB.objects.get_recently_connected_players()[:recent_limit]
    num_recent_connected = len(recent_users) or "none"
    num_registered = PlayerDB.objects.num_total_players() or "none"
    num_recent_registered = len(PlayerDB.objects.get_recently_created_players()) or "none"
    num_sessions = SESSION_HANDLER.player_count()

    # Object-type counts; "others" is whatever is not a room/exit/character.
    num_objects = ObjectDB.objects.all().count()
    num_rooms = (ObjectDB.objects.filter(db_location__isnull=True)
                 .exclude(db_typeclass_path=_BASE_CHAR_TYPECLASS).count())
    num_exits = ObjectDB.objects.filter(db_location__isnull=False,
                                        db_destination__isnull=False).count()
    num_chars = ObjectDB.objects.filter(db_typeclass_path=_BASE_CHAR_TYPECLASS).count()
    num_misc = num_objects - num_rooms - num_chars - num_exits

    pagevars = {
        "page_title": "Front Page",
        "players_connected_recent": recent_users,
        "num_players_connected": num_sessions or "no one",
        "num_players_registered": num_registered or "no",
        "num_players_connected_recent": num_recent_connected or "no",
        "num_players_registered_recent": num_recent_registered or "no one",
        "num_rooms": num_rooms or "none",
        "num_exits": num_exits or "no",
        "num_objects": num_objects or "none",
        "num_characters": num_chars or "no",
        "num_others": num_misc or "no",
    }
    return render(request, 'evennia_general/index.html', pagevars)
def to_be_implemented(request):
    """Placeholder page shown for features that are not yet implemented."""
    context = {"page_title": "To Be Implemented..."}
    return render(request, 'evennia_general/tbi.html', context)
@staff_member_required
def evennia_admin(request):
    """Render the Evennia-specific admin landing page (staff only)."""
    context = {'playerdb': PlayerDB}
    return render(request, 'evennia_general/evennia_admin.html', context)
def admin_wrapper(request):
    """Serve the stock Django admin index, gated to staff members."""
    guarded_index = staff_member_required(site.index)
    return guarded_index(request)
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/dateutil/tz/tz.py | 48 | 48924 | # -*- coding: utf-8 -*-
"""
This module offers timezone implementations subclassing the abstract
:py:`datetime.tzinfo` type. There are classes to handle tzfile format files
(usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc), TZ
environment string (in all known formats), given ranges (with help from
relative deltas), local machine timezone, fixed offset timezone, and UTC
timezone.
"""
import datetime
import struct
import time
import sys
import os
import bisect
import copy
from operator import itemgetter
from contextlib import contextmanager
from six import string_types, PY3
from ._common import tzname_in_python2, _tzinfo, _total_seconds
from ._common import tzrangebase, enfold
try:
from .win import tzwin, tzwinlocal
except ImportError:
tzwin = tzwinlocal = None
# Shared constants: a zero-length delta and the Unix epoch (naive UTC).
ZERO = datetime.timedelta(0)
EPOCH = datetime.datetime.utcfromtimestamp(0)
EPOCHORDINAL = EPOCH.toordinal()  # proleptic-Gregorian ordinal of 1970-01-01
class tzutc(datetime.tzinfo):
    """
    A ``tzinfo`` implementation of the UTC time zone: a fixed zero offset
    with no daylight saving time.
    """
    def utcoffset(self, dt):
        # UTC is, by definition, zero offset from itself.
        return ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return "UTC"

    def is_ambiguous(self, dt):
        """
        Whether the "wall time" of *dt* is ambiguous in this zone.

        UTC has no transitions, so no wall time is ever ambiguous.

        .. versionadded:: 2.6.0
        """
        return False

    def __eq__(self, other):
        # Equal to any tzutc, and to any tzoffset with an exactly zero offset.
        if isinstance(other, tzutc):
            return True
        if isinstance(other, tzoffset):
            return other._offset == ZERO
        return NotImplemented

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return self.__class__.__name__ + "()"

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """
    A time zone with a fixed offset from UTC and a fixed name.

    :param name:
        The value returned by ``tzname()``.

    :param offset:
        Offset from UTC, either in seconds or (since version 2.6.0) as a
        :py:class:`datetime.timedelta`.
    """
    def __init__(self, name, offset):
        self._name = name

        try:
            # Timedelta-like offsets are normalized to seconds first.
            offset = _total_seconds(offset)
        except (TypeError, AttributeError):
            pass
        self._offset = datetime.timedelta(seconds=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # Fixed-offset zones never observe DST.
        return ZERO

    def is_ambiguous(self, dt):
        """
        Whether the "wall time" of *dt* is ambiguous in this zone.

        A fixed-offset zone has no transitions, so this is always ``False``.

        .. versionadded:: 2.6.0
        """
        return False

    @tzname_in_python2
    def tzname(self, dt):
        return self._name

    def __eq__(self, other):
        if isinstance(other, tzoffset):
            return self._offset == other._offset
        return NotImplemented

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "{0}({1}, {2})".format(self.__class__.__name__,
                                      repr(self._name),
                                      int(_total_seconds(self._offset)))

    __reduce__ = object.__reduce__
class tzlocal(_tzinfo):
    """
    A :class:`tzinfo` subclass built around the ``time`` timezone functions.
    """
    def __init__(self):
        super(tzlocal, self).__init__()

        # time.timezone/time.altzone are seconds *west* of UTC; negate them
        # to get conventional (east-positive) UTC offsets.
        self._std_offset = datetime.timedelta(seconds=-time.timezone)
        if time.daylight:
            self._dst_offset = datetime.timedelta(seconds=-time.altzone)
        else:
            # The local zone never observes DST.
            self._dst_offset = self._std_offset

        # Wall-clock time added during DST (usually one hour); zero means
        # this zone effectively has no DST.
        self._dst_saved = self._dst_offset - self._std_offset
        self._hasdst = bool(self._dst_saved)

    def utcoffset(self, dt):
        # Without a datetime we cannot decide between STD and DST when the
        # zone has DST.
        if dt is None and self._hasdst:
            return None

        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        # DST adjustment in effect at *dt* (ZERO outside of DST).
        if dt is None and self._hasdst:
            return None

        if self._isdst(dt):
            return self._dst_offset - self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        # time.tzname is a (std_name, dst_name) pair indexed by DST-ness.
        return time.tzname[self._isdst(dt)]

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        # A wall time is ambiguous when it is not DST itself but becomes DST
        # after subtracting the saved amount — i.e. it occurs twice at a fold.
        naive_dst = self._naive_is_dst(dt)
        return (not naive_dst and
                (naive_dst != self._naive_is_dst(dt - self._dst_saved)))

    def _naive_is_dst(self, dt):
        # Ask the C library whether local wall time *dt* falls in DST.
        timestamp = _datetime_to_timestamp(dt)
        return time.localtime(timestamp + time.timezone).tm_isdst

    def _isdst(self, dt, fold_naive=True):
        # NOTE(review): fold_naive is currently unused — confirm before
        # relying on it.
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # The code above yields the following result:
        #
        # >>> import tz, datetime
        # >>> t = tz.tzlocal()
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRDT'
        # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
        # 'BRST'
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRST'
        # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
        # 'BRDT'
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRDT'
        #
        # Here is a more stable implementation:
        #
        if not self._hasdst:
            return False

        # Check for ambiguous times:
        dstval = self._naive_is_dst(dt)
        fold = getattr(dt, 'fold', None)

        if self.is_ambiguous(dt):
            # PEP 495: fold selects which side of an ambiguous hour is
            # meant; pre-3.6 datetimes have no fold attribute (fold is None)
            # and default to the DST (first) side.
            if fold is not None:
                return not self._fold(dt)
            else:
                return True

        return dstval

    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return NotImplemented

        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr",
"isstd", "isgmt", "dstoffset"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return NotImplemented
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt and
self.dstoffset == other.dstoffset)
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class _tzfile(object):
"""
Lightweight class for holding the relevant transition and time zone
information read from binary tzfiles.
"""
attrs = ['trans_list', 'trans_idx', 'ttinfo_list',
'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
def __init__(self, **kwargs):
for attr in self.attrs:
setattr(self, attr, kwargs.get(attr, None))
class tzfile(_tzinfo):
    """
    This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
    format timezone files to extract current and historical zone information.

    :param fileobj:
        This can be an opened file stream or a file name that the time zone
        information can be read from.

    :param filename:
        This is an optional parameter specifying the source of the time zone
        information in the event that ``fileobj`` is a file object. If omitted
        and ``fileobj`` is a file stream, this parameter will be set either to
        ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.

    See `Sources for Time Zone and Daylight Saving Time Data
    <http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
    files can be compiled from the `IANA Time Zone database files
    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
    """

    def __init__(self, fileobj, filename=None):
        super(tzfile, self).__init__()

        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        if fileobj is not None:
            if not file_opened_here:
                # Wrap caller-provided streams so the `with` below does not
                # close a file object we do not own.
                fileobj = _ContextWrapper(fileobj)

            with fileobj as file_stream:
                tzobj = self._read_tzfile(file_stream)

            self._set_tzdata(tzobj)

    def _set_tzdata(self, tzobj):
        """ Set the time zone data of this object from a _tzfile object """
        # Copy the relevant attributes over as private attributes
        for attr in _tzfile.attrs:
            setattr(self, '_' + attr, getattr(tzobj, attr))

    def _read_tzfile(self, fileobj):
        """Parse a binary tzfile(5) stream into a populated _tzfile."""
        out = _tzfile()

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order byte
        # of the value is written first).
        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The number of characters of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values of type long, sorted in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as returned by
        # time(2)) at which the rules for computing local time
        # change.
        if timecnt:
            out.trans_list = list(struct.unpack(">%dl" % timecnt,
                                                fileobj.read(timecnt*4)))
        else:
            out.trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        if timecnt:
            out.trans_idx = struct.unpack(">%dB" % timecnt,
                                          fileobj.read(timecnt))
        else:
            out.trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff of type long, in a standard byte
        # order, followed by a one-byte value for tt_isdst
        # and a one-byte value for tt_abbrind. In each
        # structure, tt_gmtoff gives the number of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by localtime(3), and
        # tt_abbrind serves as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.
        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in standard byte order; the
        # first value of each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs; the second gives the total number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now (but read anyway for correct file position)
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.
        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.
        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # Build ttinfo list
        out.ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = 60 * ((gmtoff + 30) // 60)
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.dstoffset = datetime.timedelta(0)
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviations are NUL-terminated strings within the abbr blob.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            out.ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        out.ttinfo_std = None
        out.ttinfo_dst = None
        out.ttinfo_before = None
        if out.ttinfo_list:
            if not out.trans_list:
                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
            else:
                # Walk transitions backwards to find the most recent STD and
                # DST records.
                for i in range(timecnt-1, -1, -1):
                    tti = out.trans_idx[i]
                    if not out.ttinfo_std and not tti.isdst:
                        out.ttinfo_std = tti
                    elif not out.ttinfo_dst and tti.isdst:
                        out.ttinfo_dst = tti

                    if out.ttinfo_std and out.ttinfo_dst:
                        break
                else:
                    if out.ttinfo_dst and not out.ttinfo_std:
                        out.ttinfo_std = out.ttinfo_dst

                for tti in out.ttinfo_list:
                    if not tti.isdst:
                        out.ttinfo_before = tti
                        break
                else:
                    out.ttinfo_before = out.ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = None
        for i, tti in enumerate(out.trans_idx):
            if not tti.isdst:
                offset = tti.offset
                laststdoffset = offset
            else:
                if laststdoffset is not None:
                    # Store the DST offset as well and update it in the list
                    tti.dstoffset = tti.offset - laststdoffset
                    out.trans_idx[i] = tti

                offset = laststdoffset or 0

            out.trans_list[i] += offset

        # In case we missed any DST offsets on the way in for some reason, make
        # a second pass over the list, looking for the /next/ DST offset.
        laststdoffset = None
        for i in reversed(range(len(out.trans_idx))):
            tti = out.trans_idx[i]
            if tti.isdst:
                if not (tti.dstoffset or laststdoffset is None):
                    tti.dstoffset = tti.offset - laststdoffset
            else:
                laststdoffset = tti.offset

            if not isinstance(tti.dstoffset, datetime.timedelta):
                tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)

            out.trans_idx[i] = tti

        # Freeze the lists: the parsed data is immutable from here on.
        out.trans_idx = tuple(out.trans_idx)
        out.trans_list = tuple(out.trans_list)

        return out

    def _find_last_transition(self, dt):
        """Return the index of the last transition at or before *dt*, or
        ``None`` if the file defines no transitions."""
        # If there's no list, there are no transitions to find
        if not self._trans_list:
            return None

        timestamp = _datetime_to_timestamp(dt)

        # Find where the timestamp fits in the transition list - if the
        # timestamp is a transition time, it's part of the "after" period.
        idx = bisect.bisect_right(self._trans_list, timestamp)

        # We want to know when the previous transition was, so subtract off 1
        return idx - 1

    def _get_ttinfo(self, idx):
        """Return the _ttinfo record for transition index *idx*."""
        # For no list or after the last transition, default to _ttinfo_std
        if idx is None or (idx + 1) == len(self._trans_list):
            return self._ttinfo_std

        # If there is a list and the time is before it, return _ttinfo_before
        if idx < 0:
            return self._ttinfo_before

        return self._trans_idx[idx]

    def _find_ttinfo(self, dt):
        """Return the _ttinfo record in effect at datetime *dt* (fold-aware)."""
        idx = self._resolve_ambiguous_time(dt)

        return self._get_ttinfo(idx)

    def is_ambiguous(self, dt, idx=None):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if idx is None:
            idx = self._find_last_transition(dt)

        # Calculate the difference in offsets from current to previous
        timestamp = _datetime_to_timestamp(dt)
        tti = self._get_ttinfo(idx)

        if idx is None or idx <= 0:
            return False

        od = self._get_ttinfo(idx - 1).offset - tti.offset
        tt = self._trans_list[idx]          # Transition time

        # Ambiguous when the wall time falls inside the repeated interval
        # just after a backwards transition.
        return timestamp < tt + od

    def _resolve_ambiguous_time(self, dt):
        """Return the transition index for *dt*, honoring its PEP 495 fold
        to pick a side when the wall time is ambiguous."""
        idx = self._find_last_transition(dt)

        # If we have no transitions, return the index
        _fold = self._fold(dt)
        if idx is None or idx == 0:
            return idx

        # Get the current datetime as a timestamp
        idx_offset = int(not _fold and self.is_ambiguous(dt, idx))

        return idx - idx_offset

    def utcoffset(self, dt):
        if dt is None:
            return None

        if not self._ttinfo_std:
            return ZERO

        return self._find_ttinfo(dt).delta

    def dst(self, dt):
        if dt is None:
            return None

        if not self._ttinfo_dst:
            return ZERO

        tti = self._find_ttinfo(dt)

        if not tti.isdst:
            return ZERO

        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.dstoffset

    @tzname_in_python2
    def tzname(self, dt):
        if not self._ttinfo_std or dt is None:
            return None

        return self._find_ttinfo(dt).abbr

    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return NotImplemented

        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._filename))

    def __reduce__(self):
        return self.__reduce_ex__(None)

    def __reduce_ex__(self, protocol):
        # Pickle by filename; the parsed attributes travel in __dict__.
        return (self.__class__, (None, self._filename), self.__dict__)
class tzrange(tzrangebase):
    """
    A time zone defined by fixed standard/DST offsets plus annual transition
    rules — everything a POSIX ``TZ`` variable can express, but with the
    transition rules given as :class:`relativedelta.relativedelta` objects.

    :param stdabbr:
        Abbreviation for standard time (e.g. ``'EST'``).

    :param stdoffset:
        Base UTC offset, in seconds or as a :class:`datetime.timedelta`.
        Defaults to +00:00 when unspecified.

    :param dstabbr:
        Abbreviation for DST / "summer" time (e.g. ``'EDT'``). Supplying it
        with no further DST details enables DST with the default offset and
        transition rules; omitting it along with all other DST information
        means the zone has no DST.

    :param dstoffset:
        UTC offset during DST, in seconds or as a timedelta. When any other
        DST information is given but this is not, the standard offset plus
        one hour is assumed.

    :param start:
        A :class:`relativedelta.relativedelta` (or equivalent) giving the
        yearly moment DST begins. Defaults to 2AM on the first Sunday of
        April when DST is enabled.

    :param end:
        Like ``start``, but for the end of DST, expressed on the *standard*
        time side. Defaults to 1AM (standard) on the last Sunday of October.
    """
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):

        global relativedelta
        from dateutil import relativedelta

        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr

        def to_seconds(value):
            # Accept raw seconds, a timedelta, or None (left untouched).
            try:
                return _total_seconds(value)
            except (TypeError, AttributeError):
                return value

        stdoffset = to_seconds(stdoffset)
        dstoffset = to_seconds(dstoffset)

        self._std_offset = (ZERO if stdoffset is None
                            else datetime.timedelta(seconds=stdoffset))

        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # DST defaults to one hour ahead of standard time.
            self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO

        # Default transition rules follow the pre-2007 US convention.
        if dstabbr and start is None:
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start

        if dstabbr and end is None:
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = bool(self._start_delta)

    def transitions(self, year):
        """
        Return the DST transition instants for *year* as a ``(dston,
        dstoff)`` pair of naive datetimes expressed on the standard-time
        side, or ``None`` for a zone without DST.

        :param year:
            The year whose transitions you would like to query.
        """
        if not self.hasdst:
            return None

        year_start = datetime.datetime(year, 1, 1)
        return (year_start + self._start_delta,
                year_start + self._end_delta)

    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return NotImplemented

        compared = ('_std_abbr', '_dst_abbr', '_std_offset',
                    '_dst_offset', '_start_delta', '_end_delta')
        return all(getattr(self, name) == getattr(other, name)
                   for name in compared)

    @property
    def _dst_base_offset(self):
        return self._dst_base_offset_
class tzstr(tzrange):
    """
    ``tzstr`` objects are time zone objects specified by a time-zone string as
    it would be passed to a ``TZ`` variable on POSIX-style systems (see
    the `GNU C Library: TZ Variable`_ for more details).

    There is one notable exception, which is that POSIX-style time zones use an
    inverted offset format, so normally ``GMT+3`` would be parsed as an offset
    3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
    offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
    behavior, pass a ``True`` value to ``posix_offset``.

    The :class:`tzrange` object provides the same functionality, but is
    specified using :class:`relativedelta.relativedelta` objects. rather than
    strings.

    :param s:
        A time zone string in ``TZ`` variable format. This can be a
        :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
        or a stream emitting unicode characters (e.g. :class:`StringIO`).

    :param posix_offset:
        Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
        ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
        POSIX standard.

    .. _`GNU C Library: TZ Variable`:
        https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
    """
    def __init__(self, s, posix_offset=False):
        # Deferred import avoids a circular dependency between tz and parser.
        global parser
        from dateutil import parser
        self._s = s

        res = parser._parsetz(s)
        if res is None:
            raise ValueError("unknown string format")

        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC") and not posix_offset:
            res.stdoffset *= -1

        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)

        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)

        self.hasdst = bool(self._start_delta)

    def _delta(self, x, isend=0):
        # Translate one parsed transition rule *x* into a relativedelta that,
        # added to January 1st of a year, yields that year's transition.
        # ``isend=1`` marks the DST->STD transition, whose wall time must be
        # expressed on the standard-time side.
        from dateutil import relativedelta
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    # Nth weekday counted from the start of the month.
                    kwargs["day"] = 1
                else:
                    # Nth weekday counted back from the end of the month.
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            # Julian day including Feb 29 (J-form vs n-form of POSIX TZ).
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday

        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)

        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200

        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset - self._std_offset
            kwargs["seconds"] -= delta.seconds + delta.days * 86400

        return relativedelta.relativedelta(**kwargs)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
    # One STANDARD or DAYLIGHT component of a parsed VTIMEZONE block.
    # Offsets arrive as integer seconds and are stored as timedeltas;
    # ``tzoffsetdiff`` is the DST correction (to - from).
    def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
                 tzname=None, rrule=None):
        self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
        self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
        self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
        self.isdst = isdst
        self.tzname = tzname
        self.rrule = rrule
class _tzicalvtz(_tzinfo):
    """A :class:`datetime.tzinfo` backed by parsed ``VTIMEZONE`` components.

    :param tzid:
        The ``TZID`` string identifying this zone.
    :param comps:
        List of :class:`_tzicalvtzcomp` objects (STANDARD/DAYLIGHT rules).
    """
    def __init__(self, tzid, comps=None):
        super(_tzicalvtz, self).__init__()

        self._tzid = tzid
        # Fixed mutable-default pitfall: the old ``comps=[]`` default was a
        # single shared list across all instances created without arguments.
        self._comps = comps if comps is not None else []
        # Small MRU caches mapping (naive datetime, fold) -> component.
        self._cachedate = []
        self._cachecomp = []

    def _find_comp(self, dt):
        """Return the component in effect at the (naive) datetime *dt*."""
        if len(self._comps) == 1:
            return self._comps[0]

        dt = dt.replace(tzinfo=None)

        try:
            return self._cachecomp[self._cachedate.index(
                (dt, self._fold(dt)))]
        except ValueError:
            pass

        lastcompdt = None
        lastcomp = None

        # Pick the component whose latest onset before ``dt`` is the most
        # recent one.
        for comp in self._comps:
            compdt = self._find_compdt(comp, dt)

            if compdt and (not lastcompdt or lastcompdt < compdt):
                lastcompdt = compdt
                lastcomp = comp

        if not lastcomp:
            # RFC says nothing about what to do when a given
            # time is before the first onset date. We'll look for the
            # first standard component, or the first component, if
            # none is found.
            for comp in self._comps:
                if not comp.isdst:
                    lastcomp = comp
                    break
            else:
                # BUG FIX: this used to read ``comp[0]`` — indexing the loop
                # variable (a component object) raised TypeError.  Fall back
                # to the first component of the zone instead.
                lastcomp = self._comps[0]

        # Most-recently-used insertion; keep the caches bounded at 10.
        self._cachedate.insert(0, (dt, self._fold(dt)))
        self._cachecomp.insert(0, lastcomp)

        if len(self._cachedate) > 10:
            self._cachedate.pop()
            self._cachecomp.pop()

        return lastcomp

    def _find_compdt(self, comp, dt):
        # On the fold (second occurrence of an ambiguous wall time) shift the
        # probe so the earlier component's rule matches.
        if comp.tzoffsetdiff < ZERO and self._fold(dt):
            dt -= comp.tzoffsetdiff

        compdt = comp.rrule.before(dt, inc=True)

        return compdt

    def utcoffset(self, dt):
        if dt is None:
            return None

        return self._find_comp(dt).tzoffsetto

    def dst(self, dt):
        comp = self._find_comp(dt)
        if comp.isdst:
            return comp.tzoffsetdiff
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._find_comp(dt).tzname

    def __repr__(self):
        return "<tzicalvtz %s>" % repr(self._tzid)

    __reduce__ = object.__reduce__
class tzical(object):
    """
    This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
    as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.

    :param `fileobj`:
        A file or stream in iCalendar format, which should be UTF-8 encoded
        with CRLF endings.

    .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
    """
    def __init__(self, fileobj):
        global rrule
        from dateutil import rrule

        if isinstance(fileobj, string_types):
            self._s = fileobj
            # ical should be encoded in UTF-8 with CRLF
            fileobj = open(fileobj, 'r')
        else:
            self._s = getattr(fileobj, 'name', repr(fileobj))
            fileobj = _ContextWrapper(fileobj)

        self._vtz = {}

        # The with-statement closes files we opened; _ContextWrapper passes
        # caller-owned streams through without closing them.
        with fileobj as fobj:
            self._parse_rfc(fobj.read())

    def keys(self):
        """
        Retrieves the available time zones as a list.
        """
        return list(self._vtz.keys())

    def get(self, tzid=None):
        """
        Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.

        :param tzid:
            If there is exactly one time zone available, omitting ``tzid``
            or passing :py:const:`None` value returns it. Otherwise a valid
            key (which can be retrieved from :func:`keys`) is required.

        :raises ValueError:
            Raised if ``tzid`` is not specified but there are either more
            or fewer than 1 zone defined.

        :returns:
            Returns either a :py:class:`datetime.tzinfo` object representing
            the relevant time zone or :py:const:`None` if the ``tzid`` was
            not found.
        """
        if tzid is None:
            if len(self._vtz) == 0:
                raise ValueError("no timezones defined")
            elif len(self._vtz) > 1:
                raise ValueError("more than one timezone available")
            tzid = next(iter(self._vtz))

        return self._vtz.get(tzid)

    def _parse_offset(self, s):
        # Parse an iCalendar UTC offset (+/-HHMM or +/-HHMMSS) into signed
        # seconds.
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1, +1)[s[0] == '+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
        elif len(s) == 6:
            return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
        else:
            raise ValueError("invalid offset: " + s)

    def _parse_rfc(self, s):
        # State-machine parse of the unfolded iCalendar lines; populates
        # self._vtz with one _tzicalvtz per VTIMEZONE block.
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")

        # Unfold
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1

        tzid = None
        comps = []
        invtz = False
        comptype = None
        for line in lines:
            if not line:
                continue
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError("component not closed: "+comptype)
                        if not tzid:
                            raise ValueError("mandatory TZID not found")
                        if not comps:
                            raise ValueError(
                                "at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError("mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError(
                                "mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # BUG FIX: this message used to say TZOFFSETFROM,
                            # masking which property was actually missing.
                            raise ValueError(
                                "mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError("invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError(
                                "unsupported %s parm: %s " % (name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError(
                                "unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError(
                                "unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError(
                                "unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
    # Candidate tz sources on POSIX systems: relative names in TZFILES are
    # resolved against each directory in TZPATHS by gettz().
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo",
               "/usr/lib/zoneinfo",
               "/usr/share/lib/zoneinfo",
               "/etc/zoneinfo"]
else:
    # Windows keeps no zoneinfo database on disk; tzwin is used instead.
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Return a tzinfo object for *name*, or ``None`` if nothing matches.

    Resolution order: the ``TZ`` environment variable (when *name* is
    omitted), the system zoneinfo database, ``tzwin`` on Windows, the
    bundled zoneinfo data, a POSIX ``tzstr`` parse, and finally a few
    well-known aliases ("GMT"/"UTC" and the local zone names).
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        # No explicit zone requested: look for the system's local zone file,
        # falling back to tzlocal() if none of the candidates parse.
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # POSIX allows a leading ":" before a zone name; strip it.
            # BUG FIX: the old code did ``name = name[:-1]``, which removed
            # the *last* character instead of the leading colon.
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names on disk use underscores ("New_York").
                    filepath = filepath.replace(' ', '_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin is not None:
                    try:
                        tz = tzwin(name)
                    except WindowsError:
                        tz = None

                if not tz:
                    from dateutil.zoneinfo import get_zonefile_instance
                    tz = get_zonefile_instance().get(name)

                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
def datetime_exists(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    would fall in a gap.

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" exists in ``tz``.
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # Work with the naive wall time from here on.
    dt = dt.replace(tzinfo=None)

    # A wall time exists iff it survives a round trip through UTC; a time
    # inside a DST gap gets normalized to a different wall time.
    roundtripped = (dt.replace(tzinfo=tz)
                      .astimezone(tzutc())
                      .astimezone(tz)
                      .replace(tzinfo=None))
    return dt == roundtripped
def datetime_ambiguous(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    is ambiguous (i.e if there are two times differentiated only by their DST
    status).

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" is ambiguous in
        ``tz``.

    .. versionadded:: 2.6.0
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # If a time zone defines its own "is_ambiguous" function, we'll use that.
    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
    if is_ambiguous_fn is not None:
        try:
            return tz.is_ambiguous(dt)
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only ordinary errors should fall
        # through to the generic fold-based check.
        except Exception:
            pass

    # If it doesn't come out and tell us it's ambiguous, we'll just check if
    # the fold attribute has any effect on this particular date and time.
    dt = dt.replace(tzinfo=tz)
    wall_0 = enfold(dt, fold=0)
    wall_1 = enfold(dt, fold=1)

    same_offset = wall_0.utcoffset() == wall_1.utcoffset()
    same_dst = wall_0.dst() == wall_1.dst()

    return not (same_offset and same_dst)
def _datetime_to_timestamp(dt):
    """
    Convert a :class:`datetime.datetime` object to an epoch timestamp in
    seconds since January 1, 1970, ignoring the time zone.
    """
    naive = dt.replace(tzinfo=None)
    return _total_seconds(naive - EPOCH)
class _ContextWrapper(object):
    """
    Class for wrapping contexts so that they are passed through in a
    with statement.
    """
    def __init__(self, context):
        self.context = context

    def __enter__(self):
        return self.context

    def __exit__(self, *args, **kwargs):
        # Intentionally a no-op: the wrapped object is caller-owned, so we
        # neither close it nor suppress exceptions.
        pass
# vim:ts=4:sw=4:et
| gpl-3.0 |
kholia/pyrpm | pyrpm/database/jointdb.py | 2 | 7701 | #
# Copyright (C) 2006 Red Hat, Inc.
# Authors: Florian Festi <ffesti@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import pyrpm.openpgp as openpgp
import db
try:
    # Prefer the C-accelerated implementation when itertools provides it.
    from itertools import chain
except ImportError:
    # Minimal pure-Python fallback for very old interpreters: yield each
    # element of each iterable, in order.
    def chain(*iterables):
        for iterable in iterables:
            for item in iterable:
                yield item
class JointDB(db.RpmDatabase):
    """Read-only facade that merges several RpmDatabase objects into one.

    Queries are fanned out to every registered sub-database and the results
    are concatenated (lists) or merged (dicts).  Packages cannot be added to
    or removed from the joint view directly.
    """

    def __init__(self, config, source, buildroot=''):
        self.dbs = []                   # ordered list of sub-databases
        self.config = config
        self.source = source
        self.buildroot = buildroot
        self.clear()
        self.keyring = openpgp.PGPKeyRing()
        self.is_read = 0                # 1 if the database was already read

    def __contains__(self, pkg):
        for db in self.dbs:
            if pkg in db:
                return True
        return False

    def importFilelist(self):
        """Import file lists in all sub-databases; truthy if any did."""
        ret = 0
        for db in self.dbs:
            ret |= db.importFilelist()
        return ret

    def isFilelistImported(self):
        """Truthy only if every sub-database has its file list imported."""
        ret = 1
        for db in self.dbs:
            ret &= db.isFilelistImported()
        return ret

    def isIdentitySave(self):
        """return if package objects that are added are in the db afterwards
        (.__contains__() returns True and the object are return from searches)
        """
        return False                    # does not support .addPkg()

    def addDB(self, db):
        self.dbs.append(db)

    def removeDB(self, db):
        self.dbs.remove(db)

    def removeAllDBs(self):
        # In-place slice assignment keeps external references to self.dbs
        # valid.
        self.dbs[:] = []

    # clear all structures
    def clear(self):
        for db in self.dbs:
            db.clear()

    def clearPkgs(self, tags=None, ntags=None):
        for db in self.dbs:
            db.clearPkgs(tags, ntags)

    def setBuildroot(self, buildroot):
        """Set database chroot to buildroot."""
        self.buildroot = buildroot

    def open(self):
        """If the database keeps a connection, prepare it."""
        for db in self.dbs:
            result = db.open()
            if result != self.OK:
                return result
        return self.OK

    def close(self):
        """If the database keeps a connection, close it."""
        for db in self.dbs:
            result = db.close()
            if result != self.OK:
                return result
        return self.OK

    def read(self):
        """Read the database in memory."""
        for db in self.dbs:
            result = db.read()
            if result != self.OK:
                return result
        return self.OK

    def _merge_search_results(self, dicts):
        # Merge a sequence of {key: [pkgs]} dicts, concatenating the
        # package lists of identical keys.
        result = {}
        for result_dict in dicts:
            for key, pkgs in result_dict.iteritems():
                if result.has_key(key):
                    result[key].extend(pkgs)
                else:
                    result[key] = pkgs
        return result

    # add package
    def addPkg(self, pkg):
        raise NotImplementedError

    # remove package
    def removePkg(self, pkg):
        raise NotImplementedError

    def searchName(self, name):
        result = []
        for db in self.dbs:
            result.extend(db.searchName(name))
        return result

    def getPkgs(self):
        result = []
        for db in self.dbs:
            result.extend(db.getPkgs())
        return result

    def getNames(self):
        result = []
        for db in self.dbs:
            result.extend(db.getNames())
        return result

    def hasName(self, name):
        for db in self.dbs:
            if db.hasName(name):
                return True
        return False

    def getPkgsByName(self, name):
        result = []
        for db in self.dbs:
            result.extend(db.getPkgsByName(name))
        return result

    def getProvides(self):
        result = []
        for db in self.dbs:
            result.extend(db.getProvides())
        return result

    def getFilenames(self):
        result = []
        for db in self.dbs:
            # BUG FIX: previously called db.getfilenames() (lower-case),
            # which does not exist on RpmDatabase.
            result.extend(db.getFilenames())
        return result

    def numFileDuplicates(self, filename):
        result = 0
        for db in self.dbs:
            # BUG FIX: previously added db.getFileDuplicates() (a dict,
            # ignoring *filename*) to an integer, raising TypeError.
            result += db.numFileDuplicates(filename)
        return result

    def getFileDuplicates(self):
        # Not supported for joint databases: a correct implementation would
        # have to merge the per-database duplicate maps and re-check which
        # files are duplicated *across* databases.
        raise NotImplementedError

    def getFileRequires(self):
        result = []
        for db in self.dbs:
            result.extend(db.getFileRequires())
        return result

    def getPkgsFileRequires(self):
        result = {}
        for db in self.dbs:
            result.update(db.getPkgsFileRequires())
        return result

    def iterProvides(self):
        return chain(*[db.iterProvides() for db in self.dbs])

    def iterRequires(self):
        return chain(*[db.iterRequires() for db in self.dbs])

    def iterConflicts(self):
        return chain(*[db.iterConflicts() for db in self.dbs])

    def iterObsoletes(self):
        return chain(*[db.iterObsoletes() for db in self.dbs])

    def iterTriggers(self):
        return chain(*[db.iterTriggers() for db in self.dbs])

    def reloadDependencies(self):
        for db in self.dbs:
            db.reloadDependencies()

    def searchPkgs(self, names):
        result = []
        for db in self.dbs:
            result.extend(db.searchPkgs(names))
        return result

    def search(self, names):
        result = []
        for db in self.dbs:
            result.extend(db.search(names))
        return result

    def searchProvides(self, name, flag, version):
        return self._merge_search_results(
            [db.searchProvides(name, flag, version)
             for db in self.dbs])

    def searchFilenames(self, filename):
        result = []
        for db in self.dbs:
            result.extend(db.searchFilenames(filename))
        return result

    def searchRequires(self, name, flag, version):
        return self._merge_search_results(
            [db.searchRequires(name, flag, version)
             for db in self.dbs])

    def searchConflicts(self, name, flag, version):
        return self._merge_search_results(
            [db.searchConflicts(name, flag, version)
             for db in self.dbs])

    def searchObsoletes(self, name, flag, version):
        return self._merge_search_results(
            [db.searchObsoletes(name, flag, version)
             for db in self.dbs])

    def searchTriggers(self, name, flag, version):
        return self._merge_search_results(
            [db.searchTriggers(name, flag, version)
             for db in self.dbs])

    def searchDependencies(self, name, flag, version):
        return self._merge_search_results(
            [db.searchDependencies(name, flag, version)
             for db in self.dbs])

    def _getDBPath(self):
        raise NotImplementedError
# vim:ts=4:sw=4:showmatch:expandtab
| gpl-2.0 |
eevee/urwid | docs/conf.py | 10 | 9846 | # -*- coding: utf-8 -*-
#
# Urwid documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 30 20:10:17 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['tools/templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Urwid'
copyright = u'2014, Ian Ward et al'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# NOTE(review): str.decode() and execfile() below are Python-2-only — confirm
# the docs build still targets Python 2 before modernizing.
FILE_PATH = os.path.dirname(__file__).decode('utf-8')
VERSION_MODULE = os.path.abspath(os.path.join(FILE_PATH,
    '../urwid/version.py'))
VERSION_VARS = {}
execfile(VERSION_MODULE, VERSION_VARS)
# The short X.Y version.
version = '.'.join([str(x) for x in VERSION_VARS['VERSION'][:2]])
# The full version, including alpha/beta/rc tags.
release = VERSION_VARS['__version__']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_style = None # make readthedocs really use the default theme

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'sidebarbgcolor':'#263193',
    'sidebarbtncolor':'#263193',
    'footerbgcolor':'#181035',
    'relbarbgcolor':'#181035',
    'sidebarlinkcolor':'#aabee8',
    'linkcolor':'#263193',
    'visitedlinkcolor':'#263193',
    'headtextcolor':'#181035',
    'headlinkcolor':'#181035',
    'collapsiblesidebar': True,
}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Urwid %s" % (release,)

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'urwid-logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['tools/static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': 'indexsidebar.html',
}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    'index': 'indexcontent.html',
}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Urwiddoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Urwid.tex', u'Urwid Documentation',
     u'Ian Ward', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'urwid', u'Urwid Documentation',
     [u'Ian Ward'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Urwid', u'Urwid Documentation',
     u'Ian Ward', 'Urwid', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'Urwid'
epub_author = u'Ian Ward'
epub_publisher = u'Ian Ward'
epub_copyright = u'2014, Ian Ward et al'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# -- Autodoc behaviour ----------------------------------------------------------
autoclass_content = "both"
autodoc_member_order = "alphabetical"
autodoc_default_flags = ["members"]
| lgpl-2.1 |
Vertexwahn/bgfx | 3rdparty/scintilla/test/ScintillaCallable.py | 68 | 4970 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ctypes, os, sys
from ctypes import c_int, c_ulong, c_char_p, c_wchar_p, c_ushort, c_uint, c_long
class TEXTRANGE(ctypes.Structure):
    """ctypes mirror of Scintilla's Sci_TextRange struct.

    ``cpMin``/``cpMax`` bound the requested document range; ``lpstrText``
    points at the caller-supplied output buffer.
    """
    _fields_ = (
        ('cpMin', c_long),
        ('cpMax', c_long),
        ('lpstrText', ctypes.POINTER(ctypes.c_char)),
    )
class FINDTEXT(ctypes.Structure):
    """ctypes mirror of Scintilla's Sci_TextToFind struct (SCI_FINDTEXT).

    ``cpMin``/``cpMax`` bound the search range, ``lpstrText`` is the needle,
    and ``cpMinText``/``cpMaxText`` receive the match extents.
    """
    _fields_ = (
        ('cpMin', c_long),
        ('cpMax', c_long),
        ('lpstrText', c_char_p),
        ('cpMinText', c_long),
        ('cpMaxText', c_long),
    )
class SciCall:
    # Callable wrapper around a single Scintilla message: binds the direct
    # function, the Scintilla pointer and the message id, so that calling
    # the instance sends the message with (wParam, lParam).
    def __init__(self, fn, ptr, msg, stringResult=False):
        self._fn = fn
        self._ptr = ptr
        self._msg = msg
        # When True, the message follows Scintilla's two-phase string
        # protocol (length query, then buffer fill).
        self._stringResult = stringResult
    def __call__(self, w=0, l=0):
        # Parameters are passed as c_char_p; integers are reinterpreted as
        # pointer-sized values by the cast.
        ww = ctypes.cast(w, c_char_p)
        if self._stringResult:
            # Phase 1: ask for the result length (NULL output buffer).
            lengthBytes = self._fn(self._ptr, self._msg, ww, None)
            if lengthBytes == 0:
                return bytearray()
            result = (ctypes.c_byte * lengthBytes)(0)
            # Phase 2: fetch the bytes; the reported length must not change.
            lengthBytes2 = self._fn(self._ptr, self._msg, ww, ctypes.cast(result, c_char_p))
            assert lengthBytes == lengthBytes2
            return bytearray(result)[:lengthBytes]
        else:
            ll = ctypes.cast(l, c_char_p)
            return self._fn(self._ptr, self._msg, ww, ll)
# C signature of Scintilla's direct-access function:
# long fn(void *ptr, int msg, void *wParam, void *lParam).
sciFX = ctypes.CFUNCTYPE(c_long, c_char_p, c_int, c_char_p, c_char_p)
class ScintillaCallable:
    # Dynamic proxy over the Scintilla "direct" function: attribute access is
    # translated into Scintilla messages using the interface definition
    # ("face") parsed from Scintilla.iface.
    def __init__(self, face, scifn, sciptr):
        # Assign through __dict__ to avoid recursing into our own
        # __setattr__ message dispatch.
        self.__dict__["face"] = face
        self.__dict__["used"] = set()
        self.__dict__["all"] = set()
        # The k member is for accessing constants as a dictionary
        self.__dict__["k"] = {}
        for f in face.features:
            self.all.add(f)
            if face.features[f]["FeatureType"] == "val":
                self.k[f] = int(self.face.features[f]["Value"], 0)
            elif face.features[f]["FeatureType"] == "evt":
                self.k["SCN_"+f] = int(self.face.features[f]["Value"], 0)
        scifn = sciFX(scifn)
        self.__dict__["_scifn"] = scifn
        self.__dict__["_sciptr"] = sciptr
    def __getattr__(self, name):
        # Lookup order: exact feature name, then getter property
        # ("Get"+name), then notification constants ("SCN_...").
        if name in self.face.features:
            self.used.add(name)
            feature = self.face.features[name]
            value = int(feature["Value"], 0)
            #~ print("Feature", name, feature)
            if feature["FeatureType"] == "val":
                # Cache plain constants so future lookups skip __getattr__.
                self.__dict__[name] = value
                return value
            else:
                if feature["Param2Type"] == "stringresult" and \
                    name not in ["GetText", "GetLine", "GetCurLine"]:
                    return SciCall(self._scifn, self._sciptr, value, True)
                else:
                    return SciCall(self._scifn, self._sciptr, value)
        elif ("Get" + name) in self.face.features:
            self.used.add("Get" + name)
            feature = self.face.features["Get" + name]
            value = int(feature["Value"], 0)
            # Parameterless scalar getters are exposed as plain attributes.
            if feature["FeatureType"] == "get" and \
                not name.startswith("Get") and \
                not feature["Param1Type"] and \
                not feature["Param2Type"] and \
                feature["ReturnType"] in ["bool", "int", "position"]:
                #~ print("property", feature)
                return self._scifn(self._sciptr, value, None, None)
        elif name.startswith("SCN_") and name in self.k:
            self.used.add(name)
            feature = self.face.features[name[4:]]
            value = int(feature["Value"], 0)
            #~ print("Feature", name, feature)
            if feature["FeatureType"] == "val":
                return value
        raise AttributeError(name)
    def __setattr__(self, name, val):
        # Map "obj.Prop = v" onto the corresponding "SetProp" message.
        if ("Set" + name) in self.face.features:
            self.used.add("Set" + name)
            feature = self.face.features["Set" + name]
            value = int(feature["Value"], 0)
            #~ print("setproperty", feature)
            if feature["FeatureType"] == "set" and not name.startswith("Set"):
                if feature["Param1Type"] in ["bool", "int", "position"]:
                    return self._scifn(self._sciptr, value, c_char_p(val), None)
                elif feature["Param2Type"] in ["string"]:
                    return self._scifn(self._sciptr, value, None, c_char_p(val))
                raise AttributeError(name)
        raise AttributeError(name)
    def getvalue(self, name):
        # Numeric value of a named (non-event) feature; -1 when missing or
        # unparsable.
        if name in self.face.features:
            feature = self.face.features[name]
            if feature["FeatureType"] != "evt":
                try:
                    return int(feature["Value"], 0)
                except ValueError:
                    return -1
        return -1
    def ByteRange(self, start, end):
        # Fetch raw document bytes in [start, end) via SCI_GETTEXTRANGE.
        tr = TEXTRANGE()
        tr.cpMin = start
        tr.cpMax = end
        length = end - start
        tr.lpstrText = ctypes.create_string_buffer(length + 1)
        self.GetTextRange(0, ctypes.byref(tr))
        text = tr.lpstrText[:length]
        # Pad with NULs if fewer bytes came back than requested.
        text += b"\0" * (length - len(text))
        return text
    def StyledTextRange(self, start, end):
        # As ByteRange, but each cell is (char, style) so the buffer is
        # twice as large (SCI_GETSTYLEDTEXT).
        tr = TEXTRANGE()
        tr.cpMin = start
        tr.cpMax = end
        length = 2 * (end - start)
        tr.lpstrText = ctypes.create_string_buffer(length + 2)
        self.GetStyledText(0, ctypes.byref(tr))
        styledText = tr.lpstrText[:length]
        styledText += b"\0" * (length - len(styledText))
        return styledText
    def FindBytes(self, start, end, s, flags):
        # SCI_FINDTEXT wrapper; returns the match position or -1.
        ft = FINDTEXT()
        ft.cpMin = start
        ft.cpMax = end
        ft.lpstrText = s
        ft.cpMinText = 0
        ft.cpMaxText = 0
        pos = self.FindText(flags, ctypes.byref(ft))
        #~ print(start, end, ft.cpMinText, ft.cpMaxText)
        return pos
    def Contents(self):
        # Entire document as raw bytes.
        return self.ByteRange(0, self.Length)
    def SetContents(self, s):
        # Replace the whole document by targeting its full extent.
        self.TargetStart = 0
        self.TargetEnd = self.Length
        self.ReplaceTarget(len(s), s)
| bsd-2-clause |
ppwwyyxx/tensorflow | tensorflow/python/kernel_tests/depthtospace_op_test.py | 8 | 15106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for DepthToSpace op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import device_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class DepthToSpaceTest(test.TestCase):
  """Functional tests for the DepthToSpace op."""

  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
    """Runs depth_to_space on |inputs| and compares against |outputs|.

    Exercises NHWC on CPU and, when a GPU is available, both NHWC and NCHW
    on GPU. NCHW on CPU is expected to be unregistered.
    """
    input_nhwc = math_ops.cast(inputs, dtype)
    with self.cached_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.depth_to_space(input_nhwc, block_size)
      self.assertAllEqual(x_tf.eval(), outputs)
      # Run this test only if only CPU device is available
      if all(x.device_type == "CPU" for x in device_lib.list_local_devices()):
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.depth_to_space(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        with self.assertRaisesRegexp(
            errors_impl.InvalidArgumentError,
            "No OpKernel was registered to support Op 'DepthToSpace'"):
          output_nhwc.eval()
    if test.is_gpu_available():
      with self.cached_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf.eval(), outputs)
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.depth_to_space(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(output_nhwc.eval(), outputs)

  @test_util.run_deprecated_v1
  def testBasic(self):
    x_np = [[[[1, 2, 3, 4]]]]
    block_size = 2
    x_out = [[[[1], [2]], [[3], [4]]]]
    self._testOne(x_np, block_size, x_out)

  @test_util.run_deprecated_v1
  def testBasicFloat16(self):
    x_np = [[[[1, 2, 3, 4]]]]
    block_size = 2
    x_out = [[[[1], [2]], [[3], [4]]]]
    self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered spatially.
  @test_util.run_deprecated_v1
  def testBlockSize2(self):
    x_np = [[[[1, 2, 3, 4],
              [5, 6, 7, 8]],
             [[9, 10, 11, 12],
              [13, 14, 15, 16]]]]
    block_size = 2
    x_out = [[[[1], [2], [5], [6]],
              [[3], [4], [7], [8]],
              [[9], [10], [13], [14]],
              [[11], [12], [15], [16]]]]
    self._testOne(x_np, block_size, x_out)

  @test_util.run_deprecated_v1
  def testBlockSize2Batch10(self):
    block_size = 2
    def batch_input_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i],
               [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]
    def batch_output_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]
    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  def testBatchSize0(self):
    block_size = 2
    batch_size = 0
    input_nhwc = array_ops.ones([batch_size, 2, 3, 12])
    x_out = array_ops.ones([batch_size, 4, 6, 3])
    with self.cached_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.depth_to_space(input_nhwc, block_size)
      self.assertAllEqual(x_tf.shape, x_out.shape)
      self.evaluate(x_tf)
    if test.is_gpu_available():
      with self.cached_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf.shape, x_out.shape)
        self.evaluate(x_tf)

  # Tests for different width and height.
  @test_util.run_deprecated_v1
  def testNonSquare(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
             [[5, 50, 6, 60, 7, 70, 8, 80]],
             [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20]],
              [[3, 30], [4, 40]],
              [[5, 50], [6, 60]],
              [[7, 70], [8, 80]],
              [[9, 90], [10, 100]],
              [[11, 110], [12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered spatially.
  @test_util.run_deprecated_v1
  def testBlockSize4FlatInput(self):
    x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    block_size = 4
    x_out = [[[[1], [2], [5], [6]],
              [[3], [4], [7], [8]],
              [[9], [10], [13], [14]],
              [[11], [12], [15], [16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths.
  # To make sure elements are properly interleaved in depth.
  @test_util.run_deprecated_v1
  def testDepthInterleaved(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20]],
              [[3, 30], [4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths. Here an odd depth.
  # To make sure elements are properly interleaved in depth.
  @test_util.run_deprecated_v1
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3], [4, 5, 6]],
              [[7, 8, 9], [10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths.
  # To make sure elements are properly interleaved in depth.
  @test_util.run_deprecated_v1
  def testDepthInterleavedLarger(self):
    x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
              [5, 50, 6, 60, 7, 70, 8, 80]],
             [[9, 90, 10, 100, 11, 110, 12, 120],
              [13, 130, 14, 140, 15, 150, 16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
              [[3, 30], [4, 40], [7, 70], [8, 80]],
              [[9, 90], [10, 100], [13, 130], [14, 140]],
              [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  # Tests for a block larger for the depth. In this case should raise an
  # exception.
  @test_util.run_deprecated_v1
  def testBlockSizeTooLarge(self):
    x_np = [[[[1, 2, 3, 4],
              [5, 6, 7, 8]],
             [[9, 10, 11, 12],
              [13, 14, 15, 16]]]]
    block_size = 4
    # Raise an exception, since the depth is only 4 and needs to be
    # divisible by 16.
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      self.evaluate(out_tf)

  # Test when the block size is 0.
  @test_util.run_deprecated_v1
  def testBlockSize0(self):
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      self.evaluate(out_tf)

  # Test when the block size is 1. The block size should be > 1.
  @test_util.run_deprecated_v1
  def testBlockSizeOne(self):
    x_np = [[[[1, 1, 1, 1],
              [2, 2, 2, 2]],
             [[3, 3, 3, 3],
              [4, 4, 4, 4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      self.evaluate(out_tf)

  @test_util.run_deprecated_v1
  def testBlockSizeLargerThanInput(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 10
    # Fixed: this previously called space_to_depth (copy-paste from the
    # SpaceToDepth test); this suite must exercise depth_to_space, which
    # also raises ValueError here (depth 1 is not divisible by 100).
    with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
      self.evaluate(out_tf)

  @test_util.run_deprecated_v1
  def testBlockSizeNotDivisibleDepth(self):
    # The depth is not divisible by the square of the block size.
    x_np = [[[[1, 1, 1, 1],
              [2, 2, 2, 2]],
             [[3, 3, 3, 3],
              [4, 4, 4, 4]]]]
    block_size = 3
    # Fixed: this previously called space_to_depth; depth_to_space is the
    # op under test and raises ValueError since 4 is not divisible by 9.
    with self.assertRaises(ValueError):
      _ = array_ops.depth_to_space(x_np, block_size)

  @test_util.run_deprecated_v1
  def testUnknownShape(self):
    t = array_ops.depth_to_space(
        array_ops.placeholder(dtypes.float32), block_size=4)
    self.assertEqual(4, t.get_shape().ndims)

  def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):
    """Reference implementation of depth_to_space via reshape/transpose."""
    block_size_sq = block_size * block_size
    if data_format == "NHWC":
      b, ih, iw, ic = tensor.shape.as_list()
      assert ic % block_size_sq == 0, (ic, block_size_sq)
      ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, ih, iw, block_size, block_size, oc])
      tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
      tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
    elif data_format == "NCHW":
      b, ic, ih, iw = tensor.shape.as_list()
      assert ic % block_size_sq == 0, (ic, block_size_sq)
      ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, block_size, block_size, oc, ih, iw])
      tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])
      tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
    return tensor

  def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
                         block_size, data_format, use_gpu):
    """Compares the op against the reshape/transpose reference above."""
    in_channels = out_channels * block_size * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    if data_format == "NCHW_VECT_C":
      # Initialize the input tensor with qint8 values that circle -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
      t = test_util.NHWCToNCHW_VECT_C(t)
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      t = array_ops.depth_to_space(t, block_size, data_format="NCHW_VECT_C")
      t = gen_array_ops.dequantize(t, -128, 127)
      actual = test_util.NCHW_VECT_CToNHWC(t)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)
      actual = array_ops.depth_to_space(t, block_size, data_format=data_format)

    with self.session(use_gpu=use_gpu) as sess:
      actual_vals, expected_vals = self.evaluate([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))

  def testAgainstTranspose(self):
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)

    if not test.is_gpu_available():
      tf_logging.info("skipping gpu tests since gpu not available")
      return

    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
    self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 1, 3, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
    self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
    self.compareToTranspose(3, 200, 300, 32, 2, "NCHW", True)
    self.compareToTranspose(3, 2, 3, 8, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 4, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
    self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
    self.compareToTranspose(3, 200, 300, 32, 2, "NCHW_VECT_C", True)
class DepthToSpaceGradientTest(test.TestCase):
  """Gradient checks for depth_to_space."""

  def _checkGrad(self, x, block_size, data_format):
    # NCHW gradients are only implemented on GPU, so there is nothing to
    # verify without one.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert x.ndim == 4
    with self.cached_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)
      epsilon = 1e-2
      jacob_analytic, jacob_numeric = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      self.assertAllClose(jacob_analytic, jacob_numeric, rtol=1e-2,
                          atol=epsilon)

  def _compare(self, b, h, w, d, block_size, data_format):
    # Random four-dimensional input with depth d * block_size**2, laid out
    # according to |data_format|.
    depth = d * block_size * block_size
    data = np.random.normal(0, 1, b * h * w * depth).astype(np.float32)
    shape = [b, h, w, depth] if data_format == "NHWC" else [b, depth, h, w]
    self._checkGrad(data.reshape(shape), block_size, data_format)

  # Keep the dimensions modest here: the Jacobian is sized by the cartesian
  # product of the input and output element counts.
  @test_util.run_deprecated_v1
  def testSmall(self):
    block_size = 2
    self._compare(3, 2, 5, 3, block_size, "NHWC")
    self._compare(3, 2, 5, 3, block_size, "NCHW")

  @test_util.run_deprecated_v1
  def testSmall2(self):
    block_size = 3
    self._compare(1, 2, 3, 2, block_size, "NHWC")
    self._compare(1, 2, 3, 2, block_size, "NCHW")
# Run the tests in this file when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
timduru/platform-external-chromium_org | chrome/test/functional/perf.py | 47 | 107008 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Basic pyauto performance tests.
For tests that need to be run for multiple iterations (e.g., so that average
and standard deviation values can be reported), the default number of iterations
run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
That value can optionally be tweaked by setting an environment variable
'NUM_ITERATIONS' to a positive integer, representing the number of iterations
to run. An additional, initial iteration will also be run to "warm up" the
environment, and the result from that initial iteration will be ignored.
Some tests rely on repeatedly appending tabs to Chrome. Occasionally, these
automation calls time out, thereby affecting the timing measurements (see issue
crosbug.com/20503). To work around this, the tests discard timing measurements
that involve automation timeouts. The value |_DEFAULT_MAX_TIMEOUT_COUNT|
specifies the threshold number of timeouts that can be tolerated before the test
fails. To tweak this value, set environment variable 'MAX_TIMEOUT_COUNT' to the
desired threshold value.
"""
import BaseHTTPServer
import commands
import errno
import itertools
import logging
import math
import os
import posixpath
import re
import SimpleHTTPServer
import SocketServer
import signal
import subprocess
import sys
import tempfile
import threading
import time
import timeit
import urllib
import urllib2
import urlparse
import pyauto_functional # Must be imported before pyauto.
import pyauto
import simplejson # Must be imported after pyauto; located in third_party.
from netflix import NetflixTestHelper
import pyauto_utils
import test_utils
import webpagereplay
from youtube import YoutubeTestHelper
# Absolute path to the root of the Chromium checkout, computed as four
# directory levels above this file.
_CHROME_BASE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
def FormatChromePath(posix_path, **kwargs):
  """Converts a Chromium-root-relative POSIX path to an OS-specific path.

  Args:
    posix_path: A '/'-separated path string, optionally containing format()
        placeholders. Example: 'src/third_party/{module_name}/__init__.py'
    kwargs: Substitutions for the format placeholders.
        Example: {'module_name': 'pylib'}

  Returns:
    The corresponding absolute path inside the current Chromium tree.
  """
  expanded = posix_path.format(**kwargs)
  return os.path.join(_CHROME_BASE_DIR, *expanded.split('/'))
def StandardDeviation(values):
  """Returns the sample standard deviation of |values|.

  Returns 0.0 when a deviation cannot be computed: fewer than two values,
  or any value is None.
  """
  # Fixed: the old guard 'not avg' (meant as a None check) also triggered
  # whenever the mean happened to be exactly zero, wrongly reporting 0.0
  # for zero-mean data such as [1.0, -1.0].
  if len(values) < 2 or None in values:
    return 0.0
  avg = sum(values) / float(len(values))
  temp_vals = [math.pow(x - avg, 2) for x in values]
  # Sample (n - 1) variance, matching the original implementation.
  return math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))
def Mean(values):
  """Returns the arithmetic mean of |values|, or None when it is undefined
  (empty input, or an input containing None)."""
  if values and None not in values:
    return sum(values) / float(len(values))
  return None
def GeometricMean(values):
  """Returns the geometric mean of |values|.

  Returns None when the mean is undefined: |values| is empty, or contains
  None or a negative number. Returns 0.0 if any value is 0.0.
  """
  # any() short-circuits instead of materializing a throwaway list as the
  # previous list-comprehension check did.
  if not values or None in values or any(x < 0.0 for x in values):
    return None
  if 0.0 in values:
    return 0.0
  # exp of the arithmetic mean of the logs.
  logs = [math.log(x) for x in values]
  return math.exp(sum(logs) / float(len(logs)))
class BasePerfTest(pyauto.PyUITest):
  """Base class for performance tests."""
  # Default number of timed iterations per test; overridable at runtime via
  # the NUM_ITERATIONS environment variable (see setUp).
  _DEFAULT_NUM_ITERATIONS = 10  # Keep synced with desktopui_PyAutoPerfTests.py.
  # Default threshold of tolerated automation timeouts before a test fails;
  # overridable via the MAX_TIMEOUT_COUNT environment variable.
  _DEFAULT_MAX_TIMEOUT_COUNT = 10
  # Markers bracketing perf values in stdout so autotest can parse them.
  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
  def setUp(self):
    """Performs necessary setup work before running each test."""
    # Iteration and timeout-tolerance limits may be overridden through the
    # environment (see the module docstring).
    self._num_iterations = self._DEFAULT_NUM_ITERATIONS
    if 'NUM_ITERATIONS' in os.environ:
      self._num_iterations = int(os.environ['NUM_ITERATIONS'])
    self._max_timeout_count = self._DEFAULT_MAX_TIMEOUT_COUNT
    if 'MAX_TIMEOUT_COUNT' in os.environ:
      self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
    # Count of automation timeouts observed so far in this test.
    self._timeout_count = 0
    # For users who want to see local perf graphs for Chrome when running the
    # tests on their own machines.
    self._local_perf_dir = None
    if 'LOCAL_PERF_DIR' in os.environ:
      self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
      if not os.path.exists(self._local_perf_dir):
        self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
                  'but this directory does not exist.' % self._local_perf_dir)
    # When outputting perf graph information on-the-fly for Chrome, this
    # variable lets us know whether a perf measurement is for a new test
    # execution, or the current test execution.
    self._seen_graph_lines = {}
    pyauto.PyUITest.setUp(self)
    # Flush all buffers to disk and wait until system calms down. Must be done
    # *after* calling pyauto.PyUITest.setUp, since that is where Chrome is
    # killed and re-initialized for a new test.
    # TODO(dennisjeffrey): Implement wait for idle CPU on Windows/Mac.
    if self.IsLinux():  # IsLinux() also implies IsChromeOS().
      os.system('sync')
      self._WaitForIdleCPU(60.0, 0.05)
  def _IsPIDRunning(self, pid):
    """Checks if a given process id is running.

    Args:
      pid: The process id of the process to check.

    Returns:
      True if the process is running. False if not.
    """
    try:
      # Note that this sends the signal 0, which should not interfere with the
      # process.
      os.kill(pid, 0)
    except OSError, err:
      # ESRCH means no such process. Any other OSError (e.g. EPERM) implies
      # the pid exists, so fall through to the zombie check below.
      if err.errno == errno.ESRCH:
        return False
    try:
      # A zombie still accepts signal 0, so also inspect /proc to rule out an
      # exited-but-unreaped process (Linux/ChromeOS only).
      with open('/proc/%s/status' % pid) as proc_file:
        if 'zombie' in proc_file.read():
          return False
    except IOError:
      # The /proc entry vanished between checks: the process is gone.
      return False
    return True
def _GetAllDescendentProcesses(self, pid):
pstree_out = subprocess.check_output(['pstree', '-p', '%s' % pid])
children = re.findall('\((\d+)\)', pstree_out)
return [int(pid) for pid in children]
def _WaitForChromeExit(self, browser_info, timeout):
pid = browser_info['browser_pid']
chrome_pids = self._GetAllDescendentProcesses(pid)
initial_time = time.time()
while time.time() - initial_time < timeout:
if any([self._IsPIDRunning(pid) for pid in chrome_pids]):
time.sleep(1)
else:
logging.info('_WaitForChromeExit() took: %s seconds',
time.time() - initial_time)
return
self.fail('_WaitForChromeExit() did not finish within %s seconds' %
timeout)
  def tearDown(self):
    # In PGO mode, force Chrome to exit cleanly before the regular teardown
    # tears the browser down -- presumably so profile data gets written out;
    # see the referenced bug for background.
    if self._IsPGOMode():
      browser_info = self.GetBrowserInfo()
      pid = browser_info['browser_pid']
      # session_manager kills chrome without waiting for it to cleanly exit.
      # Until that behavior is changed, we stop it and wait for Chrome to exit
      # cleanly before restarting it. See:
      # crbug.com/264717
      subprocess.call(['sudo', 'pkill', '-STOP', 'session_manager'])
      # SIGINT triggers Chrome's clean-shutdown path.
      os.kill(pid, signal.SIGINT)
      self._WaitForChromeExit(browser_info, 120)
      subprocess.call(['sudo', 'pkill', '-CONT', 'session_manager'])
    pyauto.PyUITest.tearDown(self)
def _IsPGOMode(self):
return 'USE_PGO' in os.environ
  def _WaitForIdleCPU(self, timeout, utilization):
    """Waits for the CPU to become idle (< utilization).

    Args:
      timeout: The longest time in seconds to wait before throwing an error.
      utilization: The CPU usage below which the system should be considered
          idle (between 0 and 1.0 independent of cores/hyperthreads).
    """
    time_passed = 0.0
    fraction_non_idle_time = 1.0
    logging.info('Starting to wait up to %fs for idle CPU...', timeout)
    while fraction_non_idle_time >= utilization:
      # Sample CPU usage over a 2-second window.
      cpu_usage_start = self._GetCPUUsage()
      time.sleep(2)
      time_passed += 2.0
      cpu_usage_end = self._GetCPUUsage()
      fraction_non_idle_time = \
          self._GetFractionNonIdleCPUTime(cpu_usage_start, cpu_usage_end)
      logging.info('Current CPU utilization = %f.', fraction_non_idle_time)
      if time_passed > timeout:
        # Log what is keeping the CPU busy before giving up.
        self._LogProcessActivity()
        message = ('CPU did not idle after %fs wait (utilization = %f).' % (
                   time_passed, fraction_non_idle_time))
        # crosbug.com/37389
        # PGO runs tolerate a busy CPU rather than aborting the test.
        if self._IsPGOMode():
          logging.info(message)
          logging.info('Still continuing because we are in PGO mode.')
          return
        self.fail(message)
    logging.info('Wait for idle CPU took %fs (utilization = %f).',
                 time_passed, fraction_non_idle_time)
def _LogProcessActivity(self):
"""Logs the output of top on Linux/Mac/CrOS.
TODO: use taskmgr or similar on Windows.
"""
if self.IsLinux() or self.IsMac(): # IsLinux() also implies IsChromeOS().
logging.info('Logging current process activity using top.')
cmd = 'top -b -d1 -n1'
if self.IsMac():
cmd = 'top -l1'
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
output = p.stdout.read()
logging.info(output)
else:
logging.info('Process activity logging not implemented on this OS.')
  def _AppendTab(self, url):
    """Appends a tab and increments a counter if the automation call times out.

    Args:
      url: The string url to which the appended tab should be navigated.
    """
    # AppendTab returns False on an automation timeout (crosbug.com/20503);
    # the count lets tests discard measurements affected by timeouts.
    if not self.AppendTab(pyauto.GURL(url)):
      self._timeout_count += 1
def _MeasureElapsedTime(self, python_command, num_invocations=1):
"""Measures time (in msec) to execute a python command one or more times.
Args:
python_command: A callable.
num_invocations: An integer number of times to invoke the given command.
Returns:
The time required to execute the python command the specified number of
times, in milliseconds as a float.
"""
assert callable(python_command)
def RunCommand():
for _ in range(num_invocations):
python_command()
timer = timeit.Timer(stmt=RunCommand)
return timer.timeit(number=1) * 1000 # Convert seconds to milliseconds.
  def _OutputPerfForStandaloneGraphing(self, graph_name, description, value,
                                       units, units_x, is_stacked):
    """Outputs perf measurement data to a local folder to be graphed.

    This function only applies to Chrome desktop, and assumes that environment
    variable 'LOCAL_PERF_DIR' has been specified and refers to a valid directory
    on the local machine.

    Args:
      graph_name: A string name for the graph associated with this performance
          value.
      description: A string description of the performance value. Should not
          include spaces.
      value: Either a single numeric value representing a performance
          measurement, or else a list of (x, y) tuples representing one or more
          long-running performance measurements, where 'x' is an x-axis value
          (such as an iteration number) and 'y' is the corresponding performance
          measurement.  If a list of tuples is given, then the |units_x|
          argument must also be specified.
      units: A string representing the units of the performance measurement(s).
          Should not include spaces.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x values
          are iteration numbers.  If this argument is specified, then the
          |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph.  First-come values are
          stacked at bottom by default.
    """
    # The revision counter distinguishes test runs; it is bumped once per run
    # (the first time any graph line is emitted for this test execution).
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0

    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1

    # Update graphs.dat.
    # graphs.dat is a JSON list describing every known graph; register this
    # graph there if it has not been seen before.
    existing_graphs = []
    graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
    if os.path.exists(graphs_file):
      with open(graphs_file) as f:
        existing_graphs = simplejson.loads(f.read())
    is_new_graph = True
    for graph in existing_graphs:
      if graph['name'] == graph_name:
        is_new_graph = False
        break
    if is_new_graph:
      new_graph =  {
        'name': graph_name,
        'units': units,
        'important': False,
      }
      if units_x:
        new_graph['units_x'] = units_x
      existing_graphs.append(new_graph)
      with open(graphs_file, 'w') as f:
        f.write(simplejson.dumps(existing_graphs))
      os.chmod(graphs_file, 0755)

    # Update data file for this particular graph.
    # The summary file holds one JSON line per revision, newest first.
    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, graph_name + '-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    existing_lines = map(
        simplejson.loads, map(lambda x: x.strip(), existing_lines))

    seen_key = graph_name
    # We assume that the first line |existing_lines[0]| is the latest.
    if units_x:
      new_line = {
        'rev': revision,
        'traces': { description: [] }
      }
      if seen_key in self._seen_graph_lines:
        # We've added points previously for this graph line in the current
        # test execution, so retrieve the original set of points specified in
        # the most recent revision in the data file.
        new_line = existing_lines[0]
        if not description in new_line['traces']:
          new_line['traces'][description] = []
      for x_value, y_value in value:
        new_line['traces'][description].append([str(x_value), str(y_value)])
    else:
      # Scalar measurements are stored as [value, stddev]; stddev is 0.0 here.
      new_line = {
        'rev': revision,
        'traces': { description: [str(value), str(0.0)] }
      }

    if is_stacked:
      new_line['stack'] = True
      if 'stack_order' not in new_line:
        new_line['stack_order'] = []
      if description not in new_line['stack_order']:
        new_line['stack_order'].append(description)

    if seen_key in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_key] = True

    existing_lines = map(simplejson.dumps, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))
  def _OutputPerfGraphValue(self, description, value, units,
                            graph_name, units_x=None, is_stacked=False):
    """Outputs a performance value to have it graphed on the performance bots.

    The output format differs, depending on whether the current platform is
    Chrome desktop or ChromeOS.

    For ChromeOS, the performance bots have a 30-character limit on the length
    of the key associated with a performance value.  A key on ChromeOS is
    considered to be of the form "units_description" (for example,
    "milliseconds_NewTabPage"), and is created from the |units| and
    |description| passed as input to this function.  Any characters beyond the
    length 30 limit are truncated before results are stored in the autotest
    database.

    Args:
      description: A string description of the performance value.  Should not
          include spaces.
      value: Either a numeric value representing a performance measurement, or
          a list of values to be averaged. Lists may also contain (x, y) tuples
          representing one or more performance measurements, where 'x' is an
          x-axis value (such as an iteration number) and 'y' is the
          corresponding performance measurement.  If a list of tuples is given,
          the |units_x| argument must also be specified.
      units: A string representing the units of the performance measurement(s).
          Should not include spaces.
      graph_name: A string name for the graph associated with this performance
          value.  Only used on Chrome desktop.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x values
          are iteration numbers.  If this argument is specified, then the
          |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph.  First-come values are
          stacked at bottom by default.
    """
    # Sanity-check the value/units_x pairing described in the docstring.
    if (isinstance(value, list) and value[0] is not None and
        isinstance(value[0], tuple)):
      assert units_x
    if units_x:
      assert isinstance(value, list)

    if self.IsChromeOS():
      # Autotest doesn't support result lists.
      autotest_value = value
      if (isinstance(value, list) and value[0] is not None and
          not isinstance(value[0], tuple)):
        autotest_value = Mean(value)

      if units_x:
        # TODO(dennisjeffrey): Support long-running performance measurements on
        # ChromeOS in a way that can be graphed: crosbug.com/21881.
        pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
                                     units + ' ' + units_x)
      else:
        # Output short-running performance results in a format understood by
        # autotest.
        perf_key = '%s_%s' % (units, description)
        if len(perf_key) > 30:
          logging.warning('The description "%s" will be truncated to "%s" '
                          '(length 30) when added to the autotest database.',
                          perf_key, perf_key[:30])
        # Bracket the value with the markers so autotest can parse it out of
        # the test's stdout.
        print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
                                      perf_key, autotest_value,
                                      self._PERF_OUTPUT_MARKER_POST)

        # Also output results in the format recognized by buildbot, for cases
        # in which these tests are run on chromeOS through buildbot.  Since
        # buildbot supports result lists, it's ok for |value| to be a list
        # here.
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

        sys.stdout.flush()
    else:
      # TODO(dmikurube): Support stacked graphs in PrintPerfResult.
      # See http://crbug.com/122119.
      if units_x:
        pyauto_utils.PrintPerfResult(graph_name, description, value,
                                     units + ' ' + units_x)
      else:
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

      # Optionally mirror results to a local folder for standalone graphing.
      if self._local_perf_dir:
        self._OutputPerfForStandaloneGraphing(
            graph_name, description, value, units, units_x, is_stacked)
  def _OutputEventForStandaloneGraphing(self, description, event_list):
    """Outputs event information to a local folder to be graphed.

    See function _OutputEventGraphValue below for a description of an event.

    This function only applies to Chrome Endure tests running on Chrome
    desktop, and assumes that environment variable 'LOCAL_PERF_DIR' has been
    specified and refers to a valid directory on the local machine.

    Data is persisted as two files in self._local_perf_dir:
      last_revision.dat: integer revision number of the most recent test run.
      _EVENT_-summary.dat: one str()-formatted dict per line, newest revision
          first, each of the form {'rev': <int>, 'events': {<desc>: [...]}}.

    Args:
      description: A string description of the event. Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the event
          (in seconds since the start of the test), and 'y' is a dictionary
          representing relevant data associated with that event (as key/value
          pairs).
    """
    # Recover the revision number of the most recent recorded run, if any.
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0
    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1
    # Read back any previously-written summary lines.
    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, '_EVENT_-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    # NOTE: eval() parses dicts this same function wrote via str() below, so
    # the input is locally generated, not untrusted.  Also relies on Python 2
    # map() returning a list (it is indexed and mutated below).
    existing_lines = map(eval, map(lambda x: x.strip(), existing_lines))
    seen_event_type = description
    value_list = []
    if seen_event_type in self._seen_graph_lines:
      # We've added events previously for this event type in the current
      # test execution, so retrieve the original set of values specified in
      # the most recent revision in the data file.
      value_list = existing_lines[0]['events'][description]
    for event_time, event_data in event_list:
      value_list.append([str(event_time), event_data])
    new_events = {
      description: value_list
    }
    new_line = {
      'rev': revision,
      'events': new_events
    }
    if seen_event_type in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_event_type] = True
    # Rewrite the entire summary file, newest revision first.
    existing_lines = map(str, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)  # Python 2 octal literal (rwxr-xr-x).
    with open(revision_num_file, 'w') as f:
      f.write(str(revision))
def _OutputEventGraphValue(self, description, event_list):
"""Outputs a set of events to have them graphed on the Chrome Endure bots.
An "event" can be anything recorded by a performance test that occurs at
particular times during a test execution. For example, a garbage collection
in the v8 heap can be considered an event. An event is distinguished from a
regular perf measurement in two ways: (1) an event is depicted differently
in the performance graphs than performance measurements; (2) an event can
be associated with zero or more data fields describing relevant information
associated with the event. For example, a garbage collection event will
occur at a particular time, and it may be associated with data such as
the number of collected bytes and/or the length of time it took to perform
the garbage collection.
This function only applies to Chrome Endure tests running on Chrome desktop.
Args:
description: A string description of the event. Should not include
spaces.
event_list: A list of (x, y) tuples representing one or more events
occurring during an endurance test, where 'x' is the time of the event
(in seconds since the start of the test), and 'y' is a dictionary
representing relevant data associated with that event (as key/value
pairs).
"""
pyauto_utils.PrintPerfResult('_EVENT_', description, event_list, '')
if self._local_perf_dir:
self._OutputEventForStandaloneGraphing(description, event_list)
def _PrintSummaryResults(self, description, values, units, graph_name):
"""Logs summary measurement information.
This function computes and outputs the average and standard deviation of
the specified list of value measurements. It also invokes
_OutputPerfGraphValue() with the computed *average* value, to ensure the
average value can be plotted in a performance graph.
Args:
description: A string description for the specified results.
values: A list of numeric value measurements.
units: A string specifying the units for the specified measurements.
graph_name: A string name for the graph associated with this performance
value. Only used on Chrome desktop.
"""
logging.info('Overall results for: %s', description)
if values:
logging.info(' Average: %f %s', Mean(values), units)
logging.info(' Std dev: %f %s', StandardDeviation(values), units)
self._OutputPerfGraphValue(description, values, units, graph_name)
else:
logging.info('No results to report.')
def _RunNewTabTest(self, description, open_tab_command, graph_name,
num_tabs=1):
"""Runs a perf test that involves opening new tab(s).
This helper function can be called from different tests to do perf testing
with different types of tabs. It is assumed that the |open_tab_command|
will open up a single tab.
Args:
description: A string description of the associated tab test.
open_tab_command: A callable that will open a single tab.
graph_name: A string name for the performance graph associated with this
test. Only used on Chrome desktop.
num_tabs: The number of tabs to open, i.e., the number of times to invoke
the |open_tab_command|.
"""
assert callable(open_tab_command)
timings = []
for iteration in range(self._num_iterations + 1):
orig_timeout_count = self._timeout_count
elapsed_time = self._MeasureElapsedTime(open_tab_command,
num_invocations=num_tabs)
# Only count the timing measurement if no automation call timed out.
if self._timeout_count == orig_timeout_count:
# Ignore the first iteration.
if iteration:
timings.append(elapsed_time)
logging.info('Iteration %d of %d: %f milliseconds', iteration,
self._num_iterations, elapsed_time)
self.assertTrue(self._timeout_count <= self._max_timeout_count,
msg='Test exceeded automation timeout threshold.')
self.assertEqual(1 + num_tabs, self.GetTabCount(),
msg='Did not open %d new tab(s).' % num_tabs)
for _ in range(num_tabs):
self.CloseTab(tab_index=1)
self._PrintSummaryResults(description, timings, 'milliseconds', graph_name)
def _GetConfig(self):
"""Load perf test configuration file.
Returns:
A dictionary that represents the config information.
"""
config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
config = {'username': None,
'password': None,
'google_account_url': 'https://accounts.google.com/',
'gmail_url': 'https://www.gmail.com',
'plus_url': 'https://plus.google.com',
'docs_url': 'https://docs.google.com'}
if os.path.exists(config_file):
try:
new_config = pyauto.PyUITest.EvalDataFrom(config_file)
for key in new_config:
if new_config.get(key) is not None:
config[key] = new_config.get(key)
except SyntaxError, e:
logging.info('Could not read %s: %s', config_file, str(e))
return config
def _LoginToGoogleAccount(self, account_key='test_google_account'):
"""Logs in to a test Google account.
Login with user-defined credentials if they exist.
Else login with private test credentials if they exist.
Else fail.
Args:
account_key: The string key in private_tests_info.txt which is associated
with the test account login credentials to use. It will only
be used when fail to load user-defined credentials.
Raises:
RuntimeError: if could not get credential information.
"""
private_file = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private',
'private_tests_info.txt')
config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
config = self._GetConfig()
google_account_url = config.get('google_account_url')
username = config.get('username')
password = config.get('password')
if username and password:
logging.info(
'Using google account credential from %s',
os.path.join(os.path.dirname(__file__), 'perf.cfg'))
elif os.path.exists(private_file):
creds = self.GetPrivateInfo()[account_key]
username = creds['username']
password = creds['password']
logging.info(
'User-defined credentials not found,' +
' using private test credentials instead.')
else:
message = 'No user-defined or private test ' \
'credentials could be found. ' \
'Please specify credential information in %s.' \
% config_file
raise RuntimeError(message)
test_utils.GoogleAccountsLogin(
self, username, password, url=google_account_url)
self.NavigateToURL('about:blank') # Clear the existing tab.
def _GetCPUUsage(self):
"""Returns machine's CPU usage.
This function uses /proc/stat to identify CPU usage, and therefore works
only on Linux/ChromeOS.
Returns:
A dictionary with 'user', 'nice', 'system' and 'idle' values.
Sample dictionary:
{
'user': 254544,
'nice': 9,
'system': 254768,
'idle': 2859878,
}
"""
try:
f = open('/proc/stat')
cpu_usage_str = f.readline().split()
f.close()
except IOError, e:
self.fail('Could not retrieve CPU usage: ' + str(e))
return {
'user': int(cpu_usage_str[1]),
'nice': int(cpu_usage_str[2]),
'system': int(cpu_usage_str[3]),
'idle': int(cpu_usage_str[4])
}
def _GetFractionNonIdleCPUTime(self, cpu_usage_start, cpu_usage_end):
"""Computes the fraction of CPU time spent non-idling.
This function should be invoked using before/after values from calls to
_GetCPUUsage().
"""
time_non_idling_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
cpu_usage_end['system'])
time_non_idling_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
cpu_usage_start['system'])
total_time_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
cpu_usage_end['system'] + cpu_usage_end['idle'])
total_time_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
cpu_usage_start['system'] + cpu_usage_start['idle'])
return ((float(time_non_idling_end) - time_non_idling_start) /
(total_time_end - total_time_start))
def ExtraChromeFlags(self):
"""Ensures Chrome is launched with custom flags.
Returns:
A list of extra flags to pass to Chrome when it is launched.
"""
flags = super(BasePerfTest, self).ExtraChromeFlags()
# Window size impacts a variety of perf tests, ensure consistency.
flags.append('--window-size=1024,768')
if self._IsPGOMode():
flags = flags + ['--child-clean-exit', '--no-sandbox']
return flags
class TabPerfTest(BasePerfTest):
  """Tests that involve opening tabs."""

  # URL of the New Tab Page, shared by two of the tests below.
  _NEW_TAB_URL = 'chrome://newtab'

  def testNewTab(self):
    """Measures time to open a new tab."""
    self._RunNewTabTest(
        'NewTabPage', lambda: self._AppendTab(self._NEW_TAB_URL), 'open_tab')

  def testNewTabFlash(self):
    """Measures time to open a new tab navigated to a flash page."""
    flash_file = os.path.join(self.ContentDataDir(), 'plugin', 'flash.swf')
    self.assertTrue(os.path.exists(flash_file),
                    msg='Missing required flash data file.')
    url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
    self._RunNewTabTest(
        'NewTabFlashPage', lambda: self._AppendTab(url), 'open_tab')

  def test20Tabs(self):
    """Measures time to open 20 tabs."""
    self._RunNewTabTest(
        '20TabsNewTabPage', lambda: self._AppendTab(self._NEW_TAB_URL),
        'open_20_tabs', num_tabs=20)
class BenchmarkPerfTest(BasePerfTest):
  """Benchmark performance tests."""

  def testV8BenchmarkSuite(self):
    """Measures score from v8 benchmark suite."""
    url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')

    def _RunBenchmarkOnce(url):
      """Runs the v8 benchmark suite once and returns the results in a dict."""
      self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                      msg='Failed to append tab for v8 benchmark suite.')
      js_done = """
          var val = document.getElementById("status").innerHTML;
          window.domAutomationController.send(val);
      """
      self.assertTrue(
          self.WaitUntil(
              lambda: 'Score:' in self.ExecuteJavascript(js_done, tab_index=1),
              timeout=300, expect_retval=True, retry_sleep=1),
          msg='Timed out when waiting for v8 benchmark score.')

      js_get_results = """
          var result = {};
          result['final_score'] = document.getElementById("status").innerHTML;
          result['all_results'] = document.getElementById("results").innerHTML;
          window.domAutomationController.send(JSON.stringify(result));
      """
      # The benchmark page is a local data file, so eval() of its JSON reply
      # is acceptable here; do not reuse this pattern on untrusted pages.
      results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
      # Raw string so the \w and \d regex escapes are not subject to Python
      # string-literal escape processing.
      score_pattern = r'(\w+): (\d+)'
      final_score = re.search(score_pattern, results['final_score']).group(2)
      result_dict = {'final_score': int(final_score)}
      for match in re.finditer(score_pattern, results['all_results']):
        benchmark_name = match.group(1)
        benchmark_score = match.group(2)
        result_dict[benchmark_name] = int(benchmark_score)
      self.CloseTab(tab_index=1)
      return result_dict

    timings = {}
    for iteration in xrange(self._num_iterations + 1):
      result_dict = _RunBenchmarkOnce(url)
      # Ignore the first iteration.
      if iteration:
        for key, val in result_dict.items():
          timings.setdefault(key, []).append(val)
      logging.info('Iteration %d of %d:\n%s', iteration,
                   self._num_iterations, self.pformat(result_dict))

    for key, val in timings.items():
      if key == 'final_score':
        self._PrintSummaryResults('V8Benchmark', val, 'score',
                                  'v8_benchmark_final')
      else:
        self._PrintSummaryResults('V8Benchmark-%s' % key, val, 'score',
                                  'v8_benchmark_individual')

  def testSunSpider(self):
    """Runs the SunSpider javascript benchmark suite."""
    url = self.GetFileURLForDataPath('sunspider', 'sunspider-driver.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for SunSpider benchmark suite.')

    js_is_done = """
        var done = false;
        if (document.getElementById("console"))
          done = true;
        window.domAutomationController.send(JSON.stringify(done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=300, expect_retval='true', retry_sleep=1),
        msg='Timed out when waiting for SunSpider benchmark score.')

    js_get_results = """
        window.domAutomationController.send(
            document.getElementById("console").innerHTML);
    """
    # Append '<br>' to the result to simplify regular expression matching.
    results = self.ExecuteJavascript(js_get_results, tab_index=1) + '<br>'
    # Raw strings below keep regex escapes (\s, \d, \w) out of Python's
    # string-literal escape processing.
    total = re.search(r'Total:\s*([\d.]+)ms', results).group(1)
    logging.info('Total: %f ms', float(total))
    self._OutputPerfGraphValue('SunSpider-total', float(total), 'ms',
                               'sunspider_total')

    for match_category in re.finditer(r'\s\s(\w+):\s*([\d.]+)ms.+?<br><br>',
                                      results):
      category_name = match_category.group(1)
      category_result = match_category.group(2)
      logging.info('Benchmark "%s": %f ms', category_name,
                   float(category_result))
      self._OutputPerfGraphValue('SunSpider-' + category_name,
                                 float(category_result), 'ms',
                                 'sunspider_individual')

      for match_result in re.finditer(r'<br>\s\s\s\s([\w-]+):\s*([\d.]+)ms',
                                      match_category.group(0)):
        result_name = match_result.group(1)
        result_value = match_result.group(2)
        logging.info('  Result "%s-%s": %f ms', category_name, result_name,
                     float(result_value))
        self._OutputPerfGraphValue(
            'SunSpider-%s-%s' % (category_name, result_name),
            float(result_value), 'ms', 'sunspider_individual')

  def testDromaeoSuite(self):
    """Measures results from Dromaeo benchmark suite."""
    url = self.GetFileURLForDataPath('dromaeo', 'index.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url + '?dromaeo')),
                    msg='Failed to append tab for Dromaeo benchmark suite.')

    js_is_ready = """
        var val = document.getElementById('pause').value;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_ready, tab_index=1),
            timeout=30, expect_retval='Run', retry_sleep=1),
        msg='Timed out when waiting for Dromaeo benchmark to load.')

    js_run = """
        $('#pause').val('Run').click();
        window.domAutomationController.send('done');
    """
    self.ExecuteJavascript(js_run, tab_index=1)

    js_is_done = """
        var val = document.getElementById('timebar').innerHTML;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: 'Total' in self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=900, expect_retval=True, retry_sleep=2),
        msg='Timed out when waiting for Dromaeo benchmark to complete.')

    js_get_results = """
        var result = {};
        result['total_result'] = $('#timebar strong').html();
        result['all_results'] = {};
        $('.result-item.done').each(function (i) {
          var group_name = $(this).find('.test b').html().replace(':', '');
          var group_results = {};
          group_results['result'] =
              $(this).find('span').html().replace('runs/s', '')
          group_results['sub_groups'] = {}
          $(this).find('li').each(function (i) {
            var sub_name = $(this).find('b').html().replace(':', '');
            group_results['sub_groups'][sub_name] =
                $(this).text().match(/: ([\d.]+)/)[1]
          });
          result['all_results'][group_name] = group_results;
        });
        window.domAutomationController.send(JSON.stringify(result));
    """
    # Local benchmark page, so eval() of its JSON reply is acceptable here.
    results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    total_result = results['total_result']
    logging.info('Total result: ' + total_result)
    self._OutputPerfGraphValue('Dromaeo-total', float(total_result),
                               'runsPerSec', 'dromaeo_total')

    for group_name, group in results['all_results'].iteritems():
      logging.info('Benchmark "%s": %s', group_name, group['result'])
      self._OutputPerfGraphValue('Dromaeo-' + group_name.replace(' ', ''),
                                 float(group['result']), 'runsPerSec',
                                 'dromaeo_individual')
      for benchmark_name, benchmark_score in group['sub_groups'].iteritems():
        logging.info('  Result "%s": %s', benchmark_name, benchmark_score)

  def testSpaceport(self):
    """Measures results from Spaceport benchmark suite."""
    # TODO(tonyg): Test is failing on bots. Diagnose and re-enable.
    pass
    # url = self.GetFileURLForDataPath('third_party', 'spaceport', 'index.html')
    # self.assertTrue(self.AppendTab(pyauto.GURL(url + '?auto')),
    #                 msg='Failed to append tab for Spaceport benchmark suite.')
    #
    # # The test reports results to console.log in the format "name: value".
    # # Inject a bit of JS to intercept those.
    # js_collect_console_log = """
    #     window.__pyautoresult = {};
    #     window.console.log = function(str) {
    #       if (!str) return;
    #       var key_val = str.split(': ');
    #       if (!key_val.length == 2) return;
    #       __pyautoresult[key_val[0]] = key_val[1];
    #     };
    #     window.domAutomationController.send('done');
    # """
    # self.ExecuteJavascript(js_collect_console_log, tab_index=1)
    #
    # def _IsDone():
    #   expected_num_results = 30  # The number of tests in benchmark.
    #   results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    #   return expected_num_results == len(results)
    #
    # js_get_results = """
    #     window.domAutomationController.send(
    #         JSON.stringify(window.__pyautoresult));
    # """
    # self.assertTrue(
    #     self.WaitUntil(_IsDone, timeout=1200, expect_retval=True,
    #                    retry_sleep=5),
    #     msg='Timed out when waiting for Spaceport benchmark to complete.')
    # results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    #
    # for key in results:
    #   suite, test = key.split('.')
    #   value = float(results[key])
    #   self._OutputPerfGraphValue(test, value, 'ObjectsAt30FPS', suite)
    # self._PrintSummaryResults('Overall', [float(x) for x in results.values()],
    #                           'ObjectsAt30FPS', 'Overall')
class LiveWebappLoadTest(BasePerfTest):
  """Tests that involve performance measurements of live webapps.

  These tests connect to live webpages (e.g., Gmail, Calendar, Docs) and are
  therefore subject to network conditions.  These tests are meant to generate
  "ball-park" numbers only (to see roughly how long things take to occur from a
  user's perspective), and are not expected to be precise.
  """
  def testNewTabGmail(self):
    """Measures time to open a tab to a logged-in Gmail account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the substring 'Last account activity:'.
    """
    EXPECTED_SUBSTRING = 'Last account activity:'
    def _SubstringExistsOnPage():
      # Sends 'true' as soon as any div inside Gmail's canvas_frame contains
      # the expected substring; otherwise sends 'false'.
      js = """
          var frame = document.getElementById("canvas_frame");
          var divs = frame.contentDocument.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)
    def _RunSingleGmailTabOpen():
      # One timed unit of work: open the tab, then wait for the marker text.
      self._AppendTab('http://www.gmail.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Gmail string.')
    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabGmail', _RunSingleGmailTabOpen,
                        'open_tab_live_webapp')
  def testNewTabCalendar(self):
    """Measures time to open a tab to a logged-in Calendar account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays a div whose content is exactly 'Month'.
    """
    EXPECTED_SUBSTRING = 'Month'
    def _DivTitleStartsWith():
      # NOTE: despite the helper's name, this checks for a div whose innerHTML
      # exactly equals the expected string, not a prefix match.
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML == "%s")
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)
    def _RunSingleCalendarTabOpen():
      # One timed unit of work: open the tab, then wait for the marker div.
      self._AppendTab('http://calendar.google.com')
      self.assertTrue(self.WaitUntil(_DivTitleStartsWith, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Calendar string.')
    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabCalendar', _RunSingleCalendarTabOpen,
                        'open_tab_live_webapp')
  def testNewTabDocs(self):
    """Measures time to open a tab to a logged-in Docs account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the substring 'sort' (case-insensitive).
    """
    EXPECTED_SUBSTRING = 'sort'
    def _SubstringExistsOnPage():
      # Case-insensitive substring search over every div on the page.
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.toLowerCase().indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)
    def _RunSingleDocsTabOpen():
      # One timed unit of work: open the tab, then wait for the marker text.
      self._AppendTab('http://docs.google.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Docs string.')
    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabDocs', _RunSingleDocsTabOpen,
                        'open_tab_live_webapp')
class NetflixPerfTest(BasePerfTest, NetflixTestHelper):
  """Test Netflix video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    NetflixTestHelper.__init__(self, self)

  def tearDown(self):
    # Sign out of Netflix before the base-class teardown shuts everything down.
    self.SignOut()
    pyauto.PyUITest.tearDown(self)

  def testNetflixDroppedFrames(self):
    """Measures the Netflix video dropped frames/second. Runs for 60 secs."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore first 10 seconds of video playing so we get smooth videoplayback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    dropped_frames = []
    prev_dropped_frames = 0
    for iteration in xrange(60):
      # Ignoring initial dropped frames of first 10 seconds.
      total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
      dropped_frames_last_sec = total_dropped_frames - prev_dropped_frames
      dropped_frames.append(dropped_frames_last_sec)
      logging.info('Iteration %d of %d: %f dropped frames in the last second',
                   iteration + 1, 60, dropped_frames_last_sec)
      prev_dropped_frames = total_dropped_frames
      # Play the video for some time.
      time.sleep(1)
    self._PrintSummaryResults('NetflixDroppedFrames', dropped_frames, 'frames',
                              'netflix_dropped_frames')

  def testNetflixCPU(self):
    """Measures the Netflix video CPU usage. Runs for 60 seconds."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore first 10 seconds of video playing so we get smooth videoplayback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    init_video_frames = self._GetVideoFrames()
    cpu_usage_start = self._GetCPUUsage()
    # Play the video for some time.
    # (The original also initialized an unused 'total_shown_frames' here.)
    time.sleep(60)
    total_video_frames = self._GetVideoFrames() - init_video_frames
    total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    # Counting extrapolation for utilization to play the video: scale the CPU
    # fraction up as if the dropped frames had also been rendered.
    extrapolation_value = (
        fraction_non_idle_time *
        (float(total_video_frames) + total_dropped_frames) /
        total_video_frames)
    logging.info('Netflix CPU extrapolation: %f', extrapolation_value)
    self._OutputPerfGraphValue('NetflixCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'netflix_cpu_extrapolation')
class YoutubePerfTest(BasePerfTest, YoutubeTestHelper):
  """Test Youtube video performance."""
  def __init__(self, methodName='runTest', **kwargs):
    # Both bases need explicit initialization; YoutubeTestHelper takes the
    # test instance itself as its argument.
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    YoutubeTestHelper.__init__(self, self)
  def _VerifyVideoTotalBytes(self):
    """Returns true if video total bytes information is available."""
    return self.GetVideoTotalBytes() > 0
  def _VerifyVideoLoadedBytes(self):
    """Returns true if video loaded bytes information is available."""
    return self.GetVideoLoadedBytes() > 0
  def StartVideoForPerformance(self, video_id='zuzaxlddWbk'):
    """Start the test video with all required buffering.

    Plays the given video at 720p, pauses it until it has fully buffered,
    then resumes playback and skips the first 10 seconds.

    Args:
      video_id: The string Youtube video id to play.
    """
    self.PlayVideoAndAssert(video_id)
    # Force a fixed quality so measurements are comparable across runs.
    self.ExecuteJavascript("""
        ytplayer.setPlaybackQuality('hd720');
        window.domAutomationController.send('');
    """)
    self.AssertPlayerState(state=self.is_playing,
                           msg='Player did not enter the playing state')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoTotalBytes, expect_retval=True),
        msg='Failed to get video total bytes information.')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoLoadedBytes, expect_retval=True),
        msg='Failed to get video loaded bytes information')
    loaded_video_bytes = self.GetVideoLoadedBytes()
    total_video_bytes = self.GetVideoTotalBytes()
    # Pause so buffering is not competing with playback below.
    self.PauseVideo()
    logging.info('total_video_bytes: %f', total_video_bytes)
    # Wait for the video to finish loading.
    while total_video_bytes > loaded_video_bytes:
      loaded_video_bytes = self.GetVideoLoadedBytes()
      logging.info('loaded_video_bytes: %f', loaded_video_bytes)
      time.sleep(1)
    self.PlayVideo()
    # Ignore first 10 seconds of video playing so we get smooth videoplayback.
    time.sleep(10)
  def testYoutubeDroppedFrames(self):
    """Measures the Youtube video dropped frames/second. Runs for 60 secs.

    This test measures Youtube video dropped frames for three different types
    of videos like slow, normal and fast motion.
    """
    youtube_video = {'Slow': 'VT1-sitWRtY',
                     'Normal': '2tqK_3mKQUw',
                     'Fast': '8ETDE0VGJY4',
                    }
    for video_type in youtube_video:
      logging.info('Running %s video.', video_type)
      self.StartVideoForPerformance(youtube_video[video_type])
      init_dropped_frames = self.GetVideoDroppedFrames()
      total_dropped_frames = 0
      dropped_fps = []
      for iteration in xrange(60):
        # Dropped frames since the measurement window began; the per-second
        # figure is the delta from the previous iteration's cumulative total.
        frames = self.GetVideoDroppedFrames() - init_dropped_frames
        current_dropped_frames = frames - total_dropped_frames
        dropped_fps.append(current_dropped_frames)
        logging.info('Iteration %d of %d: %f dropped frames in the last '
                     'second', iteration + 1, 60, current_dropped_frames)
        total_dropped_frames = frames
        # Play the video for some time
        time.sleep(1)
      graph_description = 'YoutubeDroppedFrames' + video_type
      self._PrintSummaryResults(graph_description, dropped_fps, 'frames',
                                'youtube_dropped_frames')
  def testYoutubeCPU(self):
    """Measures the Youtube video CPU usage. Runs for 60 seconds.

    Measures the Youtube video CPU usage (between 0 and 1), extrapolated to
    totalframes in the video by taking dropped frames into account. For smooth
    videoplayback this number should be < 0.5..1.0 on a hyperthreaded CPU.
    """
    self.StartVideoForPerformance()
    init_dropped_frames = self.GetVideoDroppedFrames()
    logging.info('init_dropped_frames: %f', init_dropped_frames)
    cpu_usage_start = self._GetCPUUsage()
    total_shown_frames = 0
    for sec_num in xrange(60):
      # Play the video for some time.
      time.sleep(1)
      # NOTE(review): summing per-iteration values assumes GetVideoFrames()
      # reports frames shown since the previous call -- TODO confirm against
      # YoutubeTestHelper.
      total_shown_frames = total_shown_frames + self.GetVideoFrames()
      logging.info('total_shown_frames: %f', total_shown_frames)
    total_dropped_frames = self.GetVideoDroppedFrames() - init_dropped_frames
    logging.info('total_dropped_frames: %f', total_dropped_frames)
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    logging.info('fraction_non_idle_time: %f', fraction_non_idle_time)
    total_frames = total_shown_frames + total_dropped_frames
    # Counting extrapolation for utilization to play the video.
    extrapolation_value = (fraction_non_idle_time *
                           (float(total_frames) / total_shown_frames))
    logging.info('Youtube CPU extrapolation: %f', extrapolation_value)
    # Video is still running so log some more detailed data.
    self._LogProcessActivity()
    self._OutputPerfGraphValue('YoutubeCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'youtube_cpu_extrapolation')
class FlashVideoPerfTest(BasePerfTest):
  """General flash video performance tests."""
  def FlashVideo1080P(self):
    """Measures total dropped frames and average FPS for a 1080p flash video.

    This is a temporary test to be run manually for now, needed to collect some
    performance statistics across different ChromeOS devices.  (Its name does
    not start with 'test', so the test runner will not pick it up
    automatically.)
    """
    # Open up the test webpage; it's assumed the test will start automatically.
    webpage_url = 'http://www/~arscott/fl/FlashVideoTests.html'
    self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
                    msg='Failed to append tab for webpage.')
    # Wait until the test is complete.
    js_is_done = """
      window.domAutomationController.send(JSON.stringify(tests_done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1) == 'true',
            timeout=300, expect_retval=True, retry_sleep=1),
        msg='Timed out when waiting for test result.')
    # Retrieve and output the test results.
    js_results = """
      window.domAutomationController.send(JSON.stringify(tests_results));
    """
    # NOTE(review): double eval() of strings produced by the test page; the
    # page is an internal test resource, but this pattern must not be reused
    # on untrusted content.
    test_result = eval(self.ExecuteJavascript(js_results, tab_index=1))
    # The page emits JS booleans ('true'); rewrite to Python spelling so the
    # second eval can parse the inner dict literal.
    test_result[0] = test_result[0].replace('true', 'True')
    test_result = eval(test_result[0])  # Webpage only does 1 test right now.
    description = 'FlashVideo1080P'
    result = test_result['averageFPS']
    logging.info('Result for %s: %f FPS (average)', description, result)
    self._OutputPerfGraphValue(description, result, 'FPS',
                               'flash_video_1080p_fps')
    result = test_result['droppedFrames']
    logging.info('Result for %s: %f dropped frames', description, result)
    self._OutputPerfGraphValue(description, result, 'DroppedFrames',
                               'flash_video_1080p_dropped_frames')
class WebGLTest(BasePerfTest):
  """Tests for WebGL performance."""

  def _RunWebGLTest(self, url, description, graph_name):
    """Measures FPS using a specified WebGL demo.

    Args:
      url: The string URL that, once loaded, will run the WebGL demo (default
          WebGL demo settings are used, since this test does not modify any
          settings in the demo).
      description: A string description for this demo, used as a performance
          value description.  Should not contain any spaces.
      graph_name: A string name for the performance graph associated with this
          test.  Only used on Chrome desktop.
    """
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for %s.' % description)

    get_fps_js = """
      var fps_field = document.getElementById("fps");
      var result = -1;
      if (fps_field)
        result = fps_field.innerHTML;
      window.domAutomationController.send(JSON.stringify(result));
    """

    # Block until the demo starts reporting FPS values at all.
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(get_fps_js, tab_index=1) != '-1',
            timeout=300, retry_sleep=1),
        msg='Timed out when waiting for FPS values to be available.')

    # Give the demo a 5-second warm-up before sampling begins.
    time.sleep(5)

    # Sample the reported FPS once per second for 30 seconds; the reported
    # result of the test is the average of these samples.
    sampled_fps = []
    for sample_num in xrange(30):
      raw_value = self.ExecuteJavascript(get_fps_js, tab_index=1)
      current_fps = float(raw_value.replace('"', ''))
      sampled_fps.append(current_fps)
      logging.info('Iteration %d of %d: %f FPS', sample_num + 1, 30,
                   current_fps)
      time.sleep(1)
    self._PrintSummaryResults(description, sampled_fps, 'fps', graph_name)

  def testWebGLAquarium(self):
    """Measures performance using the WebGL Aquarium demo."""
    demo_url = self.GetFileURLForDataPath('pyauto_private', 'webgl',
                                          'aquarium', 'aquarium.html')
    self._RunWebGLTest(demo_url, 'WebGLAquarium', 'webgl_demo')

  def testWebGLField(self):
    """Measures performance using the WebGL Field demo."""
    demo_url = self.GetFileURLForDataPath('pyauto_private', 'webgl', 'field',
                                          'field.html')
    self._RunWebGLTest(demo_url, 'WebGLField', 'webgl_demo')

  def testWebGLSpaceRocks(self):
    """Measures performance using the WebGL SpaceRocks demo."""
    demo_url = self.GetFileURLForDataPath('pyauto_private', 'webgl',
                                          'spacerocks', 'spacerocks.html')
    self._RunWebGLTest(demo_url, 'WebGLSpaceRocks', 'webgl_demo')
class GPUPerfTest(BasePerfTest):
  """Tests for GPU performance."""
  def setUp(self):
    """Performs necessary setup work before running each test in this class."""
    # Demo names/URLs and per-platform reference FPS values live in a data
    # file so they can be tuned without touching the test code.
    self._gpu_info_dict = self.EvalDataFrom(os.path.join(self.DataDir(),
                                                         'gpu', 'gpuperf.txt'))
    self._demo_name_url_dict = self._gpu_info_dict['demo_info']
    pyauto.PyUITest.setUp(self)
  def _MeasureFpsOverTime(self, tab_index=0):
    """Measures average FPS of the demo loaded in the given tab.
    This function assumes that the demo is already loaded in the specified tab
    index.
    Args:
      tab_index: The tab index, default is 0.
    Returns:
      The average of FPS values sampled once per second over 10 seconds.
    """
    # Let the experiment run for 5 seconds before we start collecting FPS
    # values.
    time.sleep(5)
    # Collect the current FPS value each second for the next 10 seconds.
    # Then return the average FPS value from among those collected.
    fps_vals = []
    for iteration in xrange(10):
      fps = self.GetFPS(tab_index=tab_index)
      fps_vals.append(fps['fps'])
      time.sleep(1)
    return Mean(fps_vals)
  def _GetStdAvgAndCompare(self, avg_fps, description, ref_dict):
    """Compares a measured average FPS with platform-specific reference data.
    Args:
      avg_fps: Average fps value.
      description: A string description for this demo, used as a performance
          value description.
      ref_dict: Dictionary which contains reference data for this test case.
    Returns:
      True, if the actual FPS value is within 10% of the reference FPS value,
      or False, otherwise.
    """
    std_fps = 0
    status = True
    # Load reference data according to platform.
    platform_ref_dict = None
    if self.IsWin():
      platform_ref_dict = ref_dict['win']
    elif self.IsMac():
      platform_ref_dict = ref_dict['mac']
    elif self.IsLinux():
      platform_ref_dict = ref_dict['linux']
    else:
      # unittest.TestCase provides fail(), not assertFail(); the original
      # assertFail() call would have raised AttributeError at runtime.
      self.fail(msg='This platform is unsupported.')
    std_fps = platform_ref_dict[description]
    # Compare reference data to average fps.
    # We allow the average FPS value to be within 10% of the reference
    # FPS value.
    if avg_fps < (0.9 * std_fps):
      logging.info('FPS difference exceeds threshold for: %s', description)
      logging.info('          Average: %f fps', avg_fps)
      logging.info('Reference Average: %f fps', std_fps)
      status = False
    else:
      # The original message claimed the average exceeded the reference by
      # more than 10%, but this branch fires for any value >= 90% of the
      # reference; log an accurate message instead.
      logging.info('Average FPS is within 10 percent of (or above) the '
                   'reference FPS for: %s', description)
      logging.info('  Average: %f fps', avg_fps)
      logging.info('  Reference Average: %f fps', std_fps)
    return status
  def testLaunchDemosParallelInSeparateTabs(self):
    """Measures performance of demos in different tabs in same browser."""
    # Launch all the demos parallel in separate tabs
    counter = 0
    all_demos_passed = True
    ref_dict = self._gpu_info_dict['separate_tab_ref_data']
    # Iterate through dictionary and append all url to browser
    for url in self._demo_name_url_dict.iterkeys():
      self.assertTrue(
          self.AppendTab(pyauto.GURL(self._demo_name_url_dict[url])),
          msg='Failed to append tab for %s.' % url)
      counter += 1
    # Assert number of tab count is equal to number of tabs appended.
    self.assertEqual(self.GetTabCount(), counter + 1)
    # Measures performance using different demos and compare it golden
    # reference.  Tabs were appended in dict-iteration order (tab 1, 2, ...),
    # so walk the demos in the same order with an ascending tab index; the
    # original descending counter paired each demo with the wrong tab.
    tab_index = 1
    for url in self._demo_name_url_dict.iterkeys():
      avg_fps = self._MeasureFpsOverTime(tab_index=tab_index)
      # Get the reference value of fps and compare the results
      if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
        all_demos_passed = False
      tab_index += 1
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value')
  def testLaunchDemosInSeparateBrowser(self):
    """Measures performance by launching each demo in a separate browser."""
    # Launch demos in the browser
    ref_dict = self._gpu_info_dict['separate_browser_ref_data']
    all_demos_passed = True
    for url in self._demo_name_url_dict.iterkeys():
      self.NavigateToURL(self._demo_name_url_dict[url])
      # Measures performance using different demos.
      avg_fps = self._MeasureFpsOverTime()
      # Restart so the next demo gets a fresh browser instance.
      self.RestartBrowser()
      # Get the standard value of fps and compare the results
      if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
        all_demos_passed = False
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value')
  def testLaunchDemosBrowseForwardBackward(self):
    """Measures performance of various demos in browser going back and forth."""
    ref_dict = self._gpu_info_dict['browse_back_forward_ref_data']
    url_array = []
    desc_array = []
    all_demos_passed = True
    # Get URL/Description from dictionary and put in individual array
    for url in self._demo_name_url_dict.iterkeys():
      url_array.append(self._demo_name_url_dict[url])
      desc_array.append(url)
    for index in range(len(url_array) - 1):
      # Launch demo in the Browser
      if index == 0:
        self.NavigateToURL(url_array[index])
      # Measures performance using the first demo.
      avg_fps = self._MeasureFpsOverTime()
      status1 = self._GetStdAvgAndCompare(avg_fps, desc_array[index],
                                          ref_dict)
      # Measures performance using the second demo.
      self.NavigateToURL(url_array[index + 1])
      avg_fps = self._MeasureFpsOverTime()
      status2 = self._GetStdAvgAndCompare(avg_fps, desc_array[index + 1],
                                          ref_dict)
      # Go Back to previous demo
      self.TabGoBack()
      # Measures performance for first demo when moved back
      avg_fps = self._MeasureFpsOverTime()
      status3 = self._GetStdAvgAndCompare(
          avg_fps, desc_array[index] + '_backward',
          ref_dict)
      # Go Forward to previous demo
      self.TabGoForward()
      # Measures performance for second demo when moved forward
      avg_fps = self._MeasureFpsOverTime()
      status4 = self._GetStdAvgAndCompare(
          avg_fps, desc_array[index + 1] + '_forward',
          ref_dict)
      if not all([status1, status2, status3, status4]):
        all_demos_passed = False
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value')
class HTML5BenchmarkTest(BasePerfTest):
  """Tests for HTML5 performance."""
  def testHTML5Benchmark(self):
    """Measures performance using the benchmark at html5-benchmark.com."""
    self.NavigateToURL('http://html5-benchmark.com')
    kick_off_js = """
      benchmark();
      window.domAutomationController.send("done");
    """
    self.ExecuteJavascript(kick_off_js)
    read_score_js = """
      var score = "-1";
      var elem = document.getElementById("score");
      if (elem)
        score = elem.innerHTML;
      window.domAutomationController.send(score);
    """
    # The benchmark is considered finished once the 'score' DOM element
    # changes away from its initial placeholder value of '87485'.
    def _BenchmarkDone():
      return self.ExecuteJavascript(read_score_js) != '87485'
    self.assertTrue(
        self.WaitUntil(_BenchmarkDone, timeout=900, retry_sleep=1),
        msg='Timed out when waiting for final score to be available.')
    final_score = float(self.ExecuteJavascript(read_score_js))
    logging.info('HTML5 Benchmark final score: %f', final_score)
    self._OutputPerfGraphValue('HTML5Benchmark', final_score, 'score',
                               'html5_benchmark')
class FileUploadDownloadTest(BasePerfTest):
  """Tests that involve measuring performance of upload and download."""
  def setUp(self):
    """Performs necessary setup work before running each test in this class."""
    # Serve files out of a throw-away temp dir.  PerfTestServer is the local
    # HTTP server implementation defined elsewhere in this file.
    self._temp_dir = tempfile.mkdtemp()
    self._test_server = PerfTestServer(self._temp_dir)
    self._test_server_port = self._test_server.GetPort()
    self._test_server.Run()
    self.assertTrue(self.WaitUntil(self._IsTestServerRunning),
                    msg='Failed to start local performance test server.')
    BasePerfTest.setUp(self)
  def tearDown(self):
    """Performs necessary cleanup work after running each test in this class."""
    BasePerfTest.tearDown(self)
    self._test_server.ShutDown()
    pyauto_utils.RemovePath(self._temp_dir)
  def _IsTestServerRunning(self):
    """Determines whether the local test server is ready to accept connections.
    Returns:
      True, if a connection can be made to the local performance test server, or
      False otherwise.
    """
    conn = None
    try:
      conn = urllib2.urlopen('http://localhost:%d' % self._test_server_port)
      return True
    except IOError, e:
      # Connection refused / not yet listening; caller polls via WaitUntil.
      return False
    finally:
      if conn:
        conn.close()
  def testDownload100MBFile(self):
    """Measures the time to download a 100 MB file from a local server."""
    CREATE_100MB_URL = (
        'http://localhost:%d/create_file_of_size?filename=data&mb=100' %
        self._test_server_port)
    DOWNLOAD_100MB_URL = 'http://localhost:%d/data' % self._test_server_port
    DELETE_100MB_URL = ('http://localhost:%d/delete_file?filename=data' %
                        self._test_server_port)
    # Tell the local server to create a 100 MB file.
    self.NavigateToURL(CREATE_100MB_URL)
    # Cleaning up downloaded files is done in the same way as in downloads.py.
    # We first identify all existing downloaded files, then remove only those
    # new downloaded files that appear during the course of this test.
    download_dir = self.GetDownloadDirectory().value()
    orig_downloads = []
    if os.path.isdir(download_dir):
      orig_downloads = os.listdir(download_dir)
    def _CleanupAdditionalFilesInDir(directory, orig_files):
      """Removes the additional files in the specified directory.
      This function will remove all files from |directory| that are not
      specified in |orig_files|.
      Args:
        directory: A string directory path.
        orig_files: A list of strings representing the original set of files in
            the specified directory.
      """
      downloads_to_remove = []
      if os.path.isdir(directory):
        downloads_to_remove = [os.path.join(directory, name)
                               for name in os.listdir(directory)
                               if name not in orig_files]
      for file_name in downloads_to_remove:
        pyauto_utils.RemovePath(file_name)
    def _DownloadFile(url):
      # Blocks until the download has fully completed.
      self.DownloadAndWaitForStart(url)
      self.WaitForAllDownloadsToComplete(timeout=2 * 60 * 1000)  # 2 minutes.
    timings = []
    # The first pass is a warm-up and is excluded from the reported timings.
    for iteration in range(self._num_iterations + 1):
      elapsed_time = self._MeasureElapsedTime(
          lambda: _DownloadFile(DOWNLOAD_100MB_URL), num_invocations=1)
      # Ignore the first iteration.
      if iteration:
        timings.append(elapsed_time)
        logging.info('Iteration %d of %d: %f milliseconds', iteration,
                     self._num_iterations, elapsed_time)
      self.SetDownloadShelfVisible(False)
      # Delete the freshly-downloaded copy so each iteration starts clean.
      _CleanupAdditionalFilesInDir(download_dir, orig_downloads)
    self._PrintSummaryResults('Download100MBFile', timings, 'milliseconds',
                              'download_file')
    # Tell the local server to delete the 100 MB file.
    self.NavigateToURL(DELETE_100MB_URL)
  def testUpload50MBFile(self):
    """Measures the time to upload a 50 MB file to a local server."""
    # TODO(dennisjeffrey): Replace the use of XMLHttpRequest in this test with
    # FileManager automation to select the upload file when crosbug.com/17903
    # is complete.
    START_UPLOAD_URL = (
        'http://localhost:%d/start_upload?mb=50' % self._test_server_port)
    EXPECTED_SUBSTRING = 'Upload complete'
    def _IsUploadComplete():
      # The page writes its status into the "upload_result" element.
      js = """
          result = "";
          var div = document.getElementById("upload_result");
          if (div)
            result = div.innerHTML;
          window.domAutomationController.send(result);
      """
      return self.ExecuteJavascript(js).find(EXPECTED_SUBSTRING) >= 0
    def _RunSingleUpload():
      self.NavigateToURL(START_UPLOAD_URL)
      self.assertTrue(
          self.WaitUntil(_IsUploadComplete, timeout=120, expect_retval=True,
                         retry_sleep=0.10),
          msg='Upload failed to complete before the timeout was hit.')
    timings = []
    # As above, the first pass is a warm-up and is not reported.
    for iteration in range(self._num_iterations + 1):
      elapsed_time = self._MeasureElapsedTime(_RunSingleUpload)
      # Ignore the first iteration.
      if iteration:
        timings.append(elapsed_time)
        logging.info('Iteration %d of %d: %f milliseconds', iteration,
                     self._num_iterations, elapsed_time)
    self._PrintSummaryResults('Upload50MBFile', timings, 'milliseconds',
                              'upload_file')
class ScrollResults(object):
  """Container for ScrollTest results (an initial pass and a repeat pass)."""
  def __init__(self, first_paint_seconds, results_list):
    assert len(results_list) == 2, 'Expecting initial and repeat results.'
    # First-paint time is stored in milliseconds.
    self._first_paint_time = 1000.0 * first_paint_seconds
    self._results_list = results_list
  def GetFirstPaintTime(self):
    """Returns the first-paint time in milliseconds."""
    return self._first_paint_time
  def GetFrameCount(self, index):
    """Frames sent to screen, falling back to the animation-frame count."""
    run = self._results_list[index]
    fallback = run['numAnimationFrames']
    return run.get('numFramesSentToScreen', fallback)
  def GetFps(self, index):
    """Average frames per second for the run at |index|."""
    elapsed = self._results_list[index]['totalTimeInSeconds']
    return self.GetFrameCount(index) / elapsed
  def GetMeanFrameTime(self, index):
    """Average time per frame (seconds) for the run at |index|."""
    elapsed = self._results_list[index]['totalTimeInSeconds']
    return elapsed / self.GetFrameCount(index)
  def GetPercentBelow60Fps(self, index):
    """Fraction of frames dropped for the run at |index|."""
    dropped = float(self._results_list[index]['droppedFrameCount'])
    return dropped / self.GetFrameCount(index)
class BaseScrollTest(BasePerfTest):
  """Base class for tests measuring scrolling performance."""
  def setUp(self):
    """Performs necessary setup work before running each test."""
    super(BaseScrollTest, self).setUp()
    # scroll.js defines __ScrollTest, which drives the scrolling and invokes
    # a callback with the measured results (see RunSingleInvocation).
    scroll_file = os.path.join(self.DataDir(), 'scroll', 'scroll.js')
    with open(scroll_file) as f:
      self._scroll_text = f.read()
  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.
    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # Extra flag used by scroll performance tests.
    return (super(BaseScrollTest, self).ExtraChromeFlags() +
            ['--enable-gpu-benchmarking'])
  def RunSingleInvocation(self, url, is_gmail_test=False):
    """Runs a single invocation of the scroll test.
    Args:
      url: The string url for the webpage on which to run the scroll test.
      is_gmail_test: True iff the test is a GMail test.
    Returns:
      Instance of ScrollResults.
    """
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for webpage.')
    # Scrolling a heavy page can take a while; raise the automation timeout
    # for the duration of this invocation.
    timeout = pyauto.PyUITest.ActionTimeoutChanger(self, 300 * 1000)  # ms
    test_js = """%s;
        new __ScrollTest(function(results) {
          var stringify = JSON.stringify || JSON.encode;
          window.domAutomationController.send(stringify(results));
        }, %s);
    """ % (self._scroll_text, 'true' if is_gmail_test else 'false')
    results = simplejson.loads(self.ExecuteJavascript(test_js, tab_index=1))
    # first-paint latency (seconds) = firstPaintTime - requestTime, both
    # reported by chrome.loadTimes(); ScrollResults converts it to ms.
    first_paint_js = ('window.domAutomationController.send('
                      '(chrome.loadTimes().firstPaintTime - '
                      'chrome.loadTimes().requestTime).toString());')
    first_paint_time = float(self.ExecuteJavascript(first_paint_js,
                                                    tab_index=1))
    self.CloseTab(tab_index=1)
    return ScrollResults(first_paint_time, results)
  def RunScrollTest(self, url, description, graph_name, is_gmail_test=False):
    """Runs a scroll performance test on the specified webpage.
    Args:
      url: The string url for the webpage on which to run the scroll test.
      description: A string description for the particular test being run.
      graph_name: A string name for the performance graph associated with this
          test. Only used on Chrome desktop.
      is_gmail_test: True iff the test is a GMail test.
    """
    results = []
    # The first pass is a warm-up; its result is discarded below.
    for iteration in range(self._num_iterations + 1):
      result = self.RunSingleInvocation(url, is_gmail_test)
      # Ignore the first iteration.
      if iteration:
        fps = result.GetFps(1)
        assert fps, '%s did not scroll' % url
        logging.info('Iteration %d of %d: %f fps', iteration,
                     self._num_iterations, fps)
        results.append(result)
    self._PrintSummaryResults(
        description, [r.GetFps(1) for r in results],
        'FPS', graph_name)
class PopularSitesScrollTest(BaseScrollTest):
  """Measures scrolling performance on recorded versions of popular sites."""
  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.
    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    return super(PopularSitesScrollTest,
                 self).ExtraChromeFlags() + PageCyclerReplay.CHROME_FLAGS
  def _GetUrlList(self, test_name):
    """Returns list of recorded sites."""
    sites_path = PageCyclerReplay.Path('page_sets', test_name=test_name)
    with open(sites_path) as f:
      sites_text = f.read()
    js = """
        %s
        window.domAutomationController.send(JSON.stringify(pageSets));
    """ % sites_text
    # NOTE(review): eval() of a JSON string returned from page JavaScript is
    # risky; simplejson.loads (already used elsewhere in this file) would
    # parse it without executing arbitrary Python expressions.
    page_sets = eval(self.ExecuteJavascript(js))
    return list(itertools.chain(*page_sets))[1:]  # Skip first.
  def _PrintScrollResults(self, results):
    """Outputs frame-time, dropped-frame, and first-paint summaries.
    Index 0 of each ScrollResults is the initial pass; index 1 is the repeat.
    """
    self._PrintSummaryResults(
        'initial', [r.GetMeanFrameTime(0) for r in results],
        'ms', 'FrameTimes')
    self._PrintSummaryResults(
        'repeat', [r.GetMeanFrameTime(1) for r in results],
        'ms', 'FrameTimes')
    self._PrintSummaryResults(
        'initial',
        [r.GetPercentBelow60Fps(0) for r in results],
        'percent', 'PercentBelow60FPS')
    self._PrintSummaryResults(
        'repeat',
        [r.GetPercentBelow60Fps(1) for r in results],
        'percent', 'PercentBelow60FPS')
    self._PrintSummaryResults(
        'first_paint_time', [r.GetFirstPaintTime() for r in results],
        'ms', 'FirstPaintTime')
  def test2012Q3(self):
    """Scrolls the 2012Q3 recorded page set under Web Page Replay."""
    test_name = '2012Q3'
    urls = self._GetUrlList(test_name)
    results = []
    with PageCyclerReplay.ReplayServer(test_name) as replay_server:
      # A single pass suffices when recording a new archive.
      if replay_server.is_record_mode:
        self._num_iterations = 1
      for iteration in range(self._num_iterations):
        for url in urls:
          result = self.RunSingleInvocation(url)
          fps = result.GetFps(0)
          assert fps, '%s did not scroll' % url
          logging.info('Iteration %d of %d: %f fps', iteration + 1,
                       self._num_iterations, fps)
          results.append(result)
    self._PrintScrollResults(results)
class ScrollTest(BaseScrollTest):
  """Tests to measure scrolling performance."""
  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.
    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # Scroll tests additionally disable vsync so the frame rate is not capped.
    base_flags = super(ScrollTest, self).ExtraChromeFlags()
    return base_flags + ['--disable-gpu-vsync']
  def testBlankPageScroll(self):
    """Runs the scroll test on a blank page."""
    blank_url = self.GetFileURLForDataPath('scroll', 'blank.html')
    self.RunScrollTest(blank_url, 'ScrollBlankPage', 'scroll_fps')
  def testTextScroll(self):
    """Runs the scroll test on a text-filled page."""
    text_url = self.GetFileURLForDataPath('scroll', 'text.html')
    self.RunScrollTest(text_url, 'ScrollTextPage', 'scroll_fps')
  def testGooglePlusScroll(self):
    """Runs the scroll test on a Google Plus anonymized page."""
    plus_url = self.GetFileURLForDataPath('scroll', 'plus.html')
    self.RunScrollTest(plus_url, 'ScrollGooglePlusPage', 'scroll_fps')
  def testGmailScroll(self):
    """Runs the scroll test using the live Gmail site."""
    self._LoginToGoogleAccount(account_key='test_google_account_gmail')
    self.RunScrollTest('http://www.gmail.com', 'ScrollGmail', 'scroll_fps',
                       is_gmail_test=True)
class FlashTest(BasePerfTest):
  """Tests to measure flash performance."""
  def _RunFlashTestForAverageFPS(self, webpage_url, description, graph_name):
    """Runs a single flash test that measures an average FPS value.
    Args:
      webpage_url: The string URL to a webpage that will run the test.
      description: A string description for this test.
      graph_name: A string name for the performance graph associated with this
          test. Only used on Chrome desktop.
    """
    # Open up the test webpage; it's assumed the test will start automatically.
    self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
                    msg='Failed to append tab for webpage.')
    # Wait until the final result is computed, then retrieve and output it.
    # The page is expected to define |final_average_fps|, holding -1 until
    # the measurement completes.
    js = """
        window.domAutomationController.send(
            JSON.stringify(final_average_fps));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js, tab_index=1) != '-1',
            timeout=300, expect_retval=True, retry_sleep=1),
        msg='Timed out when waiting for test result.')
    result = float(self.ExecuteJavascript(js, tab_index=1))
    logging.info('Result for %s: %f FPS (average)', description, result)
    self._OutputPerfGraphValue(description, result, 'FPS', graph_name)
  def testFlashGaming(self):
    """Runs a simple flash gaming benchmark test."""
    webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
                                             'FlashGamingTest2.html')
    self._RunFlashTestForAverageFPS(webpage_url, 'FlashGaming', 'flash_fps')
  def testFlashText(self):
    """Runs a simple flash text benchmark test."""
    webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
                                             'FlashTextTest2.html')
    self._RunFlashTestForAverageFPS(webpage_url, 'FlashText', 'flash_fps')
  def testScimarkGui(self):
    """Runs the ScimarkGui benchmark tests."""
    webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
                                             'scimarkGui.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
                    msg='Failed to append tab for webpage.')
    js = 'window.domAutomationController.send(JSON.stringify(tests_done));'
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js, tab_index=1), timeout=300,
            expect_retval='true', retry_sleep=1),
        msg='Timed out when waiting for tests to complete.')
    js_result = """
        var result = {};
        for (var i = 0; i < tests_results.length; ++i) {
          var test_name = tests_results[i][0];
          var mflops = tests_results[i][1];
          var mem = tests_results[i][2];
          result[test_name] = [mflops, mem]
        }
        window.domAutomationController.send(JSON.stringify(result));
    """
    # NOTE(review): eval() of page-supplied JSON; simplejson.loads (used
    # elsewhere in this file) would avoid executing arbitrary expressions.
    result = eval(self.ExecuteJavascript(js_result, tab_index=1))
    for benchmark in result:
      mflops = float(result[benchmark][0])
      mem = float(result[benchmark][1])
      # Strip the '_mflops' suffix so graph labels use the bare name.
      if benchmark.endswith('_mflops'):
        benchmark = benchmark[:benchmark.find('_mflops')]
      logging.info('Results for ScimarkGui_%s:', benchmark)
      logging.info('  %f MFLOPS', mflops)
      logging.info('  %f MB', mem)
      self._OutputPerfGraphValue('ScimarkGui-%s-MFLOPS' % benchmark, mflops,
                                 'MFLOPS', 'scimark_gui_mflops')
      self._OutputPerfGraphValue('ScimarkGui-%s-Mem' % benchmark, mem, 'MB',
                                 'scimark_gui_mem')
class LiveGamePerfTest(BasePerfTest):
  """Tests to measure performance of live gaming webapps."""
  def _RunLiveGamePerfTest(self, url, url_title_substring,
                           description, graph_name):
    """Measures CPU usage and v8 heap size for a live gaming webapp.
    Loads the given URL, lets the webapp run for one minute, then records
    the fraction of non-idle CPU time over that minute and the total v8
    heap size.
    Args:
      url: The string URL of the gaming webapp to analyze.
      url_title_substring: A string that is expected to be a substring of the
          webpage title for the specified gaming webapp.  Used to verify that
          the webapp loads correctly.
      description: A string description for this game, used in the performance
          value description.  Should not contain any spaces.
      graph_name: A string name for the performance graph associated with this
          test.  Only used on Chrome desktop.
    """
    self.NavigateToURL(url)
    tab_title = self.GetActiveTabTitle()
    self.assertTrue(url_title_substring in tab_title,
                    msg='Loaded tab title missing "%s": "%s"' %
                        (url_title_substring, tab_title))
    cpu_before = self._GetCPUUsage()
    # Let the app run for 1 minute.
    time.sleep(60)
    cpu_after = self._GetCPUUsage()
    busy_fraction = self._GetFractionNonIdleCPUTime(cpu_before, cpu_after)
    logging.info('Fraction of CPU time spent non-idle: %f', busy_fraction)
    self._OutputPerfGraphValue(description + 'CpuBusy', busy_fraction,
                               'Fraction', graph_name + '_cpu_busy')
    heap_stats = self.GetV8HeapStats()
    heap_size_mb = heap_stats['v8_memory_used'] / (1024.0 * 1024.0)
    logging.info('Total v8 heap size: %f MB', heap_size_mb)
    self._OutputPerfGraphValue(description + 'V8HeapSize', heap_size_mb,
                               'MB', graph_name + '_v8_heap_size')
  def testAngryBirds(self):
    """Measures performance for Angry Birds."""
    self._RunLiveGamePerfTest('http://chrome.angrybirds.com', 'Angry Birds',
                              'AngryBirds', 'angry_birds')
class BasePageCyclerTest(BasePerfTest):
  """Page class for page cycler tests.
  Derived classes must implement StartUrl().
  Environment Variables:
    PC_NO_AUTO: if set, avoids automatically loading pages.
  """
  MAX_ITERATION_SECONDS = 60
  TRIM_PERCENT = 20
  DEFAULT_USE_AUTO = True
  # Page Cycler lives in src/data/page_cycler rather than src/chrome/test/data
  DATA_PATH = os.path.abspath(
      os.path.join(BasePerfTest.DataDir(), os.pardir, os.pardir,
                   os.pardir, 'data', 'page_cycler'))
  def setUp(self):
    """Performs necessary setup work before running each test."""
    super(BasePageCyclerTest, self).setUp()
    # Pages load automatically unless PC_NO_AUTO is set in the environment.
    self.use_auto = 'PC_NO_AUTO' not in os.environ
  @classmethod
  def DataPath(cls, subdir):
    """Returns the absolute path to a page cycler data subdirectory."""
    return os.path.join(cls.DATA_PATH, subdir)
  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.
    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # Extra flags required to run these tests.
    # The first two are needed for the test.
    # The plugins argument is to prevent bad scores due to pop-ups from
    # running an old version of something (like Flash).
    return (super(BasePageCyclerTest, self).ExtraChromeFlags() +
            ['--js-flags="--expose_gc"',
             '--enable-file-cookies',
             '--allow-outdated-plugins'])
  def WaitUntilStarted(self, start_url):
    """Check that the test navigates away from the start_url."""
    js_is_started = """
      var is_started = document.location.href !== "%s";
      window.domAutomationController.send(JSON.stringify(is_started));
    """ % start_url
    self.assertTrue(
        self.WaitUntil(lambda: self.ExecuteJavascript(js_is_started) == 'true',
                       timeout=10),
        msg='Timed out when waiting to leave start page.')
  def WaitUntilDone(self, url, iterations):
    """Check cookies for "__pc_done=1" to know the test is over."""
    def IsDone():
      cookies = self.GetCookie(pyauto.GURL(url))  # window 0, tab 0
      return '__pc_done=1' in cookies
    self.assertTrue(
        self.WaitUntil(
            IsDone,
            timeout=(self.MAX_ITERATION_SECONDS * iterations),
            retry_sleep=1),
        msg='Timed out waiting for page cycler test to complete.')
  def CollectPagesAndTimes(self, url):
    """Collect the results from the cookies.
    Returns:
      A (pages, times) tuple; times holds every iteration's timings in
      page-major order (see IteratePageTimes).
    """
    pages, times = None, None
    cookies = self.GetCookie(pyauto.GURL(url))  # window 0, tab 0
    for cookie in cookies.split(';'):
      if '__pc_pages' in cookie:
        pages_str = cookie.split('=', 1)[1]
        pages = pages_str.split(',')
      elif '__pc_timings' in cookie:
        times_str = cookie.split('=', 1)[1]
        times = [float(t) for t in times_str.split(',')]
    self.assertTrue(pages and times,
                    msg='Unable to find test results in cookies: %s' % cookies)
    return pages, times
  def IteratePageTimes(self, pages, times, iterations):
    """Regroup the times by the page.
    Args:
      pages: the list of pages
      times: e.g. [page1_iter1, page2_iter1, ..., page1_iter2, page2_iter2, ...]
      iterations: the number of times for each page
    Yields:
      (pageN, [pageN_iter1, pageN_iter2, ...])
    """
    num_pages = len(pages)
    num_times = len(times)
    expected_num_times = num_pages * iterations
    self.assertEqual(
        expected_num_times, num_times,
        msg=('num_times != num_pages * iterations: %s != %s * %s, times=%s' %
             (num_times, num_pages, iterations, times)))
    for i, page in enumerate(pages):
      # Every num_pages-th entry starting at offset i belongs to this page.
      yield page, list(itertools.islice(times, i, None, num_pages))
  def CheckPageTimes(self, pages, times, iterations):
    """Assert that all the times are greater than zero."""
    failed_pages = []
    # |page_times| deliberately does not reuse the name |times|: the previous
    # code shadowed the argument inside this loop, which was harmless only
    # because the generator had already captured the original value.
    for page, page_times in self.IteratePageTimes(pages, times, iterations):
      failed_times = [t for t in page_times if t <= 0.0]
      if failed_times:
        failed_pages.append((page, failed_times))
    if failed_pages:
      self.fail('Pages with unexpected times: %s' % failed_pages)
  def TrimTimes(self, times, percent):
    """Return a new list with |percent| number of times trimmed for each page.
    Removes the largest and smallest values.
    """
    iterations = len(times)
    times = sorted(times)
    num_to_trim = int(iterations * float(percent) / 100.0)
    # Lazy %-args, consistent with the other logging calls in this class.
    logging.debug('Before trimming %d: %s', num_to_trim, times)
    # Explicit floor division keeps the slice bounds integral on Python 3 too
    # (same result as the old '/' under Python 2).
    a = num_to_trim // 2
    b = iterations - (num_to_trim // 2 + num_to_trim % 2)
    trimmed_times = times[a:b]
    logging.debug('After trimming: %s', trimmed_times)
    return trimmed_times
  def ComputeFinalResult(self, pages, times, iterations):
    """The final score that is calculated is a geometric mean of the
    arithmetic means of each page's load time, and we drop the
    upper/lower 20% of the times for each page so they don't skew the
    mean. The geometric mean is used for the final score because the
    time range for any given site may be very different, and we don't
    want slower sites to weight more heavily than others.
    """
    self.CheckPageTimes(pages, times, iterations)
    page_means = [
        Mean(self.TrimTimes(page_times, percent=self.TRIM_PERCENT))
        for _, page_times in self.IteratePageTimes(pages, times, iterations)]
    return GeometricMean(page_means)
  def StartUrl(self, test_name, iterations):
    """Return the URL used to start the test.
    Derived classes must implement this.
    """
    # NotImplemented is a comparison sentinel, not an exception class;
    # raising it produced a confusing TypeError instead of signaling
    # "abstract method" as intended.
    raise NotImplementedError
  def RunPageCyclerTest(self, name, description):
    """Runs the specified PageCycler test.
    Args:
      name: the page cycler test name (corresponds to a directory or test file)
      description: a string description for the test
    """
    iterations = self._num_iterations
    start_url = self.StartUrl(name, iterations)
    self.NavigateToURL(start_url)
    if self.use_auto:
      self.WaitUntilStarted(start_url)
    self.WaitUntilDone(start_url, iterations)
    pages, times = self.CollectPagesAndTimes(start_url)
    final_result = self.ComputeFinalResult(pages, times, iterations)
    logging.info('%s page cycler final result: %f' %
                 (description, final_result))
    self._OutputPerfGraphValue(description + '_PageCycler', final_result,
                               'milliseconds', graph_name='PageCycler')
class PageCyclerTest(BasePageCyclerTest):
  """Tests to run various page cyclers.
  Environment Variables:
    PC_NO_AUTO: if set, avoids automatically loading pages.
  """
  def _PreReadDataDir(self, subdir):
    """Reads every file under a page cycler data directory into memory.
    The intent is to warm the OS file cache before the benchmark touches the
    pages, so cold-disk I/O does not skew the measured times.
    Args:
      subdir: a subdirectory of the page cycler data directory.
    """
    for dir_path, _, file_names in os.walk(self.DataPath(subdir)):
      for file_name in file_names:
        with open(os.path.join(dir_path, file_name)) as handle:
          handle.read()
  def StartUrl(self, test_name, iterations):
    """Builds the file:// URL that kicks off the given page cycler test."""
    # Must invoke GetFileURLForPath before appending parameters to the URL,
    # otherwise those parameters will get quoted.
    query = '?iterations=%d' % iterations
    if self.use_auto:
      query += '&auto=1'
    return self.GetFileURLForPath(self.DataPath(test_name),
                                  'start.html') + query
  def RunPageCyclerTest(self, dirname, description):
    """Runs the specified PageCycler test.
    Args:
      dirname: directory containing the page cycler test
      description: a string description for the test
    """
    # Warm the file cache for the shared files and the test's own files.
    self._PreReadDataDir('common')
    self._PreReadDataDir(dirname)
    super(PageCyclerTest, self).RunPageCyclerTest(dirname, description)
  def testMoreJSFile(self):
    self.RunPageCyclerTest('morejs', 'MoreJSFile')
  def testAlexaFile(self):
    self.RunPageCyclerTest('alexa_us', 'Alexa_usFile')
  def testBloatFile(self):
    self.RunPageCyclerTest('bloat', 'BloatFile')
  def testDHTMLFile(self):
    self.RunPageCyclerTest('dhtml', 'DhtmlFile')
  def testIntl1File(self):
    self.RunPageCyclerTest('intl1', 'Intl1File')
  def testIntl2File(self):
    self.RunPageCyclerTest('intl2', 'Intl2File')
  def testMozFile(self):
    self.RunPageCyclerTest('moz', 'MozFile')
  def testMoz2File(self):
    self.RunPageCyclerTest('moz2', 'Moz2File')
class PageCyclerReplay(object):
  """Run page cycler tests with network simulation via Web Page Replay.
  Web Page Replay is a proxy that can record and "replay" web pages with
  simulated network characteristics -- without having to edit the pages
  by hand. With WPR, tests can use "real" web content, and catch
  performance issues that may result from introducing network delays and
  bandwidth throttling.
  """
  # Checkout-relative path templates for the WPR archives, page sets, start
  # page, and helper extension; expanded by Path() below.
  _PATHS = {
      'archive': 'src/data/page_cycler/webpagereplay/{test_name}.wpr',
      'page_sets': 'src/tools/page_cycler/webpagereplay/tests/{test_name}.js',
      'start_page': 'src/tools/page_cycler/webpagereplay/start.html',
      'extension': 'src/tools/page_cycler/webpagereplay/extension',
  }
  WEBPAGEREPLAY_HOST = '127.0.0.1'
  WEBPAGEREPLAY_HTTP_PORT = 8080
  WEBPAGEREPLAY_HTTPS_PORT = 8413
  # Flags pointing Chrome at the replay proxy plus benchmarking switches.
  # Computed once, at class-definition time.
  CHROME_FLAGS = webpagereplay.GetChromeFlags(
      WEBPAGEREPLAY_HOST,
      WEBPAGEREPLAY_HTTP_PORT,
      WEBPAGEREPLAY_HTTPS_PORT) + [
          '--log-level=0',
          '--disable-background-networking',
          '--enable-experimental-extension-apis',
          '--enable-logging',
          '--enable-benchmarking',
          '--enable-net-benchmarking',
          '--metrics-recording-only',
          '--activate-on-launch',
          '--no-first-run',
          '--no-proxy-server',
      ]
  @classmethod
  def Path(cls, key, **kwargs):
    # Expands a _PATHS template (e.g. {test_name}) into a checkout path.
    return FormatChromePath(cls._PATHS[key], **kwargs)
  @classmethod
  def ReplayServer(cls, test_name, replay_options=None):
    # Builds a webpagereplay.ReplayServer serving the named test's archive;
    # intended for use as a context manager (see callers).
    archive_path = cls.Path('archive', test_name=test_name)
    return webpagereplay.ReplayServer(archive_path,
                                      cls.WEBPAGEREPLAY_HOST,
                                      cls.WEBPAGEREPLAY_HTTP_PORT,
                                      cls.WEBPAGEREPLAY_HTTPS_PORT,
                                      replay_options)
class PageCyclerNetSimTest(BasePageCyclerTest):
  """Tests to run Web Page Replay backed page cycler tests."""
  # Replayed pages can be slower than local files; allow more time per page.
  MAX_ITERATION_SECONDS = 180
  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.
    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    extension_flag = '--load-extension=%s' % PageCyclerReplay.Path('extension')
    return (super(PageCyclerNetSimTest, self).ExtraChromeFlags() +
            [extension_flag] + PageCyclerReplay.CHROME_FLAGS)
  def StartUrl(self, test_name, iterations):
    """Builds the file:// URL for the Web Page Replay start page."""
    start_url = 'file://%s?test=%s&iterations=%d' % (
        PageCyclerReplay.Path('start_page'), test_name, iterations)
    if self.use_auto:
      start_url += '&auto=1'
    return start_url
  def RunPageCyclerTest(self, test_name, description):
    """Runs the specified PageCycler test.
    Args:
      test_name: name for archive (.wpr) and config (.js) files.
      description: a string description for the test
    """
    with PageCyclerReplay.ReplayServer(test_name, None) as replay_server:
      # When recording a new archive, a single pass is sufficient.
      if replay_server.is_record_mode:
        self._num_iterations = 1
      super(PageCyclerNetSimTest, self).RunPageCyclerTest(
          test_name, description)
  def test2012Q2(self):
    self.RunPageCyclerTest('2012Q2', '2012Q2')
class MemoryTest(BasePerfTest):
  """Tests to measure memory consumption under different usage scenarios."""

  def ExtraChromeFlags(self):
    """Launches Chrome with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # Ensure Chrome assigns one renderer process to each tab.
    flags = super(MemoryTest, self).ExtraChromeFlags()
    return flags + ['--process-per-tab']

  def _RecordMemoryStats(self, description, when, duration):
    """Outputs memory statistics to be graphed.

    Args:
      description: A string description for the test. Should not contain
          spaces. For example, 'MemCtrl'.
      when: A string description of when the memory stats are being recorded
          during test execution (since memory stats may be recorded multiple
          times during a test execution at certain "interesting" times).
          Should not contain spaces.
      duration: The number of seconds to sample data before outputting the
          memory statistics.
    """
    mem = self.GetMemoryStatsChromeOS(duration)
    measurement_types = [
        ('gem_obj', 'GemObj'),
        ('gtt', 'GTT'),
        ('mem_free', 'MemFree'),
        ('mem_available', 'MemAvail'),
        ('mem_shared', 'MemShare'),
        ('mem_cached', 'MemCache'),
        ('mem_anon', 'MemAnon'),
        ('mem_file', 'MemFile'),
        ('mem_slab', 'MemSlab'),
        ('browser_priv', 'BrowPriv'),
        ('browser_shared', 'BrowShar'),
        ('gpu_priv', 'GpuPriv'),
        ('gpu_shared', 'GpuShar'),
        ('renderer_priv', 'RendPriv'),
        ('renderer_shared', 'RendShar'),
    ]
    for type_key, type_string in measurement_types:
      if type_key not in mem:
        continue
      graph_name = '%s-%s' % (description, type_string)
      # Emit the min, max and final sampled value for this measurement.
      for stat_key, stat_label in (('min', 'Min'), ('max', 'Max'),
                                   ('end', 'End')):
        self._OutputPerfGraphValue(
            '%s-%s%s-%s' % (description, stat_label, type_string, when),
            mem[type_key][stat_key], 'KB', graph_name)

  def _RunTest(self, tabs, description, duration):
    """Runs a general memory test.

    Args:
      tabs: A list of strings representing the URLs of the websites to open
          during this test.
      description: A string description for the test. Should not contain
          spaces. For example, 'MemCtrl'.
      duration: The number of seconds to sample data before outputting memory
          statistics.
    """
    self._RecordMemoryStats(description, '0Tabs0', duration)
    for cycle in xrange(2):
      for url in tabs:
        self.AppendTab(pyauto.GURL(url))
      self._RecordMemoryStats(description,
                              '%dTabs%d' % (len(tabs), cycle + 1),
                              duration)
      # Close every tab we opened (tab 0 is the original tab, keep it).
      for _ in tabs:
        self.CloseTab(tab_index=1)
      self._RecordMemoryStats(description, '0Tabs%d' % (cycle + 1), duration)

  def testOpenCloseTabsControl(self):
    """Measures memory usage when opening/closing tabs to about:blank."""
    self._RunTest(['about:blank'] * 10, 'MemCtrl', 15)

  def testOpenCloseTabsLiveSites(self):
    """Measures memory usage when opening/closing tabs to live sites."""
    live_sites = [
        'http://www.google.com/gmail',
        'http://www.google.com/calendar',
        'http://www.google.com/plus',
        'http://www.google.com/youtube',
        'http://www.nytimes.com',
        'http://www.cnn.com',
        'http://www.facebook.com/zuck',
        'http://www.techcrunch.com',
        'http://www.theverge.com',
        'http://www.yahoo.com',
    ]
    # Log in to a test Google account to make connections to the above Google
    # websites more interesting.
    self._LoginToGoogleAccount()
    self._RunTest(live_sites, 'MemLive', 20)
class PerfTestServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler for the local performance test server.

  Named GET/POST endpoints are dispatched through the class-level
  GET_REQUEST_HANDLERS / POST_REQUEST_HANDLERS tables below; anything else
  falls through to SimpleHTTPRequestHandler's static file serving.
  """

  def _IgnoreHandler(self, unused_args):
    """A GET request handler that simply replies with status code 200.

    Args:
      unused_args: A dictionary of arguments for the current GET request.
          The arguments are ignored.
    """
    self.send_response(200)
    self.end_headers()

  def _CreateFileOfSizeHandler(self, args):
    """A GET handler that creates a local file with the specified size.

    Args:
      args: A dictionary of arguments for the current GET request. Must
          contain 'filename' and 'mb' keys that refer to the name of the file
          to create and its desired size, respectively.
    """
    megabytes = None
    filename = None
    try:
      megabytes = int(args['mb'][0])
      filename = args['filename'][0]
    except (ValueError, KeyError, IndexError), e:
      logging.exception('Server error creating file: %s', e)
    # NOTE(review): assert is stripped under python -O, and a malformed
    # request reaches here with megabytes/filename still None. Also,
    # |filename| comes straight from the request and is joined to docroot
    # without sanitization ('../' escapes the docroot) -- acceptable for a
    # local test server, but confirm it is never exposed externally.
    assert megabytes and filename
    # Write |megabytes| MB of 'X' bytes into the requested file.
    with open(os.path.join(self.server.docroot, filename), 'wb') as f:
      f.write('X' * 1024 * 1024 * megabytes)
    self.send_response(200)
    self.end_headers()

  def _DeleteFileHandler(self, args):
    """A GET handler that deletes the specified local file.

    Args:
      args: A dictionary of arguments for the current GET request. Must
          contain a 'filename' key that refers to the name of the file to
          delete, relative to the server's document root.
    """
    filename = None
    try:
      filename = args['filename'][0]
    except (KeyError, IndexError), e:
      logging.exception('Server error deleting file: %s', e)
    assert filename
    try:
      os.remove(os.path.join(self.server.docroot, filename))
    except OSError, e:
      # Best-effort delete: a missing file is logged but not fatal.
      logging.warning('OS error removing file: %s', e)
    self.send_response(200)
    self.end_headers()

  def _StartUploadHandler(self, args):
    """A GET handler to serve a page that uploads the given amount of data.

    When the page loads, the specified amount of data is automatically
    uploaded to the same local server that is handling the current request.

    Args:
      args: A dictionary of arguments for the current GET request. Must
          contain an 'mb' key that refers to the size of the data to upload.
    """
    megabytes = None
    try:
      megabytes = int(args['mb'][0])
    except (ValueError, KeyError, IndexError), e:
      logging.exception('Server error starting upload: %s', e)
    assert megabytes
    # The served page POSTs |megabytes| MB of 'X' characters back to the
    # 'process_upload' endpoint (handled by _ProcessUploadHandler) via XHR.
    script = """
    <html>
    <head>
      <script type='text/javascript'>
        function startUpload() {
          var megabytes = %s;
          var data = Array((1024 * 1024 * megabytes) + 1).join('X');
          var boundary = '***BOUNDARY***';
          var xhr = new XMLHttpRequest();

          xhr.open('POST', 'process_upload', true);
          xhr.setRequestHeader(
              'Content-Type',
              'multipart/form-data; boundary="' + boundary + '"');
          xhr.setRequestHeader('Content-Length', data.length);
          xhr.onreadystatechange = function() {
            if (xhr.readyState == 4 && xhr.status == 200) {
              document.getElementById('upload_result').innerHTML =
                  xhr.responseText;
            }
          };
          var body = '--' + boundary + '\\r\\n';
          body += 'Content-Disposition: form-data;' +
                  'file_contents=' + data;
          xhr.send(body);
        }
      </script>
    </head>

    <body onload="startUpload();">
      <div id='upload_result'>Uploading...</div>
    </body>
    </html>
    """ % megabytes
    self.send_response(200)
    self.end_headers()
    self.wfile.write(script)

  def _ProcessUploadHandler(self, form):
    """A POST handler that discards uploaded data and sends a response.

    Args:
      form: A dictionary containing posted form data, as returned by
          urlparse.parse_qs().
    """
    upload_processed = False
    file_size = 0
    if 'file_contents' in form:
      file_size = len(form['file_contents'][0])
      upload_processed = True
    self.send_response(200)
    self.end_headers()
    if upload_processed:
      self.wfile.write('Upload complete (%d bytes)' % file_size)
    else:
      self.wfile.write('No file contents uploaded')

  # Dispatch tables mapping the first URL path component to a handler.
  # Defined at class level after the methods so the names are bound.
  GET_REQUEST_HANDLERS = {
      'create_file_of_size': _CreateFileOfSizeHandler,
      'delete_file': _DeleteFileHandler,
      'start_upload': _StartUploadHandler,
      'favicon.ico': _IgnoreHandler,
  }
  POST_REQUEST_HANDLERS = {
      'process_upload': _ProcessUploadHandler,
  }

  def translate_path(self, path):
    """Ensures files are served from the given document root.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
    path = urlparse.urlparse(path)[2]
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)  # Remove empty strings from |words|.
    path = self.server.docroot
    for word in words:
      # Strip drive letters and directory parts so each component is a bare
      # filename; '.' and '..' are dropped to prevent escaping the docroot.
      _, word = os.path.splitdrive(word)
      _, word = os.path.split(word)
      if word in (os.curdir, os.pardir):
        continue
      path = os.path.join(path, word)
    return path

  def do_GET(self):
    """Processes a GET request to the local server.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
    split_url = urlparse.urlsplit(self.path)
    base_path = split_url[2]
    if base_path.startswith('/'):
      base_path = base_path[1:]
    args = urlparse.parse_qs(split_url[3])
    if base_path in self.GET_REQUEST_HANDLERS:
      # Handlers are stored as plain functions; pass self explicitly.
      self.GET_REQUEST_HANDLERS[base_path](self, args)
    else:
      SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

  def do_POST(self):
    """Processes a POST request to the local server.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
    form = urlparse.parse_qs(
        self.rfile.read(int(self.headers.getheader('Content-Length'))))
    path = urlparse.urlparse(self.path)[2]
    if path.startswith('/'):
      path = path[1:]
    if path in self.POST_REQUEST_HANDLERS:
      self.POST_REQUEST_HANDLERS[path](self, form)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/plain')
      self.end_headers()
      self.wfile.write('No handler for POST request "%s".' % path)
class ThreadedHTTPServer(SocketServer.ThreadingMixIn,
                         BaseHTTPServer.HTTPServer):
  """HTTP server that services each request on its own thread."""

  def __init__(self, server_address, handler_class):
    # ThreadingMixIn has no __init__ of its own; delegate to HTTPServer.
    BaseHTTPServer.HTTPServer.__init__(self, server_address, handler_class)
class PerfTestServer(object):
  """Local server for use by performance tests."""

  def __init__(self, docroot):
    """Initializes the performance test server.

    Args:
      docroot: The directory from which to serve files.
    """
    # Port 0 asks the OS for any free port; GetPort() reports the choice.
    address = ('', 0)
    self._server = ThreadedHTTPServer(address, PerfTestServerRequestHandler)
    self._server.docroot = docroot
    self._server_thread = threading.Thread(target=self._server.serve_forever)

  def Run(self):
    """Starts the server thread."""
    self._server_thread.start()

  def ShutDown(self):
    """Shuts down the server."""
    self._server.shutdown()
    self._server_thread.join()

  def GetPort(self):
    """Identifies the port number to which the server is currently bound.

    Returns:
      The numeric port number to which the server is currently bound.
    """
    _, port = self._server.server_address[:2]
    return port
if __name__ == '__main__':
  # Entry point: run the pyauto functional test harness.
  pyauto_functional.Main()
| bsd-3-clause |
jcarnu/misc | GAndSMS/GAndroSMS.py | 1 | 5791 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import gi, os, time,sqlite3
gi.require_version("Gtk","3.0")
from gi.repository import Gtk,GdkPixbuf,GObject,Pango,Gdk
class GAndroSMS:
def __init__(self):
GObject.threads_init()
#On crée l'accès à la base
#On peut la créer directement :
#create table contact2(id integer primary key autoincrement, nom text not null, phone text not null);
self.engine = sqlite3.connect('GAndroSMS.db')
self.curs = self.engine.cursor()
# On charge l'IHM
self.interface = Gtk.Builder()
self.interface.add_from_file('GAndroSMS.glade')
#Conteneur des contacts (M du MVC)
self.contactstore = self.interface.get_object("lstContacts")
# Contenu du SMS
self.edit = self.interface.get_object("textview1")
# Liste des contacts
self.tree = self.interface.get_object("treeview1")
# On détruit le conteneur existant et on charge à partir de la DB
self.contactstore.clear()
for ctc in self.curs.execute("SELECT nom,phone,id from contact ORDER BY nom"):
self.contactstore.append(ctc)
#contact.append(ctc)
# On crée les renderer de colonnes
column = Gtk.TreeViewColumn('Contact', Gtk.CellRendererText(), text=0)
column.set_clickable(True)
column.set_resizable(True)
self.tree.append_column(column)
column = Gtk.TreeViewColumn('N°', Gtk.CellRendererText(), text=1)
column.set_clickable(True)
column.set_resizable(True)
self.tree.append_column(column)
# Signaux
self.interface.connect_signals(self)
#Dialogue de saisie/modification d'une entrée
self.dial = self.interface.get_object('dialog1')
self.nom = self.interface.get_object('entrynom')
self.phone = self.interface.get_object('entryphone')
# Création ou edition
self.editing = False
self.window = self.interface.get_object("window1")
self.window.connect("delete-event", self.on_mainWindow_destroy)
self.window.show_all()
def on_mainWindow_destroy(self, widget,arg=None):
"""Fin de l'application"""
if self.engine:
self.curs.close()
self.engine.close()
print "Base sauvegardee"
Gtk.main_quit()
def onSendClick(self,widget):
"""Envoi du SMS avec ADB shell et activity manager"""
print "Send"
txt = self.edit.get_buffer()
texte = txt.get_text(txt.get_start_iter(),txt.get_end_iter(),False)
texte = texte.replace("\"","\\\"").replace("\n","\r\n")
model,treeiter = self.tree.get_selection().get_selected()
print texte
# Récupération des données et transformation des LF en CRLF
tel = model[treeiter][1].replace("\n","\r\n")
qui = model[treeiter][0]
print "To ",qui," (",tel,")"
adb_seq=[' am start -a android.intent.action.SENDTO -d "sms:%s" --es sms_body "%s" --ez exit_on_sent true'%(tel,texte),
' input keyevent 22',' input keyevent 66']
for shellcmd in adb_seq:
os.system("adb shell %s"%shellcmd)
print "adb shell %s"%shellcmd
time.sleep(1) # Ne pas aller trop vite on simule les touches.
def onAddButton(self,widget):
"""On affiche un dialogue permettant la saisie"""
response = self.dial.run()
def onEditButton(self,widget):
"""On affiche un dialogue de saisie déjà prérempli avec les données de l'élément concerné"""
model,treeiter = self.tree.get_selection().get_selected()
tel = model[treeiter][1]
qui = model[treeiter][0]
self.currentid = model[treeiter][2]
self.nom.set_text(qui)
self.phone.set_text(tel)
self.editing = True
response = self.dial.run()
def onCancelAdd(self,widget):
"""On abandonne la saisie/modification"""
self.dial.hide()
def onAddItem(self,widget):
"""Add or edit contact entry"""
if self.editing:
query = 'UPDATE contact SET nom="{0}",phone="{1}" WHERE id="{2}"'.format(self.nom.get_text(),self.phone.get_text(),self.currentid)
else:
query = 'INSERT INTO contact(nom,phone) VALUES ("{0}","{1}")'.format(self.nom.get_text(),self.phone.get_text())
print query
self.curs.execute(query)
self.engine.commit()
if not self.editing:
self.curs.execute('SELECT id FROM contact WHERE nom="{0}" AND phone="{1}"'.format(self.nom.get_text(),self.phone.get_text()))
self.currentid = self.curs.fetchone()[0]
self.contactstore.append([self.nom.get_text(),self.phone.get_text(),self.currentid])
else:
model,treeiter = self.tree.get_selection().get_selected()
model[treeiter][1]=self.phone.get_text()
model[treeiter][0]=self.nom.get_text()
self.currentid=-1
self.editing=False;
self.dial.hide()
def onDeleteItem(self,widget):
"""Delete contact entry"""
model,treeiter = self.tree.get_selection().get_selected()
phone = model[treeiter][1]
nom = model[treeiter][0]
id_ = model[treeiter][2]
dialog = Gtk.MessageDialog(self.window, 0, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, "Etes vous sûr de vouloir supprimer l'enregisrement suivant ?")
dialog.format_secondary_text("Contact %s (%s)."%(nom,phone))
response = dialog.run()
if response == Gtk.ResponseType.YES:
print("QUESTION dialog closed by clicking YES button")
#Checker si ok/Ko
query = 'DELETE FROM contact WHERE id = "{0}"'.format(id_)
print query
self.curs.execute(query)
self.contactstore.remove(treeiter)
self.engine.commit()
elif response == Gtk.ResponseType.NO:
print("QUESTION dialog closed by clicking NO button")
dialog.destroy()
def initContactList(self, widget):
pass
def onQuitClick(self,widget):
"""On quitte l'application"""
if self.engine:
print "cloture de la base"
self.curs.close()
self.engine.close()
print "ok"
Gtk.main_quit()
def on_window_destroy(self, widget, data=None):
"""L'application se ferme"""
if self.engine:
self.curs.close()
self.engine.close()
Gtk.main_quit()
if __name__ == "__main__":
    # Entry point: build the UI and hand control to the GTK main loop.
    """Basique"""
    GAndroSMS()
    Gtk.main()
| gpl-2.0 |
Censio/ansible-dev | lib/ansible/cli/console.py | 3 | 15641 | # (c) 2014, Nandor Sivok <dominis@haxor.hu>
# (c) 2016, Redhat Inc
#
# ansible-console is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ansible-console is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
# ansible-console is an interactive REPL shell for ansible
# with built-in tab completion for all the documented modules
#
# Available commands:
# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
# list - list available hosts in the current path
# forks - change fork
# become - become
# ! - forces shell module instead of the ansible module (!yum update -y)
import atexit
import cmd
import getpass
import readline
import os
import sys
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from ansible.utils import module_docs
from ansible.utils.color import stringc
from ansible.utils.unicode import to_unicode, to_str
from ansible.plugins import module_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ConsoleCLI(CLI, cmd.Cmd):
    """Interactive REPL shell for ansible.

    Combines ansible's CLI base class with the stdlib cmd.Cmd loop; every
    discovered ansible module is dynamically attached as a do_<module>
    command in run().
    """

    modules = []

    def __init__(self, args):
        super(ConsoleCLI, self).__init__(args)
        self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
        self.groups = []
        self.hosts = []
        self.pattern = None
        self.variable_manager = None
        self.loader = None
        self.passwords = dict()
        self.modules = None
        cmd.Cmd.__init__(self)

    def parse(self):
        """Build the option parser and parse command-line arguments."""
        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            runas_opts=True,
            inventory_opts=True,
            connect_opts=True,
            check_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
        )
        # options unique to shell
        self.parser.add_option('--step', dest='step', action='store_true',
            help="one-step-at-a-time: confirm each task before running")
        self.parser.set_defaults(cwd='*')
        self.options, self.args = self.parser.parse_args(self.args[1:])
        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
        return True

    def get_names(self):
        # cmd.Cmd caches dir(self.__class__); override so the dynamically
        # attached do_<module> instance attributes are also listed.
        return dir(self)

    def cmdloop(self):
        """Run the interactive loop; Ctrl-C exits cleanly via do_exit."""
        try:
            cmd.Cmd.cmdloop(self)
        except KeyboardInterrupt:
            self.do_exit(self)

    def set_prompt(self):
        """Rebuild the prompt from user, host pattern, host count and forks."""
        login_user = self.options.remote_user or getpass.getuser()
        self.selected = self.inventory.list_hosts(self.options.cwd)
        prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks)
        # '#' + error colour signals a root/become prompt, like a real shell.
        if self.options.become and self.options.become_user in [None, 'root']:
            prompt += "# "
            color = C.COLOR_ERROR
        else:
            prompt += "$ "
            color = C.COLOR_HIGHLIGHT
        self.prompt = stringc(prompt, color)

    def list_modules(self):
        """Return the set of module names found on the module search paths."""
        modules = set()
        if self.options.module_path is not None:
            for i in self.options.module_path.split(os.pathsep):
                module_loader.add_directory(i)
        module_paths = module_loader._get_paths()
        for path in module_paths:
            if path is not None:
                modules.update(self._find_modules_in_path(path))
        return modules

    def _find_modules_in_path(self, path):
        """Yield module names under |path|, recursing into subdirectories."""
        if os.path.isdir(path):
            for module in os.listdir(path):
                if module.startswith('.'):
                    continue
                elif os.path.isdir(module):
                    self._find_modules_in_path(module)
                elif module.startswith('__'):
                    continue
                elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue
                elif module in C.IGNORE_FILES:
                    continue
                elif module.startswith('_'):
                    fullpath = '/'.join([path,module])
                    if os.path.islink(fullpath):  # avoids aliases
                        continue
                    # deprecated modules are prefixed with '_'; strip it.
                    module = module.replace('_', '', 1)
                module = os.path.splitext(module)[0]  # removes the extension
                yield module

    def default(self, arg, forceshell=False):
        """ actually runs modules """
        if arg.startswith("#"):
            return False

        if not self.options.cwd:
            display.error("No host found")
            return False

        # First word selects a known module; otherwise treat the whole line
        # as a shell command.
        if arg.split()[0] in self.modules:
            module = arg.split()[0]
            module_args = ' '.join(arg.split()[1:])
        else:
            module = 'shell'
            module_args = arg

        if forceshell is True:
            module = 'shell'
            module_args = arg

        self.options.module_name = module

        result = None
        try:
            check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
            # Build a one-task play targeting the current host pattern.
            play_ds = dict(
                name = "Ansible Shell",
                hosts = self.options.cwd,
                gather_facts = 'no',
                tasks = [ dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))]
            )
            play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
        except Exception as e:
            display.error(u"Unable to build command: %s" % to_unicode(e))
            return False

        try:
            cb = 'minimal' #FIXME: make callbacks configurable
            # now create a task queue manager to execute the play
            self._tqm = None
            try:
                self._tqm = TaskQueueManager(
                        inventory=self.inventory,
                        variable_manager=self.variable_manager,
                        loader=self.loader,
                        options=self.options,
                        passwords=self.passwords,
                        stdout_callback=cb,
                        run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                        run_tree=False,
                    )
                result = self._tqm.run(play)
            finally:
                # Always clean up the TQM and any temp files, even on error.
                if self._tqm:
                    self._tqm.cleanup()
                if self.loader:
                    self.loader.cleanup_all_tmp_files()

            if result is None:
                display.error("No hosts found")
                return False
        except KeyboardInterrupt:
            display.error('User interrupted execution')
            return False
        except Exception as e:
            display.error(to_unicode(e))
            #FIXME: add traceback in very very verbose mode
            return False

    def emptyline(self):
        # Override cmd.Cmd's default of repeating the last command.
        return

    def do_shell(self, arg):
        """
        You can run shell commands through the shell module.

        eg.:
        shell ps uax | grep java | wc -l
        shell killall python
        shell halt -n

        You can use the ! to force the shell module. eg.:
        !ps aux | grep java | wc -l
        """
        self.default(arg, True)

    def do_forks(self, arg):
        """Set the number of forks"""
        if not arg:
            display.display('Usage: forks <number>')
            return
        self.options.forks = int(arg)
        self.set_prompt()

    # 'serial' is an alias for 'forks'.
    do_serial = do_forks

    def do_verbosity(self, arg):
        """Set verbosity level"""
        if not arg:
            display.display('Usage: verbosity <number>')
        else:
            display.verbosity = int(arg)
            display.v('verbosity level set to %s' % arg)

    def do_cd(self, arg):
        """
        Change active host/group. You can use hosts patterns as well eg.:
        cd webservers
        cd webservers:dbservers
        cd webservers:!phoenix
        cd webservers:&staging
        cd webservers:dbservers:&staging:!phoenix
        """
        if not arg:
            self.options.cwd = '*'
        elif arg == '..':
            # Move to the parent group of the current host, if any.
            try:
                self.options.cwd = self.inventory.groups_for_host(self.options.cwd)[1].name
            except Exception:
                self.options.cwd = ''
        elif arg in '/*':
            self.options.cwd = 'all'
        elif self.inventory.get_hosts(arg):
            self.options.cwd = arg
        else:
            display.display("no host matched")
        self.set_prompt()

    def do_list(self, arg):
        """List the hosts in the current group"""
        if arg == 'groups':
            for group in self.groups:
                display.display(group)
        else:
            for host in self.selected:
                display.display(host.name)

    def do_become(self, arg):
        """Toggle whether plays run with become"""
        if arg:
            self.options.become = C.mk_boolean(arg)
            display.v("become changed to %s" % self.options.become)
            self.set_prompt()
        else:
            display.display("Please specify become value, e.g. `become yes`")

    def do_remote_user(self, arg):
        """Given a username, set the remote user plays are run by"""
        if arg:
            self.options.remote_user = arg
            self.set_prompt()
        else:
            display.display("Please specify a remote user, e.g. `remote_user root`")

    def do_become_user(self, arg):
        """Given a username, set the user that plays are run by when using become"""
        if arg:
            self.options.become_user = arg
        else:
            display.display("Please specify a user, e.g. `become_user jenkins`")
            display.v("Current user is %s" % self.options.become_user)
        self.set_prompt()

    def do_become_method(self, arg):
        """Given a become_method, set the privilege escalation method when using become"""
        if arg:
            self.options.become_method = arg
            display.v("become_method changed to %s" % self.options.become_method)
        else:
            display.display("Please specify a become_method, e.g. `become_method su`")

    def do_exit(self, args):
        """Exits from the console"""
        sys.stdout.write('\n')
        return -1

    # EOF (Ctrl-D) behaves like 'exit'.
    do_EOF = do_exit

    def helpdefault(self, module_name):
        """Print short documentation for |module_name| from its docstring."""
        if module_name in self.modules:
            in_path = module_loader.find_plugin(module_name)
            if in_path:
                oc, a, _ = module_docs.get_docstring(in_path)
                if oc:
                    display.display(oc['short_description'])
                    display.display('Parameters:')
                    for opt in oc['options'].keys():
                        display.display('  ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0])
                else:
                    display.error('No documentation found for %s.' % module_name)
            else:
                display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)

    def complete_cd(self, text, line, begidx, endidx):
        """Tab-completion for the cd command: host and group names."""
        mline = line.partition(' ')[2]
        offs = len(mline) - len(text)

        if self.options.cwd in ('all','*','\\'):
            completions = self.hosts + self.groups
        else:
            completions = [x.name for x in self.inventory.list_hosts(self.options.cwd)]

        return [to_str(s)[offs:] for s in completions if to_str(s).startswith(to_str(mline))]

    def completedefault(self, text, line, begidx, endidx):
        # Complete 'key=' module arguments for a recognised module name.
        if line.split()[0] in self.modules:
            mline = line.split(' ')[-1]
            offs = len(mline) - len(text)
            completions = self.module_args(line.split()[0])

            return [s[offs:] + '=' for s in completions if s.startswith(mline)]

    def module_args(self, module_name):
        """Return the documented option names for |module_name|."""
        in_path = module_loader.find_plugin(module_name)
        oc, a, _ = module_docs.get_docstring(in_path)
        return oc['options'].keys()

    def run(self):
        """Set up inventory, passwords and readline, then enter the loop."""

        super(ConsoleCLI, self).run()

        sshpass = None
        becomepass = None
        vault_pass = None

        # hosts
        if len(self.args) != 1:
            self.pattern = 'all'
        else:
            self.pattern = self.args[0]
        self.options.cwd = self.pattern

        # dynamically add modules as commands
        self.modules = self.list_modules()
        for module in self.modules:
            setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
            setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))

        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        self.passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }

        self.loader = DataLoader()

        if self.options.vault_password_file:
            # read vault_pass from a file
            vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
            self.loader.set_vault_password(vault_pass)
        elif self.options.ask_vault_pass:
            vault_pass = self.ask_vault_passwords()[0]
            self.loader.set_vault_password(vault_pass)

        self.variable_manager = VariableManager()
        self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory)
        self.variable_manager.set_inventory(self.inventory)

        no_hosts = False
        if len(self.inventory.list_hosts()) == 0:
            # Empty inventory
            no_hosts = True
            display.warning("provided hosts list is empty, only localhost is available")

        self.inventory.subset(self.options.subset)
        hosts = self.inventory.list_hosts(self.pattern)
        if len(hosts) == 0 and not no_hosts:
            raise AnsibleError("Specified hosts and/or --limit does not match any hosts")

        self.groups = self.inventory.list_groups()
        self.hosts = [x.name for x in hosts]

        # This hack is to work around readline issues on a mac:
        #  http://stackoverflow.com/a/7116997/541202
        # NOTE(review): readline.__doc__ can be None on some builds, which
        # would raise TypeError here -- TODO confirm on target platforms.
        if 'libedit' in readline.__doc__:
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")

        histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass

        atexit.register(readline.write_history_file, histfile)
        self.set_prompt()
        self.cmdloop()
| gpl-3.0 |
barnone/EigenD | plg_loop/metronome_plg.py | 2 | 8878 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import piw
from pi import agent,atom,toggle,domain,bundles,action,upgrade,policy,utils,timeout,const,node,logic
from pi.logic.shortcuts import T
from . import metronome_version as version,loop_native
def sc(f):
    """Wrap callable *f* as a piw slow-change handler."""
    change = utils.changify(f)
    return piw.slowchange(change)
class Agent(agent.Agent):
def __init__(self, address, ordinal):
    """Build the metronome agent: outputs, clock plumbing, atoms and verbs."""
    agent.Agent.__init__(self, signature=version,names='metronome',container=4,protocols='browse metronome',ordinal=ordinal)
    # Output bundle: bar beat, song beat, running flag, bar and tempo streams.
    self[1] = atom.Atom(names='outputs')
    self[1][1] = bundles.Output(1,False,names='bar beat output')
    self[1][2] = bundles.Output(2,False,names='song beat output')
    self[1][3] = bundles.Output(3,False,names='running output')
    self[1][4] = bundles.Output(4,False,names='bar output')
    self[1][5] = bundles.Output(5,False,names='tempo output')
    self[14]=bundles.Output(1,False,names='status output')
    self.domain = piw.clockdomain_ctl()
    self.domain.set_source(piw.makestring('*',0))
    # Splitters fan the pinger's streams out to the public outputs.
    self.clk_output = bundles.Splitter(self.domain,self[1][1],self[1][2],self[1][3],self[1][4])
    self.tempo_output = bundles.Splitter(self.domain,self[1][5])
    self.status_output = bundles.Splitter(self.domain,self[14])
    # The native pinger generates the clock; its callbacks mirror tempo,
    # beats and preroll changes back into the corresponding atoms.
    self.pinger = loop_native.pinger(self.clk_output.cookie(), self.tempo_output.cookie(), self.status_output.cookie(), self.domain, sc(self.__tempo_set), sc(self.__beats_set), sc(self.__preroll_set))
    self.aggregator = piw.aggregator(self.pinger.cookie(),self.domain)
    # Inputs: tap (beat/bar triggers) and MIDI clock feed the aggregator.
    self.tap_input = bundles.ScalarInput(self.aggregator.get_output(1),self.domain, signals=(1,2))
    self.midi_clock_input = bundles.ScalarInput(self.aggregator.get_output(2),self.domain,signals=(1,))
    self[2] = atom.Atom(domain=domain.BoundedFloat(0,500,hints=(T('stageinc',1),T('inc',1),T('biginc',10),T('control','updown'))), init=120, names='tempo input', policy=atom.default_policy(self.__set_tempo))
    self[3] = atom.Atom(domain=domain.BoundedFloat(0,100,rest=4,hints=(T('stageinc',1),T('inc',1),)), names='beat input', policy=atom.default_policy(self.__set_beats))
    # self[4] is the verb container
    self[5] = atom.Atom(domain=domain.BoundedFloat(0,1,rest=0,hints=(T('control','trigger'),)),policy=self.tap_input.nodefault_policy(1,policy.ImpulseStreamPolicy()),names='beat trigger',transient=True)
    self[6] = atom.Atom(domain=domain.BoundedFloat(0,1,rest=0,hints=(T('control','trigger'),)),policy=self.tap_input.nodefault_policy(2,policy.ImpulseStreamPolicy()),names='bar trigger',transient=True)
    self[7] = atom.Atom(domain=domain.BoundedFloat(1,500), init=30, names='tap tempo minimum', policy=atom.default_policy(self.__set_tempo_lbound))
    self[8] = atom.Atom(domain=domain.BoundedFloat(1,500), init=240, names='tap tempo maximum', policy=atom.default_policy(self.__set_tempo_ubound))
    self[9] = atom.Atom(domain=domain.Bool(hints=(T('control','toggle'),)),policy=atom.default_policy(self.__preroll),names='preroll trigger',transient=True)
    self[10] = atom.Atom(domain=domain.BoundedInt(1,32), init=4, names='preroll', policy=atom.default_policy(self.__set_preroll_count))
    self[15] = atom.Atom(domain=domain.Aniso(),policy=self.midi_clock_input.nodefault_policy(1,False),names='midi clock input')
    self[16] = toggle.Toggle(self.__set_midi_clock_enable,self.domain,container=(None,'midi clock enable',self.verb_container()),names='midi clock enable')
    self[17] = atom.Atom(domain=domain.BoundedFloat(-1000,1000,hints=(T('inc',1),T('control','updown'))),init=0,names='midi clock latency', policy=atom.default_policy(self.__midi_clock_set_latency))
    self[18] = atom.Atom(domain=domain.BoundedIntOrNull(0,2000),init=0,names='beat flash persistence',policy=atom.default_policy(self.__set_beat_flash_persistence))
    # Transport verbs with status feedback for talker lights.
    self.add_verb2(1,'start([],None)',self.__start,status_action=self.__status)
    self.add_verb2(2,'stop([],None)',self.__stop,status_action=self.__status)
    self.add_verb2(3,'start([toggle],None)',self.__toggle,status_action=self.__status)
    print 'init tempo=',self[2].get_value()
    # Push the initial atom values into the native pinger.
    self.pinger.set_tempo(self[2].get_value())
    self.pinger.set_beats(self[3].get_value())
    self.pinger.set_range(self[8].get_value(),self[7].get_value())
    self.__playing=False
    self.__timestamp = piw.tsd_time()
    self.__selected=None
    self.update()
def update(self):
    """Bump the agent timestamp property so observers notice a state change."""
    self.__timestamp += 1
    self.set_property_string('timestamp', str(self.__timestamp))
def rpc_enumerate(self,arg):
    # Browse RPC: report (item count, cookie).  The metronome exposes
    # nothing browsable, so always (0,0).
    print 'metronome enumerate'
    #return logic.render_term((1,0))
    return logic.render_term((0,0))
def rpc_cinfo(self,arg):
    # Browse RPC: no child/category info — empty term.
    print 'metronome __cinfo'
    return '[]'
def rpc_finfo(self,arg):
    # Browse RPC: no file info — empty term.
    print 'metronome __finfo'
    return '[]'
def rpc_dinfo(self,arg):
    """Build the key/value display-info term shown in browse views."""
    items = [('dinfo_id', self.get_description())]
    items.append(('Tempo', '%.1f' % self[2].get_value()))
    items.append(('Beats', self[3].get_value()))
    # Boolean state rendered as Yes/No strings for display.
    items.append(('Running', 'Yes' if self.__playing else 'No'))
    items.append(('Midi clock sync', 'Yes' if self[16].get_value() else 'No'))
    items.append(('Midi clock latency (ms)', self[17].get_value()))
    return logic.render_term(T('keyval', tuple(items)))
def rpc_current(self,arg):
    """Report the current selection; the metronome always reports id 0."""
    return '[[%d,[]]]' % 0
def rpc_setselected(self,arg):
    # Selection RPC: a metronome has no selectable items; ignore.
    pass
def rpc_activated(self,arg):
    # Activation RPC: nothing to activate; return an empty pair term.
    return logic.render_term(('',''))
def __setup_playstate(self,playing):
    # Record the transport state and republish it via the timestamp property.
    self.__playing=playing
    print '__setup_playstate',self.__playing
    self.update()
def __playchanged(self,d):
    """React to a play-state value arriving on the input (bools only)."""
    if not d.is_bool():
        return
    self.__playing = d.as_bool()
    self.update()
    # Drive the clock engine to match the new transport state.
    if self.__playing:
        self.pinger.play()
    else:
        self.pinger.stop()
def __set_midi_clock_enable(self,e):
    # Toggle slaving the metronome to the incoming MIDI clock stream.
    print 'midi clock enable',e
    self.pinger.midi_clock_enable(e)
    self.update()
    return True
def __midi_clock_set_latency(self,latency):
    # Set midi clock latency compensation in ms, then republish state.
    self.pinger.midi_clock_set_latency(latency)
    self.update()
    return True
def __set_beat_flash_persistence(self,time):
    # Forward the flash persistence (ms) to the clock engine and mirror the
    # value into atom 18.  NOTE: the parameter shadows the stdlib 'time'
    # module name; harmless here since nothing else in this method uses it.
    self.pinger.set_beat_flash_persistence(time)
    self[18].set_value(time)
    return True
def __preroll_set(self,t):
    # Mirror an externally driven preroll trigger into atom 9.
    self[9].set_value(t.as_bool())
def __preroll(self,t):
    """Start (t true) or cancel (t false) a count-in of 'preroll' beats."""
    if t:
        count = self[10].get_value()
    else:
        count = 0
    self.pinger.start_preroll(count)
def __set_preroll_count(self,c):
    # The count is read from atom 10 when the preroll fires, so there is
    # nothing to push here; just accept the new value.
    return True
def __beats_set(self,t):
    # Beats-per-bar changed on the input stream: mirror into atom 3.
    self[3].set_value(t.as_float())
    self.update()
def __tempo_set(self,t):
    # Tempo changed on the input stream: mirror into atom 2.
    #print 'tempo set to',t.as_float()
    self[2].set_value(t.as_float())
    self.update()
def __set_tempo(self,t):
    # Push a user-set tempo (BPM) to the clock engine.  Returning False
    # presumably lets the default policy also store the value — TODO confirm
    # against the atom policy semantics.
    print '__set_tempo to',t
    self.pinger.set_tempo(t)
    return False
def __set_tempo_lbound(self,l):
    # New tap-tempo minimum: reject values at or above the current maximum.
    u=self[8].get_value()
    if u<=l:
        return False
    # NOTE(review): argument order (upper, lower) matches the
    # set_range(self[8], self[7]) call made at construction time.
    self.pinger.set_range(u,l)
def __set_tempo_ubound(self,u):
    # New tap-tempo maximum: must exceed the minimum, which must be >= 1.
    # NOTE(review): the lbound setter above does not apply the l<1 check —
    # asymmetry looks unintentional, but preserved here.
    l=self[7].get_value()
    if l<1 or l>=u:
        return False
    self.pinger.set_range(u,l)
def __set_beats(self,t):
    # Push beats-per-bar to the clock engine; False defers value storage
    # to the default policy — TODO confirm against atom policy semantics.
    self.pinger.set_beats(t)
    return False
def __start(self,subj):
    # 'start' verb: run the clock and publish the playing state.
    print 'start'
    self.pinger.play()
    self.__setup_playstate(True)
    return action.nosync_return()
def __toggle(self,subj):
    # 'start toggle' verb: flip the engine state.  NOTE(review): unlike
    # __start/__stop this does not update __playing via __setup_playstate —
    # verify whether the engine reports the change back some other way.
    print 'toggle'
    self.pinger.toggle()
    return action.nosync_return()
def __stop(self,subj):
    # 'stop' verb: halt the clock and publish the stopped state.
    print 'stop'
    self.pinger.stop()
    self.__setup_playstate(False)
    return action.nosync_return()
def __status(self,subj):
    # Status term returned for verb feedback; "#14" references atom 14.
    # NOTE(review): atom 14 is not created in the visible code — confirm it
    # is set up in the part of __init__ outside this view.
    return 'dsc(~(s)"#14",None)'
# Module entry point: register Agent as this plugin's implementation.
agent.main(Agent)
| gpl-3.0 |
BackupGGCode/python-for-android | python3-alpha/python3-src/Lib/importlib/test/extension/test_case_sensitivity.py | 50 | 1152 | import sys
from test import support
import unittest
from importlib import _bootstrap
from .. import util
from . import util as ext_util
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(unittest.TestCase):

    """Check that extension-module lookup honours PYTHONCASEOK."""

    def find_module(self):
        # Look the extension module up under a deliberately wrong-cased name.
        name = ext_util.NAME
        wrong_case = name.upper()
        assert wrong_case != name
        finder = _bootstrap._FileFinder(ext_util.PATH,
                                        _bootstrap._ExtensionFinderDetails())
        return finder.find_module(wrong_case)

    def test_case_sensitive(self):
        # Without PYTHONCASEOK the badly-cased name must not be found.
        with support.EnvironmentVarGuard() as env:
            env.unset('PYTHONCASEOK')
            self.assertIsNone(self.find_module())

    def test_case_insensitivity(self):
        # With PYTHONCASEOK set, lookup is case-insensitive and succeeds.
        with support.EnvironmentVarGuard() as env:
            env.set('PYTHONCASEOK', '1')
            loader = self.find_module()
            self.assertTrue(hasattr(loader, 'load_module'))
def test_main():
    # Run the suite only when an extension module exists on this platform.
    if ext_util.FILENAME is not None:
        support.run_unittest(ExtensionModuleCaseSensitivityTest)
# Allow running this test file directly as a script.
if __name__ == '__main__':
    test_main()
| apache-2.0 |
pp-mo/iris | lib/iris/fileformats/abf.py | 5 | 6003 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Provides ABF (and ABL) file format capabilities.
ABF and ABL files are satellite file formats defined by Boston University.
Including this module adds ABF and ABL loading to the session's capabilities.
The documentation for this file format can be found
`here <http://cliveg.bu.edu/modismisr/lai3g-fpar3g.html>`_.
"""
import calendar
import datetime
import glob
import os.path
import numpy as np
import numpy.ma as ma
import iris
from iris.coords import AuxCoord, DimCoord
from iris.coord_systems import GeogCS
import iris.fileformats
import iris.io.format_picker
# Fixed raster dimensions of an ABF/ABL file (global 1/12 degree grid).
X_SIZE = 4320
Y_SIZE = 2160

# Map the lowercase month abbreviation embedded in ABF filenames to its
# calendar month number (1-12).  Built from a fixed tuple rather than the
# locale-dependent calendar module so the mapping is deterministic.
_MONTH_ABBRS = ("jan", "feb", "mar", "apr", "may", "jun",
                "jul", "aug", "sep", "oct", "nov", "dec")
month_numbers = dict(zip(_MONTH_ABBRS, range(1, 13)))
class ABFField:
    """
    A data field from an ABF (or ABL) file.

    Capable of creating a :class:`~iris.cube.Cube`.

    """

    def __init__(self, filename):
        """
        Create an ABFField object from the given filename.

        Args:

        * filename - An ABF filename.

        Example::

            field = ABFField("AVHRRBUVI01.1985feba.abl")

        """
        basename = os.path.basename(filename)
        # All metadata (version, year, month, period, format) is encoded in
        # the fixed-width 24-character filename, so reject anything else.
        if len(basename) != 24:
            raise ValueError(
                "ABFField expects a filename of 24 characters: "
                "{}".format(basename)
            )
        self._filename = filename

    def __getattr__(self, key):
        # Lazily read the file the first time the data payload is requested;
        # the metadata attributes are set as a side effect of _read().
        # Do we need to load now?
        if key == "data" and "data" not in self.__dict__:
            self._read()
        try:
            return self.__dict__[key]
        except KeyError:
            raise AttributeError("ABFField has no attribute '{}'".format(key))

    def _read(self):
        """Read the field from the given filename."""
        # Decode the fixed filename layout, e.g. "AVHRRBUVI01.1985feba.abl":
        # version at [9:11], year at [12:16], month abbrev at [16:19],
        # half-month period at [19:20], format (abf/abl) at [21:24].
        basename = os.path.basename(self._filename)
        self.version = int(basename[9:11])
        self.year = int(basename[12:16])
        self.month = basename[16:19]
        self.period = basename[19:20]
        self.format = basename[21:24]
        self.month = month_numbers[self.month]

        # Data is 8 bit bigendian.
        data = np.fromfile(self._filename, dtype=">u1").reshape(X_SIZE, Y_SIZE)

        # Iris' preferred dimensional ordering is (y,x).
        data = data.transpose()
        # Flip, for a positive step through the Y dimension.
        data = data[::-1]
        # Any percentages greater than 100 represent missing data.
        data = ma.masked_greater(data, 100)

        # The default fill value is 999999(!), so we choose something
        # more sensible. NB. 999999 % 256 = 63 = bad.
        data.fill_value = 255

        self.data = data

    def to_cube(self):
        """Return a new :class:`~iris.cube.Cube` from this ABFField."""
        cube = iris.cube.Cube(self.data)

        # Name.
        if self.format.lower() == "abf":
            cube.rename("FAPAR")
        elif self.format.lower() == "abl":
            cube.rename("leaf_area_index")
        else:
            msg = "Unknown ABF/ABL format: {}".format(self.format)
            raise iris.exceptions.TranslationError(msg)
        # NOTE(review): '%' is applied for both formats although LAI is
        # usually dimensionless — preserved as existing behaviour.
        cube.units = "%"

        # Grid: regular 1/12 degree lat/lon, cell-centre points.
        step = 1.0 / 12.0
        llcs = GeogCS(semi_major_axis=6378137.0, semi_minor_axis=6356752.31424)
        x_coord = DimCoord(
            np.arange(X_SIZE) * step + (step / 2) - 180,
            standard_name="longitude",
            units="degrees",
            coord_system=llcs,
        )
        y_coord = DimCoord(
            np.arange(Y_SIZE) * step + (step / 2) - 90,
            standard_name="latitude",
            units="degrees",
            coord_system=llcs,
        )
        x_coord.guess_bounds()
        y_coord.guess_bounds()
        # Data was transposed to (y, x) in _read(), hence these dims.
        cube.add_dim_coord(x_coord, 1)
        cube.add_dim_coord(y_coord, 0)

        # Time: period 'a' covers the 1st-15th, 'b' the 16th to month end.
        if self.period == "a":
            start = 1
            end = 15
        elif self.period == "b":
            start = 16
            end = calendar.monthrange(self.year, self.month)[1]
        else:
            raise iris.exceptions.TranslationError(
                "Unknown period: " "{}".format(self.period)
            )

        start = datetime.date(year=self.year, month=self.month, day=start)
        end = datetime.date(year=self.year, month=self.month, day=end)

        # Convert to "days since 0001-01-01".
        # Iris will have proper datetime objects in the future.
        # This step will not be necessary.
        start = start.toordinal() - 1
        end = end.toordinal() - 1

        # TODO: Should we put the point in the middle of the period instead?
        cube.add_aux_coord(
            AuxCoord(
                start,
                standard_name="time",
                units="days since 0001-01-01",
                bounds=[start, end],
            )
        )

        # TODO: Do they only come from Boston?
        # Attributes.
        cube.attributes["source"] = "Boston University"

        return cube
def load_cubes(filespecs, callback=None):
    """
    Loads cubes from a list of ABF filenames.

    Args:

    * filespecs - an ABF filename glob pattern, or a list of patterns

    Kwargs:

    * callback - a function that can be passed to :func:`iris.io.run_callback`

    .. note::

        The resultant cubes may not be in the same order as in the file.

    """
    # Accept a single pattern or a list of patterns uniformly.
    specs = [filespecs] if isinstance(filespecs, str) else filespecs
    for spec in specs:
        for fname in glob.glob(spec):
            field = ABFField(fname)
            cube = field.to_cube()

            # Give any user callback the chance to amend or reject the cube.
            if callback is not None:
                cube = iris.io.run_callback(callback, cube, field, fname)
            if cube is None:
                continue

            yield cube
| lgpl-3.0 |
shivylp/xLisp | xLisp/utils.py | 1 | 3766 | import imp
import os
import inspect
# Module names resolved via plain __import__ instead of imp.find_module.
py_builtins = ["math", "sys"]
def load_module(mpath, path = []):
    """Import and return the module named *mpath*.

    Names listed in py_builtins are resolved with a plain __import__;
    anything else is located with imp.find_module on *path* (note: the
    default of [] means "search nowhere", preserved from the original).

    Raises ImportError when the module cannot be found or loaded.
    """
    if mpath in py_builtins:
        return __import__(mpath)
    # Fix: the original 'except ImportError, ex: raise' was a Python-2-only
    # no-op handler, and the file handle returned by find_module leaked
    # whenever load_module itself raised.  Close it unconditionally.
    f, pathname, description = imp.find_module(mpath, path)
    try:
        return imp.load_module(mpath, f, pathname, description)
    finally:
        if f is not None:
            f.close()
def scan_module_object(m, include_all = False, prefix = ""):
    """
    Returns a list of (name, value) pairs that can be imported from *m*.

    Only the following kinds of values make the list:
        int, float, list, tuple, long, str, function, builtin_function
    xLispFunction instances are exported under their own __name__.
    """
    exported = []
    # A non-blank prefix is stripped and joined with '-'; a blank (or
    # whitespace-only) prefix is used untouched, as before.
    label = prefix
    if prefix.strip() != "":
        label = prefix.strip() + "-"
    if m is None:
        return exported
    for attr in dir(m):
        if attr.startswith("__"):
            continue
        value = getattr(m, attr)
        qualified = "%s%s" % (label, attr)
        if inspect.isfunction(value) or inspect.isbuiltin(value):
            exported.append((qualified, value))
        elif type(value) in [str, int, float, long, list, tuple]:
            exported.append((qualified, value))
        elif hasattr(value, "__class__") and \
                value.__class__.__name__ == "xLispFunction":
            exported.append((value.__name__, value))
    return exported
def is_env_required(fx):
    """
    Given a function 'fx' returns True if the first argument
    to be passed is 'env'.  False otherwise.
    """
    # Fix: the original had unreachable code after the if/else (a stray
    # print and 'return False'), and duplicated the same check in both
    # branches.  inspect.getargspec was also removed in Python 3.11, so
    # fall back to getfullargspec when it exists.
    spec_fn = getattr(inspect, "getfullargspec", inspect.getargspec)
    args = spec_fn(fx).args
    if inspect.ismethod(fx):
        # Drop the implicit 'self'/'cls' of a bound method.
        args = args[1:]
    return len(args) > 0 and args[0] == "env"
def get_argc(fx):
    """
    Given a function 'fx' returns the number of minimum
    and maximum arguments required as a tuple (min, max)

    Note: max = -1 for functions taking variable number
    of arguments

    If 'fx' is a builtin method, None will be returned
    indicating that the arg-spec is not available for it
    """
    if inspect.isbuiltin(fx):
        return None
    # Fix: inspect.getargspec was removed in Python 3.11 (fall back to
    # getfullargspec), and len() on the lazy zip() object raised a
    # TypeError on Python 3 — materialise it with list().
    spec_fn = getattr(inspect, "getfullargspec", inspect.getargspec)
    argspec = spec_fn(fx)
    # Pair trailing args with their defaults to count the optional ones.
    defaults = argspec.defaults or ()
    n_optional = len(list(zip(argspec.args[::-1], defaults)))
    minimum = len(argspec.args) - n_optional
    maximum = -1
    # ArgSpec (py2) calls **kwargs 'keywords'; FullArgSpec calls it 'varkw'.
    varkw = getattr(argspec, "keywords", None) or getattr(argspec, "varkw", None)
    if argspec.varargs is None and varkw is None:
        maximum = len(argspec.args)
    if inspect.ismethod(fx):
        # The implicit 'self'/'cls' is not supplied by the caller.
        # NOTE(review): 'maximum' is deliberately left undecremented here,
        # matching the original behaviour.
        minimum = minimum - 1
    return (minimum, maximum)
if __name__ == '__main__':
    # Ad-hoc smoke test: exercise get_argc()/is_env_required() against a
    # mix of builtins, plain functions and class/static/unbound methods.
    from sys import getsizeof

    def test(env, x, y, test = 5, *args, **kwargs):
        pass

    class Test(object):
        def __init__(self):
            pass

        def unboundmeth(self,env, x,y, test = 0, *args):
            pass

        @classmethod
        def clsmethod(cls,env,y, t = 0):
            print(cls,env,y)

        @staticmethod
        def staticmeth(env,x, *args):
            pass

    def main():
        test_funcs = [getsizeof, test, Test.clsmethod, Test.staticmeth, Test.unboundmeth]
        for meth in test_funcs:
            retval = get_argc(meth)
            # 'func_name' is the Python 2 attribute; fall back to __name__.
            print("+ %s()" % getattr(meth, "func_name", meth.__name__))
            if retval:
                minargc, maxargc = retval
                print("+--+ atleast %s args required" % (minargc))
                print("+--+ atlmost %s args required" % (maxargc))
                print("+--+ 'env' required : %s" % (is_env_required(meth)))
            else:
                print("+--+ builtin function")

    main()
| gpl-3.0 |
vigilv/scikit-learn | sklearn/externals/joblib/logger.py | 359 | 5135 | """
Helpers for logging.
This module needs much love to become useful.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import print_function
import time
import sys
import os
import shutil
import logging
import pprint
from .disk import mkdirp
def _squeeze_time(t):
"""Remove .1s to the time under Windows: this is the time it take to
stat files. This is needed to make results similar to timings under
Unix, for tests
"""
if sys.platform.startswith('win'):
return max(0, t - .1)
else:
return t
def format_time(t):
    """Render a duration as both seconds and minutes, e.g. '12.0s, 0.2min'."""
    secs = _squeeze_time(t)
    return "%.1fs, %.1fmin" % (secs, secs / 60.)
def short_format_time(t):
    """Render a duration compactly: minutes above one minute, else seconds."""
    secs = _squeeze_time(t)
    if secs <= 60:
        return " %5.1fs" % (secs)
    return "%4.1fmin" % (secs / 60.)
def pformat(obj, indent=0, depth=3):
    """Pretty-print *obj*, temporarily tightening numpy's print options
    (when numpy is already loaded) so huge arrays stay short."""
    saved_options = None
    if 'numpy' in sys.modules:
        import numpy as np
        saved_options = np.get_printoptions()
        np.set_printoptions(precision=6, threshold=64, edgeitems=1)
    formatted = pprint.pformat(obj, depth=depth, indent=indent)
    if saved_options:
        # Restore whatever the caller had configured globally.
        np.set_printoptions(**saved_options)
    return formatted
###############################################################################
# class `Logger`
###############################################################################
class Logger(object):
    """ Base class for logging messages.
    """

    def __init__(self, depth=3):
        """
        Parameters
        ----------
        depth: int, optional
            The depth of objects printed.
        """
        self.depth = depth

    def warn(self, msg):
        # Fix: logging.warn() is a deprecated alias of logging.warning()
        # and was removed in Python 3.13.
        logging.warning("[%s]: %s" % (self, msg))

    def debug(self, msg):
        # XXX: This conflicts with the debug flag used in children class
        logging.debug("[%s]: %s" % (self, msg))

    def format(self, obj, indent=0):
        """ Return the formated representation of the object.
        """
        return pformat(obj, indent=indent, depth=self.depth)
###############################################################################
# class `PrintTime`
###############################################################################
class PrintTime(object):
    """ Print and log messages while keeping track of time.
    """

    def __init__(self, logfile=None, logdir=None):
        """
        Parameters
        ----------
        logfile: str, optional
            Path of the log file to append to (exclusive with logdir).
        logdir: str, optional
            Directory in which a 'joblib.log' file is created.
        """
        if logfile is not None and logdir is not None:
            raise ValueError('Cannot specify both logfile and logdir')
        # XXX: Need argument docstring
        self.last_time = time.time()
        self.start_time = self.last_time
        if logdir is not None:
            logfile = os.path.join(logdir, 'joblib.log')
        self.logfile = logfile
        if logfile is not None:
            mkdirp(os.path.dirname(logfile))
            if os.path.exists(logfile):
                # Rotate the logs: shift logfile.1 ... logfile.8 up by one.
                for i in range(1, 9):
                    try:
                        shutil.move(logfile + '.%i' % i,
                                    logfile + '.%i' % (i + 1))
                    # Fix: bare 'except:' also swallowed KeyboardInterrupt/
                    # SystemExit; the string literals were no-op statements.
                    except Exception:
                        # Rotation is best-effort; missing files are fine.
                        pass
                # Use a copy rather than a move, so that a process
                # monitoring this file does not get lost.
                try:
                    shutil.copy(logfile, logfile + '.1')
                except Exception:
                    pass
            try:
                with open(logfile, 'w') as logfile:
                    logfile.write('\nLogging joblib python script\n')
                    logfile.write('\n---%s---\n' % time.ctime(self.last_time))
            except Exception:
                # Multiprocessing writing to files can create race
                # conditions. Rather fail silently than crash the
                # computation.
                # XXX: We actually need a debug flag to disable this
                # silent failure.
                pass

    def __call__(self, msg='', total=False):
        """ Print the time elapsed between the last call and the current
            call, with an optional message.
        """
        if not total:
            time_lapse = time.time() - self.last_time
            full_msg = "%s: %s" % (msg, format_time(time_lapse))
        else:
            # FIXME: Too much logic duplicated
            time_lapse = time.time() - self.start_time
            full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse,
                                                time_lapse / 60)
        print(full_msg, file=sys.stderr)
        if self.logfile is not None:
            try:
                with open(self.logfile, 'a') as f:
                    print(full_msg, file=f)
            except Exception:
                # See __init__: silently tolerate concurrent-write races.
                pass
        self.last_time = time.time()
| bsd-3-clause |
tdtrask/ansible | lib/ansible/modules/cloud/amazon/cloudformation.py | 13 | 30664 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: stable interface, maintained by core.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches or updates an AWS CloudFormation stack and waits for it complete.
notes:
- As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML.
This will change (in fact, it may change before 2.3 is out).
version_added: "1.1"
options:
stack_name:
description:
- name of the cloudformation stack
required: true
disable_rollback:
description:
- If a stack fails to form, rollback will remove the stack
required: false
default: "false"
choices: [ "true", "false" ]
template_parameters:
description:
- a list of hashes of all the template variables for the stack
required: false
default: {}
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is "absent", stack will be removed.
default: present
choices: [ present, absent ]
required: false
template:
description:
- The local path of the cloudformation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like "roles/cloudformation/files/cloudformation-example.json".
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is present, the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
required: false
default: null
notification_arns:
description:
- The Simple Notification Service (SNS) topic ARNs to publish stack related events.
required: false
default: null
version_added: "2.0"
stack_policy:
description:
- the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified.
(for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
required: false
default: null
version_added: "1.9"
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation. Can be updated later, updating tags removes previous entries.
required: false
default: null
version_added: "1.4"
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
as the stack.
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is present, the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
required: false
version_added: "2.0"
create_changeset:
description:
- "If stack already exists create a changeset instead of directly applying changes.
See the AWS Change Sets docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html).
WARNING: if the stack does not exist, it will be created without changeset. If the state is absent, the stack will be deleted immediately with no
changeset."
required: false
default: false
version_added: "2.4"
changeset_name:
description:
- Name given to the changeset when creating a changeset, only used when create_changeset is true. By default a name prefixed with Ansible-STACKNAME
is generated based on input parameters.
See the AWS Change Sets docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
required: false
default: null
version_added: "2.4"
template_format:
description:
- (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format.
This parameter is ignored since Ansible 2.3.
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
role_arn:
description:
- The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
required: false
default: null
version_added: "2.3"
termination_protection:
description:
- enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
version_added: "2.5"
template_body:
description:
- Template body. Use this to pass in the actual body of the Cloudformation template.
- If 'state' is 'present' and the stack does not exist yet, either 'template', 'template_body' or 'template_url'
must be specified (but only one of them). If 'state' is present, the stack does exist, and neither 'template',
'template_body' nor 'template_url' are specified, the previous template will be reused.
required: false
version_added: "2.5"
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3, botocore>=1.4.57 ]
'''
EXAMPLES = '''
- name: create a cloudformation stack
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: create a stack, specify role that cloudformation assumes
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template via an URL
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template body via lookup template
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_body: "{{ lookup('template', 'cloudformation.j2') }}"
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Enable termination protection on a stack.
# If the stack already exists, this will update its termination protection
- name: enable termination protection during stack creation
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
termination_protection: yes
'''
RETURN = '''
events:
type: list
description: Most recent events in Cloudformation's event log. This may be from a previous run in some cases.
returned: always
sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
log:
description: Debugging logs. Useful when modifying or finding an error.
returned: always
type: list
sample: ["updating stack"]
stack_resources:
description: AWS stack resources and their status. List of dictionaries, one dict per resource.
returned: state == present
type: list
sample: [
{
"last_updated_time": "2016-10-11T19:40:14.979000+00:00",
"logical_resource_id": "CFTestSg",
"physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
"resource_type": "AWS::EC2::SecurityGroup",
"status": "UPDATE_COMPLETE",
"status_reason": null
}
]
stack_outputs:
type: dict
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: state == present
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
''' # NOQA
import json
import time
import uuid
import traceback
from hashlib import sha1
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import ansible.module_utils.ec2
# import a class, otherwise we'll use a fully qualified path
from ansible.module_utils.ec2 import AWSRetry, boto_exception
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def get_stack_events(cfn, stack_name, token_filter=None):
    '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
    # Returns {'events': [...], 'log': [...]}: one formatted line per stack
    # event, plus failure reasons / diagnostics in 'log'.
    ret = {'events': [], 'log': []}

    try:
        pg = cfn.get_paginator(
            'describe_stack_events'
        ).paginate(
            StackName=stack_name
        )
        if token_filter is not None:
            # JMESPath filter: only events belonging to this request token.
            events = list(pg.search(
                "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
            ))
        else:
            events = list(pg.search("StackEvents[*]"))
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            ret['log'].append('Stack does not exist.')
            return ret
        ret['log'].append('Unknown error: ' + str(error_msg))
        return ret

    for e in events:
        eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
        ret['events'].append(eventline)

        if e['ResourceStatus'].endswith('FAILED'):
            # Surface the failure reason to make debugging easier.
            failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
            ret['log'].append(failline)

    return ret
def create_stack(module, stack_params, cfn):
    # Create a new stack and block until it reaches a terminal state.
    # stack_params is the dict of boto3 create_stack keyword arguments;
    # failures are reported via module.fail_json.
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")

    # 'disablerollback' and 'EnableTerminationProtection' only
    # apply on creation, not update.
    stack_params['DisableRollback'] = module.params['disable_rollback']
    if module.params.get('termination_protection') is not None:
        if boto_supports_termination_protection(cfn):
            stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
        else:
            module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")

    try:
        cfn.create_stack(**stack_params)
        # Wait for completion, filtering events by the request token if set.
        result = stack_operation(cfn, stack_params['StackName'], 'CREATE', stack_params.get('ClientRequestToken', None))
    except Exception as err:
        error_msg = boto_exception(err)
        module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
    if not result:
        module.fail_json(msg="empty result")
    return result
def list_changesets(cfn, stack_name):
    """Return the names of all changesets currently listed for *stack_name*."""
    summaries = cfn.list_change_sets(StackName=stack_name)['Summaries']
    return [summary['ChangeSetName'] for summary in summaries]
def create_changeset(module, stack_params, cfn):
    # Create (or reuse) a changeset for an existing stack and wait on it.
    # Exits the module early (changed=False) when AWS reports the changeset
    # would contain no changes.
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template' or 'template_url' is required.")
    if module.params['changeset_name'] is not None:
        stack_params['ChangeSetName'] = module.params['changeset_name']

    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)

    try:
        # NOTE(review): build_changeset_name() is defined outside this view;
        # it is expected to honour a ChangeSetName already present in
        # stack_params — confirm, otherwise the user's name is clobbered.
        changeset_name = build_changeset_name(stack_params)
        stack_params['ChangeSetName'] = changeset_name

        # Determine if this changeset already exists
        pending_changesets = list_changesets(cfn, stack_params['StackName'])
        if changeset_name in pending_changesets:
            warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
            result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
        else:
            cs = cfn.create_change_set(**stack_params)
            # Make sure we don't enter an infinite loop
            time_end = time.time() + 600
            while time.time() < time_end:
                try:
                    newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
                except botocore.exceptions.BotoCoreError as err:
                    error_msg = boto_exception(err)
                    module.fail_json(msg=error_msg)
                if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
                    time.sleep(1)
                elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
                    # A no-op changeset: delete it and report 'no change'.
                    cfn.delete_change_set(ChangeSetName=cs['Id'])
                    result = dict(changed=False,
                                  output='Stack is already up-to-date, Change Set refused to create due to lack of changes.')
                    module.exit_json(**result)
                else:
                    break
                # Lets not hog the cpu/spam the AWS API
                time.sleep(1)
            result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET')
            result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
                                  'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
                                  'NOTE that dependencies on this stack might fail due to pending changes!']
    except Exception as err:
        error_msg = boto_exception(err)
        if 'No updates are to be performed.' in error_msg:
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())

    if not result:
        module.fail_json(msg="empty result")
    return result
def update_stack(module, stack_params, cfn):
    # Update an existing stack and wait for completion.  When no template
    # was supplied, ask AWS to reuse the previous one.
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        stack_params['UsePreviousTemplate'] = True

    # if the state is present and the stack already exists, we try to update it.
    # AWS will tell us if the stack template and parameters are the same and
    # don't need to be updated.
    try:
        cfn.update_stack(**stack_params)
        result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', stack_params.get('ClientRequestToken', None))
    except Exception as err:
        error_msg = boto_exception(err)
        if 'No updates are to be performed.' in error_msg:
            # A no-op update is not an error: report unchanged.
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
    if not result:
        module.fail_json(msg="empty result")
    return result
def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
    '''updates termination protection of a stack'''
    if not boto_supports_termination_protection(cfn):
        module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
    stack = get_stack_facts(cfn, stack_name)
    if stack:
        # Only call the API when the current setting actually differs.
        if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
            try:
                cfn.update_termination_protection(
                    EnableTerminationProtection=desired_termination_protection_state,
                    StackName=stack_name)
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
def boto_supports_termination_protection(cfn):
    '''termination protection was added in botocore 1.7.18'''
    # Feature-detect rather than version-compare: the client object only
    # grows this method when botocore is new enough.
    supported = hasattr(cfn, "update_termination_protection")
    return supported
def stack_operation(cfn, stack_name, operation, op_token=None):
    '''gets the status of a stack while it is created/updated/deleted'''
    # Polls describe_stacks every 5s until the stack reaches a terminal state
    # for the given operation, attaching collected stack events to the result.
    existed = []
    while True:
        try:
            stack = get_stack_facts(cfn, stack_name)
            existed.append('yes')
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
            # it is meant to catch "stack no longer exists" describe errors.
            # If the stack previously existed, and now can't be found then it's
            # been deleted successfully.
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
        ret = get_stack_events(cfn, stack_name, op_token)
        if not stack:
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
                return ret
        # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
        # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
        elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
            ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
            return ret
        # note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
        elif stack['StackStatus'].endswith('_COMPLETE'):
            ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
            return ret
        elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
            return ret
        # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
        elif stack['StackStatus'].endswith('_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
            return ret
        else:
            # Non-terminal status: wait and poll again.
            # this can loop forever :/
            time.sleep(5)
    # Unreachable: the while-loop only exits via the returns above.
    return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
    """Return a deterministic change-set name for the given stack parameters.

    An explicit ChangeSetName always wins.  Otherwise derive a stable name
    from the stack name plus the SHA-1 of the canonicalised (sort_keys)
    parameter dict, so identical requests map to the same change set.
    """
    if 'ChangeSetName' in stack_params:
        return stack_params['ChangeSetName']
    canonical = json.dumps(stack_params, sort_keys=True)
    digest = sha1(to_bytes(canonical, errors='surrogate_or_strict')).hexdigest()
    return 'Ansible-{0}-{1}'.format(stack_params['StackName'], digest)
def check_mode_changeset(module, stack_params, cfn):
    """Create a change set, describe it and delete it before returning check mode outputs."""
    stack_params['ChangeSetName'] = build_changeset_name(stack_params)
    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)
    try:
        change_set = cfn.create_change_set(**stack_params)
        # Poll every 5s for up to 60 iterations (5 minutes) for a terminal state.
        for i in range(60):  # total time 5 min
            description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
            if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
                break
            time.sleep(5)
        else:
            # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
            module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
        # The change set was only needed to compute the diff; remove it.
        cfn.delete_change_set(ChangeSetName=change_set['Id'])
        reason = description.get('StatusReason')
        # FAILED + "didn't contain changes" means the stack is already up to
        # date, i.e. check mode should report changed=False.
        if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
            return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
        return {'changed': True, 'msg': reason, 'meta': description['Changes']}
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        module.fail_json(msg=error_msg, exception=traceback.format_exc())
def get_stack_facts(cfn, stack_name):
    """Describe a stack and return its description dict.

    Returns None when the stack does not exist; re-raises any other
    botocore error for the caller to handle.
    """
    try:
        stack_response = cfn.describe_stacks(StackName=stack_name)
        # describe_stacks with an explicit StackName returns exactly one entry;
        # the original post-try re-extraction of Stacks[0] was dead code.
        return stack_response['Stacks'][0]
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # A missing stack is an expected condition, not an error.
            return None
        # Any other error (throttling, permissions, ...) must surface.
        # Bare `raise` preserves the original traceback (unlike `raise err`).
        raise
def main():
    """Module entry point: build boto3 parameters, then create/update/delete
    the CloudFormation stack according to the requested state."""
    argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        template_url=dict(default=None, required=False),
        # NOTE(review): 'require' (sic) is likely a typo for 'required';
        # it is silently ignored by AnsibleModule -- confirm and fix upstream.
        template_body=dict(default=None, require=False),
        template_format=dict(default=None, choices=['json', 'yaml'], required=False),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool')
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']
    # Exactly one template source (enforced by mutually_exclusive above);
    # none at all means "use previous template" for updates.
    if module.params['template'] is not None:
        # NOTE(review): file handles opened here are never explicitly closed.
        stack_params['TemplateBody'] = open(module.params['template'], 'r').read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []
    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()
    template_parameters = module.params['template_parameters']
    stack_params['Parameters'] = [{'ParameterKey': k, 'ParameterValue': str(v)} for k, v in template_parameters.items()]
    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']
    result = {}
    try:
        region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
        cfn = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))
    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30)
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
    stack_info = get_stack_facts(cfn, stack_params['StackName'])
    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            # Existing stack in check mode: report the would-be diff via a change set.
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))
    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn)
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn)
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn)
        # format the stack output
        stack = get_stack_facts(cfn, stack_params['StackName'])
        if result.get('stack_outputs') is None:
            # always define stack_outputs, but it may be empty
            result['stack_outputs'] = {}
        for output in stack.get('Outputs', []):
            result['stack_outputs'][output['OutputKey']] = output['OutputValue']
        stack_resources = []
        reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
        for res in reslist.get('StackResourceSummaries', []):
            stack_resources.append({
                "logical_resource_id": res['LogicalResourceId'],
                "physical_resource_id": res.get('PhysicalResourceId', ''),
                "resource_type": res['ResourceType'],
                "last_updated_time": res['LastUpdatedTimestamp'],
                "status": res['ResourceStatus'],
                "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
            })
        result['stack_resources'] = stack_resources
    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it it doesn't give an error if stack isn't found
        # so must describe the stack first
        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
    if module.params['template_format'] is not None:
        result['warnings'] = [('Argument `template_format` is deprecated '
                               'since Ansible 2.3, JSON and YAML templates are now passed '
                               'directly to the CloudFormation API.')]
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
cherrygirl/micronaet7 | crm_order_analysis/order.py | 1 | 16072 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
# Utility:
def Prepare(valore):
    """Normalise a CSV field: transcode cp1252 bytes to utf-8 and trim spaces.

    Source files are produced on Windows (cp1252); OpenERP stores utf-8.
    """
    decoded = valore.decode('cp1252')
    reencoded = decoded.encode('utf-8')
    return reencoded.strip()
def PrepareDate(valore):
    """Return the given date string, or today's date (dd/mm/YYYY) when empty.

    NOTE(review): the incoming value is not validated; it is assumed to
    already be a correctly formatted date string -- confirm with the CSV
    producer (the original TODO about format checking still stands).
    """
    if valore:
        return valore
    # Local import: the module header never imports `time`, so the original
    # code raised NameError whenever the field was empty.
    import time
    return time.strftime("%d/%m/%Y")
def PrepareFloat(valore):
    """Parse an Italian-style decimal string ("1,5") into a float.

    Empty cells in the CSV mean zero.
    """
    cleaned = valore.strip()
    if not cleaned:
        return 0.0
    return float(cleaned.replace(",", "."))
class etl_order_line(osv.osv):
    ''' List of document lines used for analysis in partner view
    '''
    _name='etl.order.line'
    _order ='name'
    # ORM field declarations (legacy OpenERP 7 osv API).
    _columns = {
        'name':fields.char('Number', size=10, required=True, readonly=False),
        'date': fields.date('Date', help="Date when order is created"),
        'deadline': fields.date('Deadline', help="Deadline for statistic evaluation of delivery"),
        #'amount': fields.float('Total amount', digits=(16, 2)),
        'partner_id':fields.many2one('res.partner', 'Partner', required=False),
        'product_id':fields.many2one('product.product', 'Product', required=False),
        # Denormalised (store=True) copy of the product's chemical category.
        'chemical_category_id': fields.related('product_id', 'chemical_category_id', type = 'many2one', relation = "chemical.product.category", string='Category', readonly = True, store = True),
        'quantity': fields.float('Total amount', digits=(16, 2)), # TODO serve??
        'note': fields.text('Note'),
        # NOTE(review): 'quantity' and 'total' share the label 'Total amount'
        # -- looks like copy-paste; confirm the intended labels.
        'total': fields.float('Total amount', digits=(16, 2)),
        'delivered': fields.float('Total delivered', digits=(16, 2)),
        'expected': fields.float('Total expected', digits=(16, 2)),
        'left': fields.float('Total left', digits=(16, 2)),
        # Delivery-performance state computed by the ETL import below.
        'state':fields.selection([
            ('ok','OK'),
            ('ko','KO'),
            ('borderline','Border line'),
            ('unknow','Unknow period'),
        ],'State', select=True, readonly=False),
    }
etl_order_line()
class etl_order(osv.osv):
    ''' List of document lines used for analysis in partner view
    '''
    _name='etl.order'
    _order ='date,name'

    # Scheduled action: ########################################################
    def schedule_etl_order_import(self, cr, uid, path, file_name, header_filename, context=None):
        ''' ETL operations for import partner order in OpenERP
        (parameter setted up in scheduled action for file name)
        In this scheduled action there also importation for movement in
        general accont like BC or FT (only invoice lines)
        '''
        import logging, os, csv
        from datetime import datetime, timedelta
        _logger = logging.getLogger('crm_order_analysis')
        # ORM model proxies used during the import.
        partner_proxy=self.pool.get("res.partner")
        order_line_proxy=self.pool.get("etl.order.line")
        product_proxy=self.pool.get('product.product')
        # NOTE(review): partner_proxy is assigned twice to the same model.
        partner_proxy=self.pool.get('res.partner')
        total_order_id={} # for next importation
        counter = {'tot':0,'upd':0, 'err':0, 'err_upd':0, 'new':0}
        # Import BC e FT according to order line: ******************************
        delete_all=self.unlink(cr, uid, self.search(cr, uid, [])) # clean all DB
        # Import order:
        year_now = datetime.now().year
        # Phase 1: import document (BC/FT) lines for this and the 2 previous years.
        for year in [year_now, year_now - 1, year_now - 2]:
            try:
                file_complete = os.path.expanduser(os.path.join(path, "%s%s"%(year, file_name)))
                lines = csv.reader(open(file_complete,'rb'), delimiter = ";")
                tot_colonne=0
                for line in lines:
                    if counter['tot'] < 0: # jump n lines of header
                        counter['tot'] += 1
                    else:
                        if not tot_colonne:
                            # First data row fixes the expected column count.
                            tot_colonne=len(line)
                            _logger.info('Start sync of documents lines year %s [cols=%s, file=%s]'%(year, tot_colonne, file_name))
                        if len(line): # jump empty lines
                            if tot_colonne != len(line): # tot # of colums must be equal to # of column in first line
                                _logger.error('Colums not the same')
                                continue
                            counter['tot']+=1
                            # Positional CSV columns, consumed in order:
                            csv_id=0
                            acronym = Prepare(line[csv_id])
                            csv_id+=1
                            number = Prepare(line[csv_id])
                            csv_id+=1
                            order = Prepare(line[csv_id])
                            csv_id+=1
                            product_id = Prepare(line[csv_id])
                            csv_id+=1
                            # descrizione aggiuntiva
                            csv_id+=1
                            unit = PrepareFloat(line[csv_id])
                            csv_id+=1
                            amount = PrepareFloat(line[csv_id])
                            csv_id+=1
                            date = Prepare(line[csv_id])
                            csv_id+=1
                            partner_id = Prepare(line[csv_id])
                            # calculated fields:
                            name = "%s-%s-%s"%(acronym, year, number)
                            date = "%s-%s-%s"%(date[:4],date[4:6],date[-2:]) if date else False
                            year_analysis=date[:4] if date else False
                            # Map external codes to OpenERP ids (False when not found).
                            product_id = product_proxy.search(cr, uid, [('default_code','=',product_id)])
                            if product_id:
                                product_id = product_id[0]
                            else:
                                product_id = False
                            partner_id = partner_proxy.search(cr, uid, [('mexal_c','=',partner_id)])
                            if partner_id:
                                partner_id = partner_id[0]
                            else:
                                partner_id = False
                            # Total for update in next importation
                            if (order,product_id) not in total_order_id:
                                total_order_id[(order,product_id)] = amount or 0.0
                            else:
                                total_order_id[(order,product_id)] += amount or 0.0
                            data_line = {
                                'name': name,
                                'type': acronym,
                                'date': date,
                                'year': year_analysis,
                                'product_id': product_id,
                                'partner_id': partner_id,
                                'quantity': unit,
                                'unit': amount / unit if unit!=0.0 else 0.0,
                                'amount': amount,
                                'order': order,
                            }
                            new_id=self.create(cr, uid, data_line)
                _logger.info('End import movement lines year %s, total: [%s]'%(year, counter['tot']))
            except:
                # NOTE(review): bare except hides the real error; only logged.
                _logger.error('Error generic import movement lines year %s, total: [%s]'%(year, counter['tot']))
        counter = {'tot':0,'upd':0, 'err':0, 'err_upd':0, 'new':0}
        # Phase 2: import order headers into etl.order.line and compute the
        # delivery-performance statistics against the totals from phase 1.
        # Import etl.order.line **********************************************
        delete_all=order_line_proxy.unlink(cr, uid, order_line_proxy.search(cr, uid, [])) # clean all DB
        try: # test error during importation:
            file_complete = os.path.expanduser(os.path.join(path, header_filename))
            lines = csv.reader(open(file_complete,'rb'), delimiter = ";")
            tot_colonne=0
            for line in lines:
                if counter['tot'] < 0: # jump n lines of header
                    counter['tot'] += 1
                else:
                    if not tot_colonne:
                        tot_colonne=len(line)
                        _logger.info('Start sync of order header [cols=%s, file=%s]'%(tot_colonne, header_filename))
                    if len(line): # jump empty lines
                        if tot_colonne != len(line): # tot # of colums must be equal to # of column in first line
                            _logger.error('Order file: Colums not the same')
                            continue
                        counter['tot']+=1
                        csv_id=0
                        partner_code = Prepare(line[csv_id]) # ref customer
                        csv_id+=1
                        # Description partner
                        csv_id+=1
                        order = Prepare(line[csv_id]) # num order
                        csv_id+=1
                        date = Prepare(line[csv_id]) # date
                        csv_id+=1
                        deadline = Prepare(line[csv_id]) # deadline
                        csv_id+=1
                        default_code = Prepare(line[csv_id]) # ref product
                        csv_id+=1
                        # product name
                        csv_id+=1
                        quantity = PrepareFloat(line[csv_id]) # quantity
                        csv_id+=1
                        line_state = Prepare(line[csv_id]) # state
                        csv_id+=1
                        note = Prepare(line[csv_id]) # note
                        csv_id+=1
                        # get product:
                        product_id = product_proxy.search(cr, uid, [('default_code','=',default_code)])
                        if product_id:
                            product_id = product_id[0]
                        else:
                            product_id = False
                        # get partner:
                        partner_id = partner_proxy.search(cr, uid, [('mexal_c','=',partner_code)])
                        if partner_id:
                            partner_id = partner_id[0]
                        else:
                            partner_id = False
                        # calculated fields:
                        date = "%s-%s-%s"%(date[:4],date[4:6],date[-2:]) if date else False
                        deadline = "%s-%s-%s"%(deadline[:4],deadline[4:6],deadline[-2:]) if deadline else False
                        # statistic calculation:
                        delivered = (total_order_id[(order,product_id)] if (order,product_id) in total_order_id else 0.0)
                        total = quantity + delivered
                        now_quantity=0.0
                        if date and deadline:
                            # Linear expectation: quantity due proportionally to elapsed time.
                            now_date = datetime.strptime(datetime.now().strftime("%Y-%m-%d"),"%Y-%m-%d") #datetime.now()
                            from_date = datetime.strptime(date, "%Y-%m-%d")
                            to_date = datetime.strptime(deadline, "%Y-%m-%d")
                            interval= (to_date - from_date).days
                            now_interval = (now_date - from_date).days
                            # NOTE(review): ZeroDivisionError when deadline == date;
                            # swallowed by the outer bare except -- confirm acceptable.
                            now_quantity = total * now_interval / interval
                            if now_quantity > quantity:
                                state='ok'
                            else:
                                state='ko'
                            # TODO borderline status to evaluate in perc. !!!!
                        else:
                            state='unknow' # no from / to period so no statistics
                        data = {
                            'name': order,
                            'partner_id': partner_id,
                            'quantity': quantity,
                            'date': date,
                            'deadline': deadline,
                            'product_id': product_id,
                            'note': note,
                            'total': total, # Total order
                            'delivered': delivered,
                            'expected': now_quantity, # Total order
                            'left': quantity, # To delivery (order value)
                            'state': state, # 3 state value depend on period valutation
                        }
                        create_order_id = order_line_proxy.create(cr, uid, data)
            # NOTE(review): 'year' here leaks from the phase-1 loop above.
            _logger.info('End import order headers %s, total: [%s]'%(year, counter['tot']))
        except:
            _logger.error('Error generic import order headers, total: [%s]'%(counter['tot']))
        return

    # ORM field declarations (legacy OpenERP 7 osv API).
    _columns = {
        'name': fields.char('Document', size=15, help="Document-Year-Number"),
        'type':fields.selection([
            ('BC','DDT'),
            ('FT','Invoice'),
            ('NC','Credit note'),
            ('RC','Refund'),
        ],'Type', select=True, readonly=False),
        'year': fields.char('Year', size=4), #fields.date('Date'),
        'date': fields.date('Date'),
        'product_id': fields.many2one('product.product', 'Product'),
        'chemical_category_id': fields.related('product_id', 'chemical_category_id', type = 'many2one', relation = "chemical.product.category", string='Category', readonly = True, store = True),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'amount': fields.float('Total', digits=(16,2),),
        'unit': fields.float('Price unit.', digits=(16,2),),
        'quantity': fields.float('Q.', digits=(16,2),),
        # refer to order:
        'order': fields.char('Order number', size=15, help="Number of order that start the sale"),
    }
etl_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EnderCheng/pyspider | pyspider/libs/utils.py | 10 | 11520 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-06 11:50:13
import logging
import hashlib
import datetime
import base64
import six
from six import iteritems
def md5string(x):
    """Return the hex md5 digest of *x* (coerced to utf8 bytes via utf8())."""
    # def instead of a lambda assignment (PEP 8 E731); same callable interface.
    return hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
    """A dict subclass that rejects item assignment after construction.

    Reads (and every other dict operation) are inherited unchanged.
    """

    def __setitem__(self, key, value):
        raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
    """Get obj[key], returning *default* on any lookup failure."""
    try:
        return obj[key]
    except Exception:
        # Narrowed from a bare ``except:`` -- still returns the default for
        # IndexError/KeyError/TypeError etc., but no longer swallows
        # SystemExit/KeyboardInterrupt.
        return default
def hide_me(tb, g=globals()):
    """Hide stack traceback of given stack"""
    # NOTE(review): the default g=globals() is captured once at import time
    # (this module's globals) -- that appears to be the intent here.
    base_tb = tb
    try:
        # Skip leading frames not belonging to module g, then skip the run of
        # frames that do belong to g, leaving the caller-relevant tail.
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = base_tb
    # Fall back to the full traceback rather than returning nothing.
    if not tb:
        tb = base_tb
    return tb
def run_in_thread(func, *args, **kwargs):
    """Run function in thread, return a Thread object"""
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    # Daemonic: don't block interpreter shutdown.
    worker.daemon = True
    worker.start()
    return worker
def run_in_subprocess(func, *args, **kwargs):
    """Run function in subprocess, return a Process object"""
    from multiprocessing import Process
    # Daemonic: the child is terminated when the parent exits.
    proc = Process(target=func, args=args, kwargs=kwargs)
    proc.daemon = True
    proc.start()
    return proc
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
    """Formats the given date (which should be GMT).
    By default, we return a relative time (e.g., "2 minutes ago"). You
    can return an absolute date string with ``relative=False``.
    You can force a full format date ("July 10, 1980") with
    ``full_format=True``.
    This method is primarily intended for dates in the past.
    For dates in the future, we fall back to full format.
    From tornado
    """
    if not date:
        return '-'
    # Accept epoch timestamps as well as datetime objects.
    if isinstance(date, float) or isinstance(date, int):
        date = datetime.datetime.utcfromtimestamp(date)
    now = datetime.datetime.utcnow()
    if date > now:
        if relative and (date - now).seconds < 60:
            # NOTE(review): timedelta.seconds ignores the .days component, so
            # a date whole days ahead (with a small seconds remainder) also
            # passes this test -- confirm whether that is intended.
            # Due to click skew, things are some things slightly
            # in the future. Round timestamps in the immediate
            # future down to now in relative mode.
            date = now
        else:
            # Otherwise, future dates always use the full format.
            full_format = True
    local_date = date - datetime.timedelta(minutes=gmt_offset)
    local_now = now - datetime.timedelta(minutes=gmt_offset)
    local_yesterday = local_now - datetime.timedelta(hours=24)
    difference = now - date
    seconds = difference.seconds
    days = difference.days
    format = None
    if not full_format:
        # Progressively coarser relative buckets: seconds -> minutes -> hours.
        if relative and days == 0:
            if seconds < 50:
                return ("1 second ago" if seconds <= 1 else
                        "%(seconds)d seconds ago") % {"seconds": seconds}
            if seconds < 50 * 60:
                minutes = round(seconds / 60.0)
                return ("1 minute ago" if minutes <= 1 else
                        "%(minutes)d minutes ago") % {"minutes": minutes}
            hours = round(seconds / (60.0 * 60))
            return ("1 hour ago" if hours <= 1 else
                    "%(hours)d hours ago") % {"hours": hours}
        if days == 0:
            format = "%(time)s"
        elif days == 1 and local_date.day == local_yesterday.day and \
                relative:
            format = "yesterday" if shorter else "yesterday at %(time)s"
        elif days < 5:
            format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
        elif days < 334:  # 11mo, since confusing for same month last year
            format = "%(month_name)s-%(day)s" if shorter else \
                "%(month_name)s-%(day)s at %(time)s"
    if format is None:
        format = "%(month_name)s %(day)s, %(year)s" if shorter else \
            "%(month_name)s %(day)s, %(year)s at %(time)s"
    str_time = "%d:%02d" % (local_date.hour, local_date.minute)
    return format % {
        # NOTE(review): despite the name, "month_name" is a zero-based month
        # NUMBER (January == 0) and "weekday" is weekday() (Monday == 0), not
        # localized names as the format strings suggest -- confirm intent.
        "month_name": local_date.month - 1,
        "weekday": local_date.weekday(),
        "day": str(local_date.day),
        "year": str(local_date.year),
        "time": str_time
    }
# Raised by the `timeout` context manager below when the alarm fires.
# NOTE(review): shadows the builtin TimeoutError on Python 3.3+ -- confirm
# no caller relies on catching the builtin before renaming.
class TimeoutError(Exception):
    pass
# Provide a `timeout` context manager: SIGALRM-based where available,
# otherwise (e.g. Windows) a no-op stand-in with the same interface.
try:
    import signal
    if not hasattr(signal, 'SIGALRM'):
        raise ImportError('signal')

    class timeout:
        """
        Time limit of command

        with timeout(3):
            time.sleep(10)
        """
        def __init__(self, seconds=1, error_message='Timeout'):
            self.seconds = seconds
            self.error_message = error_message

        def handle_timeout(self, signum, frame):
            # SIGALRM handler: abort the guarded block.
            raise TimeoutError(self.error_message)

        def __enter__(self):
            # seconds == 0/None disables the guard entirely.
            if self.seconds:
                signal.signal(signal.SIGALRM, self.handle_timeout)
                signal.alarm(self.seconds)

        def __exit__(self, type, value, traceback):
            if self.seconds:
                # Cancel any pending alarm on exit.
                signal.alarm(0)
except ImportError:
    class timeout:
        """
        Time limit of command (for windows)
        """
        # No SIGALRM on this platform: the guard is a no-op.
        def __init__(self, seconds=1, error_message='Timeout'):
            pass

        def __enter__(self):
            pass

        def __exit__(self, type, value, traceback):
            pass
def utf8(string):
    """
    Make sure string is utf8 encoded bytes.

    If the parameter is any other object, it is converted to text via its
    __str__ before being encoded as bytes.
    """
    if isinstance(string, six.text_type):
        return string.encode('utf8')
    elif isinstance(string, six.binary_type):
        return string
    else:
        # six.text_type instead of the Python-2-only ``unicode`` builtin,
        # which raised NameError on Python 3 for non-string input.
        return six.text_type(string).encode('utf8')
def text(string, encoding='utf8'):
    """Coerce *string* to unicode text, decoding bytes with *encoding*.

    Any other object is converted via its __str__.
    """
    if isinstance(string, six.binary_type):
        return string.decode(encoding)
    if isinstance(string, six.text_type):
        return string
    return six.text_type(string)
def pretty_unicode(string):
    """
    Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Escape undecodable bytes, then decode back to text: on Python 3,
        # encode('unicode_escape') yields bytes, so the original returned
        # bytes here and violated the "returns unicode" contract.
        return string.decode('Latin-1').encode('unicode_escape').decode('ascii')
def unicode_string(string):
    """
    Make sure string is unicode, try to default with utf8, or base64 if failed.

    can been decode by `decode_unicode_string`
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # b64encode returns bytes on Python 3; decode to ascii text before
        # concatenating with the str markers (the original raised TypeError).
        return '[BASE64-DATA]' + base64.b64encode(string).decode('ascii') + '[/BASE64-DATA]'
def unicode_dict(_dict):
    """
    Make sure keys and values of dict is unicode.
    """
    return {unicode_string(key): unicode_obj(value)
            for key, value in iteritems(_dict)}
def unicode_list(_list):
    """
    Make sure every element in list is unicode. bytes will encode in base64
    """
    result = []
    for element in _list:
        result.append(unicode_obj(element))
    return result
def unicode_obj(obj):
    """
    Make sure keys and values of dict/list/tuple is unicode. bytes will encode in base64.
    Can been decode by `decode_unicode_obj`
    """
    # Pass scalars through untouched; recurse into containers.
    if obj is None or isinstance(obj, (int, float)):
        return obj
    if isinstance(obj, dict):
        return unicode_dict(obj)
    if isinstance(obj, (list, tuple)):
        return unicode_list(obj)
    if isinstance(obj, six.string_types):
        return unicode_string(obj)
    # Anything else: best-effort text conversion, falling back to repr().
    try:
        return text(obj)
    except:
        return text(repr(obj))
def decode_unicode_string(string):
    """
    Decode string encoded by `unicode_string`
    """
    prefix = '[BASE64-DATA]'
    suffix = '[/BASE64-DATA]'
    if string.startswith(prefix) and string.endswith(suffix):
        # Strip the markers and decode the base64 payload back to bytes.
        return base64.b64decode(string[len(prefix):-len(suffix)])
    return string
def decode_unicode_obj(obj):
    """
    Decode unicoded dict/list/tuple encoded by `unicode_obj`
    """
    if isinstance(obj, dict):
        return dict((decode_unicode_string(key), decode_unicode_obj(value))
                    for key, value in iteritems(obj))
    if isinstance(obj, six.string_types):
        return decode_unicode_string(obj)
    if isinstance(obj, (list, tuple)):
        return [decode_unicode_obj(element) for element in obj]
    # Non-container, non-string values pass through unchanged.
    return obj
class Get(object):
    """Descriptor that lazily computes its value on every attribute access.

    Wrap a zero-argument callable; reading the attribute invokes it (no
    caching is performed).
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        return self.getter()
class ObjectDict(dict):
    """Dict whose items are also reachable as attributes (d.key == d['key']).

    Values exposing the descriptor protocol (e.g. `Get`) are evaluated on
    attribute access instead of being returned raw.
    """

    def __getattr__(self, name):
        value = self[name]
        if hasattr(value, '__get__'):
            # Descriptor-like value: evaluate it lazily.
            return value.__get__(self, ObjectDict)
        return value
def load_object(name):
    """Resolve a dotted ``module.attribute`` path and return the attribute."""
    if "." not in name:
        raise Exception('load object need module.object')
    mod_path, attr = name.rsplit('.', 1)
    # Python 2's __import__ wants byte-string fromlist entries and a level
    # argument of -1; Python 3 takes the attribute name directly.
    if six.PY2:
        mod = __import__(mod_path, globals(), locals(), [utf8(attr)], -1)
    else:
        mod = __import__(mod_path, globals(), locals(), [attr])
    return getattr(mod, attr)
def get_python_console(namespace=None):
    """
    Return a interactive python console instance with caller's stack
    """
    if namespace is None:
        # Capture the caller's globals + locals so the console sees its scope.
        import inspect
        frame = inspect.currentframe()
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    try:
        # Prefer IPython when it is installed.
        from IPython.terminal.interactiveshell import TerminalInteractiveShell
        shell = TerminalInteractiveShell(user_ns=namespace)
    except ImportError:
        # Fallback: stdlib console, with tab completion if readline exists.
        try:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(namespace).complete)
            readline.parse_and_bind("tab: complete")
        except ImportError:
            pass
        import code
        shell = code.InteractiveConsole(namespace)
        # _quit flag lets the injected exit() end the interact loop via EOFError.
        shell._quit = False

        def exit():
            shell._quit = True

        def readfunc(prompt=""):
            if shell._quit:
                raise EOFError
            return six.moves.input(prompt)

        # inject exit method
        shell.ask_exit = exit
        shell.raw_input = readfunc
    return shell
def python_console(namespace=None):
    """Start a interactive python console with caller's stack"""
    if namespace is None:
        import inspect
        # Build the namespace from the caller's frame (globals then locals).
        current = inspect.currentframe()
        caller = current.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = current
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    return get_python_console(namespace=namespace).interact()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.