repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
171121130/SWI
venv/Scripts/activate_this.py
1076
1137
"""By using execfile(this_file, dict(__file__=this_file)) you will activate this virtualenv environment. This can be used when you must use an existing Python interpreter, not the virtualenv bin/python """ try: __file__ except NameError: raise AssertionError( "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))") import sys import os old_os_path = os.environ.get('PATH', '') os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path base = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if sys.platform == 'win32': site_packages = os.path.join(base, 'Lib', 'site-packages') else: site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages') prev_sys_path = list(sys.path) import site site.addsitedir(site_packages) sys.real_prefix = sys.prefix sys.prefix = base # Move the added items to the front of the path: new_sys_path = [] for item in list(sys.path): if item not in prev_sys_path: new_sys_path.append(item) sys.path.remove(item) sys.path[:0] = new_sys_path
mit
isauragalafate/drupal8
vendor/psy/psysh/test/tools/vis.py
710
3428
""" vis.py ====== Ctypes based module to access libbsd's strvis & strunvis functions. The `vis` function is the equivalent of strvis. The `unvis` function is the equivalent of strunvis. All functions accept unicode string as input and return a unicode string. Constants: ---------- * to select alternate encoding format `VIS_OCTAL`: use octal \ddd format `VIS_CSTYLE`: use \[nrft0..] where appropiate * to alter set of characters encoded (default is to encode all non-graphic except space, tab, and newline). `VIS_SP`: also encode space `VIS_TAB`: also encode tab `VIS_NL`: also encode newline `VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL) `VIS_SAFE`: only encode "unsafe" characters * other `VIS_NOSLASH`: inhibit printing '\' `VIS_HTTP1808`: http-style escape % hex hex `VIS_HTTPSTYLE`: http-style escape % hex hex `VIS_MIMESTYLE`: mime-style escape = HEX HEX `VIS_HTTP1866`: http-style &#num; or &string; `VIS_NOESCAPE`: don't decode `\' `VIS_GLOB`: encode glob(3) magic characters :Authors: - ju1ius (http://github.com/ju1ius) :Version: 1 :Date: 2014-01-05 """ from ctypes import CDLL, c_char_p, c_int from ctypes.util import find_library __all__ = [ 'vis', 'unvis', 'VIS_OCTAL', 'VIS_CSTYLE', 'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE', 'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE', 'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB' ] ############################################################# # Constants from bsd/vis.h ############################################################# #to select alternate encoding format VIS_OCTAL = 0x0001 VIS_CSTYLE = 0x0002 # to alter set of characters encoded # (default is to encode all non-graphic except space, tab, and newline). 
VIS_SP = 0x0004 VIS_TAB = 0x0008 VIS_NL = 0x0010 VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL VIS_SAFE = 0x0020 # other VIS_NOSLASH = 0x0040 VIS_HTTP1808 = 0x0080 VIS_HTTPSTYLE = 0x0080 VIS_MIMESTYLE = 0x0100 VIS_HTTP1866 = 0x0200 VIS_NOESCAPE = 0x0400 VIS_GLOB = 0x1000 ############################################################# # Import libbsd/vis functions ############################################################# _libbsd = CDLL(find_library('bsd')) _strvis = _libbsd.strvis _strvis.argtypes = [c_char_p, c_char_p, c_int] _strvis.restype = c_int _strunvis = _libbsd.strunvis _strvis.argtypes = [c_char_p, c_char_p] _strvis.restype = c_int def vis(src, flags=VIS_WHITE): """ Encodes the string `src` into libbsd's vis encoding. `flags` must be one of the VIS_* constants C definition: int strvis(char *dst, char *src, int flags); """ src = bytes(src, 'utf-8') dst_p = c_char_p(bytes(len(src) * 4)) src_p = c_char_p(src) flags = c_int(flags) bytes_written = _strvis(dst_p, src_p, flags) if -1 == bytes_written: raise RuntimeError('vis failed to encode string "{}"'.format(src)) return dst_p.value.decode('utf-8') def unvis(src): """ Decodes a string encoded by vis. C definition: int strunvis(char *dst, char *src); """ src = bytes(src, 'utf-8') dst_p = c_char_p(bytes(len(src))) src_p = c_char_p(src) bytes_written = _strunvis(dst_p, src_p) if -1 == bytes_written: raise RuntimeError('unvis failed to decode string "{}"'.format(src)) return dst_p.value.decode('utf-8')
gpl-2.0
asinyagin/solidity
docs/conf.py
1
8102
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Solidity documentation build configuration file, created by # sphinx-quickstart on Mon Dec 7 12:32:57 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. def setup(sphinx): sys.path.insert(0, os.path.abspath('./utils')) from SolidityLexer import SolidityLexer sphinx.add_lexer('Solidity', SolidityLexer()) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Solidity' copyright = '2016, Ethereum' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.4.5' # The full version, including alpha/beta/rc tags. release = '0.4.5-develop' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' highlight_language = 'Solidity' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Soliditydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'solidity.tex', 'Solidity Documentation', 'Ethereum', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
gpl-3.0
matthiasdiener/spack
var/spack/repos/builtin/packages/py-psyclone/package.py
2
2637
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## # from spack import * class PyPsyclone(PythonPackage): """Code generation for the PSyKAl framework from the GungHo project, as used by the LFRic model at the UK Met Office.""" homepage = "https://github.com/stfc/PSyclone" url = "https://github.com/stfc/PSyclone/archive/1.5.1.tar.gz" giturl = "https://github.com/stfc/PSyclone.git" version('1.5.1', git=giturl, commit='eba7a097175b02f75dec70616cf267b7b3170d78') version('develop', git=giturl, branch='master') depends_on('py-setuptools', type='build') depends_on('py-pyparsing', type=('build', 'run')) # Test cases fail without compatible versions of py-fparser: depends_on('py-fparser@0.0.5', type=('build', 'run'), when='@1.5.1') depends_on('py-fparser', type=('build', 'run'), when='@1.5.2:') # Dependencies only 
required for tests: depends_on('py-numpy', type='test') depends_on('py-nose', type='test') depends_on('py-pytest', type='test') @run_after('install') @on_package_attributes(run_tests=True) def check_build(self): # Limit py.test to search inside the build tree: touch('pytest.ini') with working_dir('src'): Executable('py.test')() def setup_environment(self, spack_env, run_env): # Allow testing with installed executables: spack_env.prepend_path('PATH', self.prefix.bin)
lgpl-2.1
cs-hse-projects/DataSpider_Sergienko
newsgrab/lifenews.py
1
6970
#!/usr/bin/env python # -*- coding: utf-8 -*- # For python version 2! import json, unittest from grab.spider import Task import utils, likes from utils import FakeGrab, FakeTask class Module(object): name = 'lifenews' def __init__(self, config): default_config = { 'conflict' : 'ignore', # conflict option for tables 'likes_types' : None, # list of like types from likes module, # or None --- all possible likes 'ids_range' : [0, 'auto'], # ids can be positive integers --- actual id, # last id may be "auto" --- get last id from index page, # first id may be negative integer --- first id minus value 'order' : 'desc', # "asc" / "desc" / "rand" 'limit' : None, # None --- get all or positive integer } self.config = config or default_config tables = [ ('lifenews_news', ['url', 'timestamp', 'title', 'description', 'image', 'time', 'views_count', 'comments_count', 'article_title', 'article_subtitle', 'tags', 'article_content']), ('lifenews_statuses', ['url', 'timestamp', 'status']), ('lifenews_comments', ['url', 'timestamp', 'count']), ] self.repair_cache = [] self.schema = utils.mono_schema(tables, self.config['conflict'], 20) self.ids_range = self.config['ids_range'][:] self.major_tasks = ['task_lifenews_index', 'task_lifenews_page'] self.generator_state = '' def generate_tasks(self): if self.ids_range[1] == 'auto' or self.ids_range[1] < 0: # print('first task') yield Task('lifenews_index', url='http://lifenews.ru/') while self.ids_range[1] == 'auto' or self.ids_range[1] < 0: yield None begin, end = self.ids_range if isinstance(begin, int) and begin < 0: begin = end + begin for page_id in utils.indexes_generator(begin, end, self.config['limit'], self.config['order']): self.generator_state = page_id url = 'http://lifenews.ru/news/' + str(page_id) if self.db.can_write('lifenews_statuses', url): yield Task('lifenews_page', url=url) comments_url = 'http://lifenews.ru/comments/post/' + str(page_id) if self.db.can_write('lifenews_comments', url): # print('target_url = ' + 
str(url)) yield Task('lifenews_comments', url=comments_url, target_url=url) for t in likes.generate_tasks(self.db, url, self.config['likes_types']): yield t def task_lifenews_index(self, grab, task): article_id = grab.doc.select('//meta[@itemprop="url"]/@content').number() if self.ids_range[1] == 'auto': self.ids_range[1] = article_id else: self.ids_range[1] = article_id + self.ids_range[1] def task_lifenews_comments(self, grab, task): if grab.response.code == 404: self.db.save_data('lifenews_comments', { 'url' : task.target_url, 'count': -1 }) return body = grab.response.unicode_body() count = len(json.loads(body)) self.db.save_data('lifenews_comments', { 'url' : task.target_url, 'count': count }) def task_lifenews_page(self, grab, task): self.db.save_data('lifenews_statuses', { 'url' : task.url, 'status' : str(grab.response.code) }) if grab.response.code == 404: return article = grab.doc.select('//section[@id="publication"]/article') tags = [] for tag in article.select('//ul[@class="tags"]/li/a'): tags.append(tag.text()) tags_str = ";".join(tags) views_elem = article.select('//div[@class="counters"]/div[@class="views"]') comments_elem = article.select('//div[@class="counters"]/div[@class="comments"]/span[@class="counter"]') self.db.save_data('lifenews_news', { 'url' : task.url, 'title' : grab.doc.select('//title').text(), 'description' : grab.doc.select('//meta[@name="description"]/@content').text(), 'image' : grab.doc.select('//meta[@name="twitter:image:src"]/@content').text(), 'time' : article.select('//time/@datetime').text(), 'views_count' : views_elem.number() if views_elem else -239, 'comments_count' : comments_elem.number() if comments_elem else -239, 'article_title' : article.select('//h1').text(), 'article_subtitle' : article.select('//h2').text(), 'tags' : tags_str, 'article_content' : article.select('//div[@class="note"]').html(), }) ############## # UNIT TESTS # ############## class Test(unittest.TestCase): def test_generator(self): parser = 
utils.set_test_db(Module(None)) gen = parser.generate_tasks() first = gen.next() self.assertIsNotNone(first) self.assertIsNone(gen.next()) self.assertEqual(first.url, 'http://lifenews.ru/') parser.ids_range = [10, 20] gen = list(parser.generate_tasks()) self.assertEqual(len(gen), 10 * len(likes.all_likes_types) + 10 + 10) parser.ids_range = [-11, 30] gen = list(parser.generate_tasks()) self.assertEqual(len(gen), 11 * len(likes.all_likes_types) + 11 + 11) def test_index(self): parser = utils.set_test_db(Module(None)) self.assertEqual(parser.ids_range[1], 'auto') parser.task_lifenews_index(FakeGrab('lifenews_index_1'), FakeTask(url='fake.url')) self.assertEqual(parser.ids_range[1], 155692) parser.ids_range[1] = -100 parser.task_lifenews_index(FakeGrab('lifenews_index_1'), FakeTask(url='fake.url')) self.assertEqual(parser.ids_range[1], 155592) def test_lifenews_page(self): parser = utils.set_test_db(Module(None)) grub = FakeGrab('lifenews_page_1') parser.task_lifenews_page(grub, FakeTask(url='test.url')) self.assertEqual(parser.db.xdump('lifenews_statuses'), [('test.url', '200')]) grub = FakeGrab('lifenews_page_1') grub.response.code = 404 parser.task_lifenews_page(grub, FakeTask(url='test2.url')) self.assertEqual(parser.db.xdump('lifenews_statuses'), [('test.url', '200'), ('test2.url', '404')]) def test_lifenews_comments(self): parser = utils.set_test_db(Module(None)) parser.task_lifenews_comments(FakeGrab('lifenews_comments_1'), FakeTask(url='test.url', target_url='target.url')) grub = FakeGrab('lifenews_comments_1') grub.response.code = 404 parser.task_lifenews_comments(grub, FakeTask(url='test.url', target_url='target.url')) if __name__ == '__main__': unittest.main()
mit
orion1024/Sick-Beard
lib/hachoir_core/field/parser.py
90
1440
from lib.hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN from lib.hachoir_core.field import GenericFieldSet from lib.hachoir_core.log import Logger import lib.hachoir_core.config as config class Parser(GenericFieldSet): """ A parser is the root of all other fields. It create first level of fields and have special attributes and methods: - endian: Byte order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}) of input data ; - stream: Data input stream (set in L{__init__()}) ; - size: Field set size will be size of input stream. """ def __init__(self, stream, description=None): """ Parser constructor @param stream: Data input stream (see L{InputStream}) @param description: (optional) String description """ # Check arguments assert hasattr(self, "endian") \ and self.endian in (BIG_ENDIAN, LITTLE_ENDIAN) # Call parent constructor GenericFieldSet.__init__(self, None, "root", stream, description, stream.askSize(self)) def _logger(self): return Logger._logger(self) def _setSize(self, size): self._truncate(size) self.raiseEvent("field-resized", self) size = property(lambda self: self._size, doc="Size in bits") path = property(lambda self: "/") # dummy definition to prevent hachoir-core from depending on hachoir-parser autofix = property(lambda self: config.autofix)
gpl-3.0
gauravbose/digital-menu
django/core/management/templates.py
82
13089
import cgi import errno import mimetypes import os import posixpath import re import shutil import stat import sys import tempfile from os import path import django from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import handle_extensions from django.template import Context, Engine from django.utils import archive from django.utils._os import rmtree_errorhandler from django.utils.six.moves.urllib.request import urlretrieve from django.utils.version import get_docs_version _drive_re = re.compile('^([a-z]):', re.I) _url_drive_re = re.compile('^([a-z])[:|]', re.I) class TemplateCommand(BaseCommand): """ Copies either a Django application layout template or a Django project layout template into the specified directory. :param style: A color style object (see django.core.management.color). :param app_or_project: The string 'app' or 'project'. :param name: The name of the application or project. :param directory: The directory to which the template should be copied. :param options: The additional variables passed to project or app templates """ requires_system_checks = False # Can't import settings during this command, because they haven't # necessarily been created. can_import_settings = False # The supported URL schemes url_schemes = ['http', 'https', 'ftp'] # Can't perform any active locale changes during this command, because # setting might not be available at all. leave_locale_alone = True def add_arguments(self, parser): parser.add_argument('name', help='Name of the application or project.') parser.add_argument('directory', nargs='?', help='Optional destination directory') parser.add_argument('--template', help='The path or URL to load the template from.') parser.add_argument('--extension', '-e', dest='extensions', action='append', default=['py'], help='The file extension(s) to render (default: "py"). 
' 'Separate multiple extensions with commas, or use ' '-e multiple times.') parser.add_argument('--name', '-n', dest='files', action='append', default=[], help='The file name(s) to render. ' 'Separate multiple extensions with commas, or use ' '-n multiple times.') def handle(self, app_or_project, name, target=None, **options): self.app_or_project = app_or_project self.paths_to_remove = [] self.verbosity = options['verbosity'] self.validate_name(name, app_or_project) # if some directory is given, make sure it's nicely expanded if target is None: top_dir = path.join(os.getcwd(), name) try: os.makedirs(top_dir) except OSError as e: if e.errno == errno.EEXIST: message = "'%s' already exists" % top_dir else: message = e raise CommandError(message) else: top_dir = os.path.abspath(path.expanduser(target)) if not os.path.exists(top_dir): raise CommandError("Destination directory '%s' does not " "exist, please create it first." % top_dir) extensions = tuple(handle_extensions(options['extensions'])) extra_files = [] for file in options['files']: extra_files.extend(map(lambda x: x.strip(), file.split(','))) if self.verbosity >= 2: self.stdout.write("Rendering %s template files with " "extensions: %s\n" % (app_or_project, ', '.join(extensions))) self.stdout.write("Rendering %s template files with " "filenames: %s\n" % (app_or_project, ', '.join(extra_files))) base_name = '%s_name' % app_or_project base_subdir = '%s_template' % app_or_project base_directory = '%s_directory' % app_or_project context = Context(dict(options, **{ base_name: name, base_directory: top_dir, 'docs_version': get_docs_version(), 'django_version': django.__version__, }), autoescape=False) # Setup a stub settings environment for template rendering from django.conf import settings if not settings.configured: settings.configure() template_dir = self.handle_template(options['template'], base_subdir) prefix_length = len(template_dir) + 1 for root, dirs, files in os.walk(template_dir): path_rest = 
root[prefix_length:] relative_dir = path_rest.replace(base_name, name) if relative_dir: target_dir = path.join(top_dir, relative_dir) if not path.exists(target_dir): os.mkdir(target_dir) for dirname in dirs[:]: if dirname.startswith('.') or dirname == '__pycache__': dirs.remove(dirname) for filename in files: if filename.endswith(('.pyo', '.pyc', '.py.class')): # Ignore some files as they cause various breakages. continue old_path = path.join(root, filename) new_path = path.join(top_dir, relative_dir, filename.replace(base_name, name)) if path.exists(new_path): raise CommandError("%s already exists, overlaying a " "project or app into an existing " "directory won't replace conflicting " "files" % new_path) # Only render the Python files, as we don't want to # accidentally render Django templates files with open(old_path, 'rb') as template_file: content = template_file.read() if filename.endswith(extensions) or filename in extra_files: content = content.decode('utf-8') template = Engine().from_string(content) content = template.render(context) content = content.encode('utf-8') with open(new_path, 'wb') as new_file: new_file.write(content) if self.verbosity >= 2: self.stdout.write("Creating %s\n" % new_path) try: shutil.copymode(old_path, new_path) self.make_writeable(new_path) except OSError: self.stderr.write( "Notice: Couldn't set permission bits on %s. You're " "probably using an uncommon filesystem setup. No " "problem." % new_path, self.style.NOTICE) if self.paths_to_remove: if self.verbosity >= 2: self.stdout.write("Cleaning up temporary files.\n") for path_to_remove in self.paths_to_remove: if path.isfile(path_to_remove): os.remove(path_to_remove) else: shutil.rmtree(path_to_remove, onerror=rmtree_errorhandler) def handle_template(self, template, subdir): """ Determines where the app or project templates are. Use django.__path__[0] as the default because we don't know into which directory Django has been installed. 
""" if template is None: return path.join(django.__path__[0], 'conf', subdir) else: if template.startswith('file://'): template = template[7:] expanded_template = path.expanduser(template) expanded_template = path.normpath(expanded_template) if path.isdir(expanded_template): return expanded_template if self.is_url(template): # downloads the file and returns the path absolute_path = self.download(template) else: absolute_path = path.abspath(expanded_template) if path.exists(absolute_path): return self.extract(absolute_path) raise CommandError("couldn't handle %s template %s." % (self.app_or_project, template)) def validate_name(self, name, app_or_project): if name is None: raise CommandError("you must provide %s %s name" % ( "an" if app_or_project == "app" else "a", app_or_project)) # If it's not a valid directory name. if not re.search(r'^[_a-zA-Z]\w*$', name): # Provide a smart error message, depending on the error. if not re.search(r'^[_a-zA-Z]', name): message = 'make sure the name begins with a letter or underscore' else: message = 'use only numbers, letters and underscores' raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message)) def download(self, url): """ Downloads the given URL and returns the file name. 
""" def cleanup_url(url): tmp = url.rstrip('/') filename = tmp.split('/')[-1] if url.endswith('/'): display_url = tmp + '/' else: display_url = url return filename, display_url prefix = 'django_%s_template_' % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download') self.paths_to_remove.append(tempdir) filename, display_url = cleanup_url(url) if self.verbosity >= 2: self.stdout.write("Downloading %s\n" % display_url) try: the_path, info = urlretrieve(url, path.join(tempdir, filename)) except IOError as e: raise CommandError("couldn't download URL %s to %s: %s" % (url, filename, e)) used_name = the_path.split('/')[-1] # Trying to get better name from response headers content_disposition = info.get('content-disposition') if content_disposition: _, params = cgi.parse_header(content_disposition) guessed_filename = params.get('filename') or used_name else: guessed_filename = used_name # Falling back to content type guessing ext = self.splitext(guessed_filename)[1] content_type = info.get('content-type') if not ext and content_type: ext = mimetypes.guess_extension(content_type) if ext: guessed_filename += ext # Move the temporary file to a filename that has better # chances of being recognized by the archive utils if used_name != guessed_filename: guessed_path = path.join(tempdir, guessed_filename) shutil.move(the_path, guessed_path) return guessed_path # Giving up return the_path def splitext(self, the_path): """ Like os.path.splitext, but takes off .tar, too """ base, ext = posixpath.splitext(the_path) if base.lower().endswith('.tar'): ext = base[-4:] + ext base = base[:-4] return base, ext def extract(self, filename): """ Extracts the given file to a temporarily and returns the path of the directory with the extracted content. 
""" prefix = 'django_%s_template_' % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract') self.paths_to_remove.append(tempdir) if self.verbosity >= 2: self.stdout.write("Extracting %s\n" % filename) try: archive.extract(filename, tempdir) return tempdir except (archive.ArchiveException, IOError) as e: raise CommandError("couldn't extract file %s to %s: %s" % (filename, tempdir, e)) def is_url(self, template): """ Returns True if the name looks like a URL """ if ':' not in template: return False scheme = template.split(':', 1)[0].lower() return scheme in self.url_schemes def make_writeable(self, filename): """ Make sure that the file is writeable. Useful if our source is read-only. """ if sys.platform.startswith('java'): # On Jython there is no os.access() return if not os.access(filename, os.W_OK): st = os.stat(filename) new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR os.chmod(filename, new_permissions)
bsd-3-clause
bruecksen/isimip
isi_mip/pages/migrations/0009_auto_20160503_2054.py
1
97534
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-05-03 18:54 from __future__ import unicode_literals from django.db import migrations import isi_mip.contrib.blocks import isi_mip.pages.blocks import wagtail.core.blocks import wagtail.core.fields import wagtail.documents.blocks import wagtail.embeds.blocks import wagtail.images.blocks class Migration(migrations.Migration): dependencies = [ ('pages', '0008_auto_20160425_1243'), ] operations = [ migrations.AlterField( model_name='aboutpage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('columns_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', 
wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external 
link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_2', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', 
wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', 
wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_2_to_1', wagtail.core.blocks.StructBlock((('left_column', 
wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', 
wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', 
wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', 
wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', 
wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('center_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an 
internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1_to_1', wagtail.core.blocks.StructBlock((('first_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', 
wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('second_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', 
wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), 
('description', wagtail.core.blocks.CharBlock()))))))), ('third_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('fourth_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', 
wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))), ('paper', wagtail.core.blocks.StructBlock((('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('author', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('journal', wagtail.core.blocks.CharBlock()), ('link', wagtail.core.blocks.URLBlock())), template='widgets/page-teaser-wide.html')), ('bigteaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))))), ), migrations.AlterField( model_name='faqpage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), 
('columns_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', 
wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_2', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', 
wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', 
wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_2_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', 
wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', 
wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), 
('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), 
('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), 
('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('center_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', 
wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1_to_1', wagtail.core.blocks.StructBlock((('first_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', 
wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('second_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', 
wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('third_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('fourth_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', 
wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('faqs', wagtail.core.blocks.StructBlock((('title', 
wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))))), ), migrations.AlterField( model_name='gettingstartedpage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('columns_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', 
wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', 
wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_2', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), 
('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_2_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', 
wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), 
('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1', wagtail.core.blocks.StructBlock((('left_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', 
wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('right_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', 
wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('center_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', 
wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('columns_1_to_1_to_1_to_1', wagtail.core.blocks.StructBlock((('first_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), 
('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('second_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', 
wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('third_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', 
isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', 
wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))))), ('fourth_column', wagtail.core.blocks.StreamBlock((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('small_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('big_teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('isinumbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('link', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock(required=False)), ('text', wagtail.core.blocks.RichTextBlock(required=False)), ('link', wagtail.core.blocks.URLBlock(required=False))))), ('faqs', wagtail.core.blocks.StructBlock((('title', 
wagtail.core.blocks.CharBlock()), ('faqs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('question', wagtail.core.blocks.CharBlock()), ('answer', wagtail.core.blocks.RichTextBlock())))))))), ('pdf', wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock())))))))))), ('protocol', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('pdfs', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('file', wagtail.documents.blocks.DocumentChooserBlock()), ('description', wagtail.core.blocks.CharBlock()))))), ('image', isi_mip.pages.blocks.ImageBlock()), ('version', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.TextBlock())))), ('input_data', wagtail.core.blocks.StructBlock((('description', wagtail.core.blocks.RichTextBlock()), ('row_limit', isi_mip.contrib.blocks.IntegerBlock(default=10, max_value=30, min_value=1))))), ('contact', wagtail.core.blocks.StructBlock((('description', wagtail.core.blocks.RichTextBlock()), ('sectors', wagtail.core.blocks.ListBlock(isi_mip.pages.blocks.SectorBlock))))), ('blog', wagtail.core.blocks.StructBlock((('blog_index', isi_mip.contrib.blocks.SpecificPageChooserBlock(help_text='Select blog index page.', required=False)), ('title', wagtail.core.blocks.CharBlock(help_text='Per default, the title of the blog index will be used.', required=False)), ('entry_count', isi_mip.contrib.blocks.IntegerBlock(default=4, help_text='How many blog entries should be displayed?', max_value=5, min_value=1, required=True))), template='blocks/flat_blog_block.html')))), ), migrations.AlterField( model_name='homepage', name='content', field=wagtail.core.fields.StreamField((('row', wagtail.core.blocks.StreamBlock((('teaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', 
wagtail.core.blocks.TextBlock(required=True)), ('link', wagtail.core.blocks.PageChooserBlock(required=True))))), ('bigteaser', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('picture', wagtail.images.blocks.ImageChooserBlock()), ('text', wagtail.core.blocks.RichTextBlock()), ('external_link', wagtail.core.blocks.URLBlock(help_text='Will be ignored if an internal link is provided', required=False)), ('internal_link', wagtail.core.blocks.PageChooserBlock(help_text='If set, this has precedence over the external link.', required=False)), ('from_date', wagtail.core.blocks.DateBlock(required=False)), ('to_date', wagtail.core.blocks.DateBlock(required=False))))), ('blog', wagtail.core.blocks.StructBlock((('blog_index', isi_mip.contrib.blocks.SpecificPageChooserBlock(help_text='Select blog index page.', required=False)), ('title', wagtail.core.blocks.CharBlock(help_text='Per default, the title of the blog index will be used.', required=False)), ('entry_count', isi_mip.contrib.blocks.IntegerBlock(default=4, help_text='How many blog entries should be displayed?', max_value=5, min_value=1, required=True))))), ('numbers', wagtail.core.blocks.StructBlock((('number1', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock())))), ('number2', wagtail.core.blocks.StructBlock((('number', wagtail.core.blocks.CharBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.CharBlock()))))))), ('twitter', wagtail.core.blocks.StructBlock((('username', wagtail.core.blocks.CharBlock(required=True)), ('count', isi_mip.contrib.blocks.IntegerBlock(default=20)))))))),)), ), migrations.AlterField( model_name='impactmodelspage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', 
wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('impact_models', wagtail.core.blocks.StructBlock((('description', wagtail.core.blocks.RichTextBlock()), ('rows_per_page', isi_mip.contrib.blocks.IntegerBlock(default=20, min_value=1, required=True))))), ('blog', wagtail.core.blocks.StructBlock((('blog_index', isi_mip.contrib.blocks.SpecificPageChooserBlock(help_text='Select blog index page.', required=False)), ('title', wagtail.core.blocks.CharBlock(help_text='Per default, the title of the blog index will be used.', required=False)), ('entry_count', isi_mip.contrib.blocks.IntegerBlock(default=4, help_text='How many blog entries should be displayed?', max_value=5, min_value=1, required=True))), template='blocks/flat_blog_block.html')))), ), migrations.AlterField( model_name='outcomespage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('papers', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('papers', wagtail.core.blocks.ListBlock(isi_mip.pages.blocks.PaperBlock))))))), ), migrations.AlterField( model_name='outputdatapage', name='content', field=wagtail.core.fields.StreamField((('rich_text', wagtail.core.blocks.RichTextBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock()), ('image', isi_mip.pages.blocks.ImageBlock()), ('output_data', wagtail.core.blocks.StructBlock((('description', wagtail.core.blocks.RichTextBlock()),))), ('blog', wagtail.core.blocks.StructBlock((('blog_index', isi_mip.contrib.blocks.SpecificPageChooserBlock(help_text='Select blog index page.', required=False)), ('title', wagtail.core.blocks.CharBlock(help_text='Per default, the title of the blog index will be used.', required=False)), ('entry_count', isi_mip.contrib.blocks.IntegerBlock(default=4, help_text='How 
many blog entries should be displayed?', max_value=5, min_value=1, required=True))), template='blocks/flat_blog_block.html')))), ), ]
mit
agry/NGECore2
scripts/mobiles/tatooine/moisture_thief.py
2
1277
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector


def addTemplate(core):
	"""Build the Tatooine 'moisture thief' mission NPC and register it
	with the spawn service under the key 'moisture_thief'."""
	# Appearance model(s) used for this mobile.
	models = Vector()
	models.add('object/mobile/shared_dressed_tatooine_moisture_thief.iff')

	# Weapon loadout: a single basic one-handed sword (1.0-5 kinetic).
	sword = WeaponTemplate('object/weapon/melee/sword/shared_sword_01.iff', WeaponType.ONEHANDEDMELEE, 1.0, 5, 'kinetic')
	weapons = Vector()
	weapons.add(sword)

	tmpl = MobileTemplate()
	tmpl.setCreatureName('mission_moisture_thief')
	tmpl.setLevel(2)
	tmpl.setDifficulty(Difficulty.NORMAL)
	tmpl.setMinSpawnDistance(4)
	tmpl.setMaxSpawnDistance(8)
	tmpl.setDeathblow(False)
	tmpl.setScale(1)
	tmpl.setSocialGroup("jabba")
	tmpl.setAssistRange(0)
	tmpl.setStalker(False)
	tmpl.setTemplates(models)
	tmpl.setWeaponTemplateVector(weapons)

	# No special attacks registered; only the default melee hit.
	attacks = Vector()
	tmpl.setDefaultAttack('meleehit')
	tmpl.setAttacks(attacks)

	core.spawnService.addMobileTemplate('moisture_thief', tmpl)
	return
lgpl-3.0
juliusbierk/scikit-image
skimage/io/_plugins/q_color_mixer.py
38
11989
# the module for the qt color_mixer plugin
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import (QWidget, QStackedWidget, QSlider, QGridLayout,
                         QLabel)

from .util import ColorMixer


class IntelligentSlider(QWidget):
    ''' A slider that adds a 'name' attribute and calls a callback
    with 'name' as an argument to the registered callback.

    This allows you to create large groups of sliders in a loop,
    but still keep track of the individual events

    It also prints a label below the slider.

    The range of the slider is hardcoded from zero - 1000,
    but it supports a conversion factor so you can scale the results'''

    def __init__(self, name, a, b, callback):
        QWidget.__init__(self)
        self.name = name
        self.callback = callback
        # Linear conversion: displayed value = raw_slider_value * a + b
        self.a = a
        self.b = b
        # Guard so programmatic set_value() does not fire the callback.
        self.manually_triggered = False

        self.slider = QSlider()
        self.slider.setRange(0, 1000)
        self.slider.setValue(500)
        self.slider.valueChanged.connect(self.slider_changed)

        self.name_label = QLabel()
        self.name_label.setText(self.name)
        self.name_label.setAlignment(QtCore.Qt.AlignCenter)

        self.value_label = QLabel()
        self.value_label.setText('%2.2f' % (self.slider.value() * self.a
                                            + self.b))
        self.value_label.setAlignment(QtCore.Qt.AlignCenter)

        self.layout = QGridLayout(self)
        self.layout.addWidget(self.name_label, 0, 0)
        self.layout.addWidget(self.slider, 1, 0, QtCore.Qt.AlignHCenter)
        self.layout.addWidget(self.value_label, 2, 0)

    # bind this to the valueChanged signal of the slider
    def slider_changed(self, val):
        val = self.val()
        self.value_label.setText(str(val)[:4])
        if not self.manually_triggered:
            self.callback(self.name, val)

    def set_conv_fac(self, a, b):
        """Change the raw->display conversion factors (value = raw*a + b)."""
        self.a = a
        self.b = b

    def set_value(self, val):
        """Set the slider to display value `val` without firing the callback."""
        self.manually_triggered = True
        self.slider.setValue(int((val - self.b) / self.a))
        self.value_label.setText('%2.2f' % val)
        self.manually_triggered = False

    def val(self):
        """Return the current converted (display) value."""
        return self.slider.value() * self.a + self.b


class MixerPanel(QtGui.QFrame):
    '''A color mixer to hook up to an image.

    You pass the image the panel is to operate on and it operates on
    that image in place. You also pass a callback to be called to trigger
    a refresh. This callback is called every time the mixer modifies your
    image.'''

    def __init__(self, img):
        QtGui.QFrame.__init__(self)
        #self.setFrameStyle(QtGui.QFrame.Box|QtGui.QFrame.Sunken)

        self.img = img
        self.mixer = ColorMixer(self.img)
        self.callback = None

        #---------------------------------------------------------------
        # ComboBox (selects which slider page is shown)
        #---------------------------------------------------------------

        self.combo_box_entries = ['RGB Color', 'HSV Color',
                                  'Brightness/Contrast',
                                  'Gamma', 'Gamma (Sigmoidal)']
        self.combo_box = QtGui.QComboBox()
        for entry in self.combo_box_entries:
            self.combo_box.addItem(entry)
        self.combo_box.currentIndexChanged.connect(self.combo_box_changed)

        #---------------------------------------------------------------
        # RGB color sliders
        #---------------------------------------------------------------

        # radio buttons
        self.rgb_add = QtGui.QRadioButton('Additive')
        self.rgb_mul = QtGui.QRadioButton('Multiplicative')
        self.rgb_mul.toggled.connect(self.rgb_radio_changed)
        self.rgb_add.toggled.connect(self.rgb_radio_changed)

        # sliders
        rs = IntelligentSlider('R', 0.51, -255, self.rgb_changed)
        gs = IntelligentSlider('G', 0.51, -255, self.rgb_changed)
        bs = IntelligentSlider('B', 0.51, -255, self.rgb_changed)
        self.rs = rs
        self.gs = gs
        self.bs = bs

        self.rgb_widget = QWidget()
        self.rgb_widget.layout = QGridLayout(self.rgb_widget)
        self.rgb_widget.layout.addWidget(self.rgb_add, 0, 0, 1, 3)
        self.rgb_widget.layout.addWidget(self.rgb_mul, 1, 0, 1, 3)
        self.rgb_widget.layout.addWidget(self.rs, 2, 0)
        self.rgb_widget.layout.addWidget(self.gs, 2, 1)
        self.rgb_widget.layout.addWidget(self.bs, 2, 2)

        #---------------------------------------------------------------
        # HSV sliders
        #---------------------------------------------------------------

        # radio buttons
        self.hsv_add = QtGui.QRadioButton('Additive')
        self.hsv_mul = QtGui.QRadioButton('Multiplicative')
        self.hsv_mul.toggled.connect(self.hsv_radio_changed)
        # BUGFIX: this line previously connected hsv_mul a second time and
        # never connected hsv_add, so toggling 'Additive' did not reset the
        # sliders (compare the RGB section above).
        self.hsv_add.toggled.connect(self.hsv_radio_changed)

        # sliders
        hs = IntelligentSlider('H', 0.36, -180, self.hsv_changed)
        ss = IntelligentSlider('S', 0.002, 0, self.hsv_changed)
        vs = IntelligentSlider('V', 0.002, 0, self.hsv_changed)
        self.hs = hs
        self.ss = ss
        self.vs = vs

        self.hsv_widget = QWidget()
        self.hsv_widget.layout = QGridLayout(self.hsv_widget)
        self.hsv_widget.layout.addWidget(self.hsv_add, 0, 0, 1, 3)
        self.hsv_widget.layout.addWidget(self.hsv_mul, 1, 0, 1, 3)
        self.hsv_widget.layout.addWidget(self.hs, 2, 0)
        self.hsv_widget.layout.addWidget(self.ss, 2, 1)
        self.hsv_widget.layout.addWidget(self.vs, 2, 2)

        #---------------------------------------------------------------
        # Brightness/Contrast sliders
        #---------------------------------------------------------------

        # sliders
        cont = IntelligentSlider('x', 0.002, 0, self.bright_changed)
        bright = IntelligentSlider('+', 0.51, -255, self.bright_changed)
        self.cont = cont
        self.bright = bright

        # layout
        self.bright_widget = QWidget()
        self.bright_widget.layout = QtGui.QGridLayout(self.bright_widget)
        self.bright_widget.layout.addWidget(self.cont, 0, 0)
        self.bright_widget.layout.addWidget(self.bright, 0, 1)

        #----------------------------------------------------------------------
        # Gamma Slider
        #----------------------------------------------------------------------

        gamma = IntelligentSlider('gamma', 0.005, 0, self.gamma_changed)
        self.gamma = gamma

        # layout
        self.gamma_widget = QWidget()
        self.gamma_widget.layout = QtGui.QGridLayout(self.gamma_widget)
        self.gamma_widget.layout.addWidget(self.gamma, 0, 0)

        #---------------------------------------------------------------
        # Sigmoid Gamma sliders
        #---------------------------------------------------------------

        # sliders
        alpha = IntelligentSlider('alpha', 0.011, 1, self.sig_gamma_changed)
        beta = IntelligentSlider('beta', 0.012, 0, self.sig_gamma_changed)
        self.a_gamma = alpha
        self.b_gamma = beta

        # layout
        self.sig_gamma_widget = QWidget()
        self.sig_gamma_widget.layout = QtGui.QGridLayout(self.sig_gamma_widget)
        self.sig_gamma_widget.layout.addWidget(self.a_gamma, 0, 0)
        self.sig_gamma_widget.layout.addWidget(self.b_gamma, 0, 1)

        #---------------------------------------------------------------
        # Buttons
        #---------------------------------------------------------------

        self.commit_button = QtGui.QPushButton('Commit')
        self.commit_button.clicked.connect(self.commit_changes)
        self.revert_button = QtGui.QPushButton('Revert')
        self.revert_button.clicked.connect(self.revert_changes)

        #---------------------------------------------------------------
        # Mixer Layout
        #---------------------------------------------------------------

        self.sliders = QStackedWidget()
        self.sliders.addWidget(self.rgb_widget)
        self.sliders.addWidget(self.hsv_widget)
        self.sliders.addWidget(self.bright_widget)
        self.sliders.addWidget(self.gamma_widget)
        self.sliders.addWidget(self.sig_gamma_widget)

        self.layout = QtGui.QGridLayout(self)
        self.layout.addWidget(self.combo_box, 0, 0)
        self.layout.addWidget(self.sliders, 1, 0)
        self.layout.addWidget(self.commit_button, 2, 0)
        self.layout.addWidget(self.revert_button, 3, 0)

        #---------------------------------------------------------------
        # State Initialization
        #---------------------------------------------------------------

        self.combo_box.setCurrentIndex(0)
        self.rgb_mul.setChecked(True)
        self.hsv_mul.setChecked(True)

    def set_callback(self, callback):
        """Register a no-argument refresh callback, fired on every change."""
        self.callback = callback

    def combo_box_changed(self, index):
        """Show the slider page matching the combo selection and reset."""
        self.sliders.setCurrentIndex(index)
        self.reset()

    def rgb_radio_changed(self):
        self.reset()

    def hsv_radio_changed(self):
        self.reset()

    def reset(self):
        """Reset sliders and restore the image to its committed state."""
        self.reset_sliders()
        self.mixer.set_to_stateimg()
        if self.callback:
            self.callback()

    def reset_sliders(self):
        # handle changing the conversion factors necessary
        # (additive mode centers at 0, multiplicative mode centers at 1).
        if self.rgb_add.isChecked():
            self.rs.set_conv_fac(0.51, -255)
            self.rs.set_value(0)
            self.gs.set_conv_fac(0.51, -255)
            self.gs.set_value(0)
            self.bs.set_conv_fac(0.51, -255)
            self.bs.set_value(0)
        else:
            self.rs.set_conv_fac(0.002, 0)
            self.rs.set_value(1.)
            self.gs.set_conv_fac(0.002, 0)
            self.gs.set_value(1.)
            self.bs.set_conv_fac(0.002, 0)
            self.bs.set_value(1.)

        # Hue is always an offset, independent of the add/mul mode.
        self.hs.set_value(0)
        if self.hsv_add.isChecked():
            self.ss.set_conv_fac(0.002, -1)
            self.ss.set_value(0)
            self.vs.set_conv_fac(0.002, -1)
            self.vs.set_value(0)
        else:
            self.ss.set_conv_fac(0.002, 0)
            self.ss.set_value(1.)
            self.vs.set_conv_fac(0.002, 0)
            self.vs.set_value(1.)

        self.bright.set_value(0)
        self.cont.set_value(1.)

        self.gamma.set_value(1)
        self.a_gamma.set_value(1)
        self.b_gamma.set_value(0.5)

    def rgb_changed(self, name, val):
        if name == 'R':
            channel = self.mixer.RED
        elif name == 'G':
            channel = self.mixer.GREEN
        else:
            channel = self.mixer.BLUE

        if self.rgb_mul.isChecked():
            self.mixer.multiply(channel, val)
        elif self.rgb_add.isChecked():
            self.mixer.add(channel, val)
        else:
            pass

        if self.callback:
            self.callback()

    def hsv_changed(self, name, val):
        # All three HSV values are re-applied together on any change.
        h = self.hs.val()
        s = self.ss.val()
        v = self.vs.val()

        if self.hsv_mul.isChecked():
            self.mixer.hsv_multiply(h, s, v)
        elif self.hsv_add.isChecked():
            self.mixer.hsv_add(h, s, v)
        else:
            pass

        if self.callback:
            self.callback()

    def bright_changed(self, name, val):
        b = self.bright.val()
        c = self.cont.val()
        self.mixer.brightness(c, b)
        if self.callback:
            self.callback()

    def gamma_changed(self, name, val):
        self.mixer.gamma(val)
        if self.callback:
            self.callback()

    def sig_gamma_changed(self, name, val):
        ag = self.a_gamma.val()
        bg = self.b_gamma.val()
        self.mixer.sigmoid_gamma(ag, bg)
        if self.callback:
            self.callback()

    def commit_changes(self):
        """Make the current adjustments the new baseline image state."""
        self.mixer.commit_changes()
        self.reset_sliders()

    def revert_changes(self):
        """Discard all adjustments and return to the original image."""
        self.mixer.revert()
        self.reset_sliders()
        if self.callback:
            self.callback()
bsd-3-clause
tobegit3hub/deep_cnn
java_predict_client/src/main/proto/tensorflow/python/platform/app_test.py
202
1391
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for our flags implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from tensorflow.python.platform import app from tensorflow.python.platform import flags FLAGS = flags.FLAGS flags.DEFINE_boolean('myflag', False, '') def main(argv): if (len(argv) != 3): print("Length of argv was not 3: ", argv) sys.exit(-1) if argv[1] != "--passthrough": print("--passthrough argument not in argv") sys.exit(-1) if argv[2] != "extra": print("'extra' argument not in argv") sys.exit(-1) if __name__ == '__main__': sys.argv.extend(["--myflag", "--passthrough", "extra"]) app.run()
apache-2.0
nguyenkims/projecteuler-python
src/p61.py
1
1335
# Project Euler problem 61 scratch script: cyclical figurate numbers.
# Builds tables of 4-digit polygonal numbers and helpers to chain them
# so that the last two digits of one equal the first two of the next.

from itertools import permutations

limit = 10 ** 4  # only 4-digit numbers are of interest


def f(k, n):
    """Return the n-th k-gonal number.

    k=3 triangle n*(n+1)/2, k=4 square n*n, k=5 pentagonal n*(3n-1)/2,
    k=6 hexagonal n*(2n-1), k=7 heptagonal n*(5n-3)/2, k=8 octagonal n*(3n-2).
    """
    # Floor division keeps results as ints (the products are always even).
    if k == 3:
        return n * (n + 1) // 2
    if k == 4:
        return n * n
    if k == 5:
        return n * (3 * n - 1) // 2
    if k == 6:
        return n * (2 * n - 1)
    if k == 7:
        return n * (5 * n - 3) // 2
    if k == 8:
        return n * (3 * n - 2)


# P[3]: list of all 4-digit triangle numbers, P[4]: squares, ...
# (indices 0-2 are unused placeholders so P[k] lines up with k-gonal).
P = [[] for _ in range(9)]


def fillP():
    """Populate P[3..8] with all 4-digit polygonal numbers."""
    for n in range(10, 200):
        for k in range(3, 9):
            v = f(k, n)  # compute once instead of three times per pair
            if 999 < v < limit:
                P[k].append(v)


fillP()
print('P is filled')
# print(P[3])


def isCyclic(a, b):
    """True if a = xyzt and b = ztmn, i.e. a's last two digits open b."""
    sa, sb = str(a), str(b)
    return sa[2] == sb[0] and sa[3] == sb[1]


def generate(N, S):
    """Concatenate every ordered selection of N distinct elements of S.

    generate(2, [a, b, c]) -> [ab, ac, ba, bc, ca, cb] as integers
    (same members as [ab, ba, ac, ca, bc, cb], ordering aside).
    """
    # The original recursion called generate(N-1) without S (TypeError)
    # and concatenated the literal N+2 instead of elements of S;
    # permutations implements what the docstring describes.
    if N == 1:
        return list(S)
    return [int(''.join(str(x) for x in p)) for p in permutations(S, N)]


print(generate(2, [12, 34, 56]))


def findTarget(N=3):
    """Find sets of N numbers that are cyclic where the first is a
    triangle number, the second pentagonal, the third square."""
    for a3 in P[3]:
        for a4 in [x for x in P[5] if isCyclic(a3, x)]:
            for a5 in [x for x in P[4] if isCyclic(a4, x) and isCyclic(x, a3)]:
                print(a3, a4, a5)

# findTarget()


def test():
    # Print squares that cyclically follow the triangle number 8128.
    a3 = 8128
    for a4 in [x for x in P[4] if isCyclic(a3, x)]:
        print(a4)

# test()
mit
sv-dev1/odoo
addons/base_iban/base_iban.py
278
8657
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import string

from openerp.osv import fields, osv
from openerp.tools.translate import _

# Reference Examples of IBAN
# Maps a lowercase ISO country code to a sample IBAN layout; only the
# character count per country is used for length validation below
# (B = bank code, S = branch, C = account, K/k = check digits).
_ref_iban = {
    'al': 'ALkk BBBS SSSK CCCC CCCC CCCC CCCC', 'ad': 'ADkk BBBB SSSS CCCC CCCC CCCC',
    'at': 'ATkk BBBB BCCC CCCC CCCC', 'be': 'BEkk BBBC CCCC CCKK',
    'ba': 'BAkk BBBS SSCC CCCC CCKK', 'bg': 'BGkk BBBB SSSS DDCC CCCC CC',
    'bh': 'BHkk BBBB SSSS SSSS SSSS SS', 'cr': 'CRkk BBBC CCCC CCCC CCCC C',
    'hr': 'HRkk BBBB BBBC CCCC CCCC C', 'cy': 'CYkk BBBS SSSS CCCC CCCC CCCC CCCC',
    'cz': 'CZkk BBBB SSSS SSCC CCCC CCCC', 'dk': 'DKkk BBBB CCCC CCCC CC',
    'do': 'DOkk BBBB CCCC CCCC CCCC CCCC CCCC', 'ee': 'EEkk BBSS CCCC CCCC CCCK',
    'fo': 'FOkk CCCC CCCC CCCC CC', 'fi': 'FIkk BBBB BBCC CCCC CK',
    'fr': 'FRkk BBBB BGGG GGCC CCCC CCCC CKK', 'ge': 'GEkk BBCC CCCC CCCC CCCC CC',
    'de': 'DEkk BBBB BBBB CCCC CCCC CC', 'gi': 'GIkk BBBB CCCC CCCC CCCC CCC',
    'gr': 'GRkk BBBS SSSC CCCC CCCC CCCC CCC', 'gl': 'GLkk BBBB CCCC CCCC CC',
    'hu': 'HUkk BBBS SSSC CCCC CCCC CCCC CCCC', 'is': 'ISkk BBBB SSCC CCCC XXXX XXXX XX',
    'ie': 'IEkk BBBB SSSS SSCC CCCC CC', 'il': 'ILkk BBBS SSCC CCCC CCCC CCC',
    'it': 'ITkk KBBB BBSS SSSC CCCC CCCC CCC', 'kz': 'KZkk BBBC CCCC CCCC CCCC',
    'kw': 'KWkk BBBB CCCC CCCC CCCC CCCC CCCC CC', 'lv': 'LVkk BBBB CCCC CCCC CCCC C',
    'lb': 'LBkk BBBB CCCC CCCC CCCC CCCC CCCC', 'li': 'LIkk BBBB BCCC CCCC CCCC C',
    'lt': 'LTkk BBBB BCCC CCCC CCCC', 'lu': 'LUkk BBBC CCCC CCCC CCCC',
    'mk': 'MKkk BBBC CCCC CCCC CKK', 'mt': 'MTkk BBBB SSSS SCCC CCCC CCCC CCCC CCC',
    'mr': 'MRkk BBBB BSSS SSCC CCCC CCCC CKK', 'mu': 'MUkk BBBB BBSS CCCC CCCC CCCC CCCC CC',
    'mc': 'MCkk BBBB BGGG GGCC CCCC CCCC CKK', 'me': 'MEkk BBBC CCCC CCCC CCCC KK',
    'nl': 'NLkk BBBB CCCC CCCC CC', 'no': 'NOkk BBBB CCCC CCK',
    'pl': 'PLkk BBBS SSSK CCCC CCCC CCCC CCCC', 'pt': 'PTkk BBBB SSSS CCCC CCCC CCCK K',
    'ro': 'ROkk BBBB CCCC CCCC CCCC CCCC', 'sm': 'SMkk KBBB BBSS SSSC CCCC CCCC CCC',
    'sa': 'SAkk BBCC CCCC CCCC CCCC CCCC', 'rs': 'RSkk BBBC CCCC CCCC CCCC KK',
    'sk': 'SKkk BBBB SSSS SSCC CCCC CCCC', 'si': 'SIkk BBSS SCCC CCCC CKK',
    'es': 'ESkk BBBB SSSS KKCC CCCC CCCC', 'se': 'SEkk BBBB CCCC CCCC CCCC CCCC',
    'ch': 'CHkk BBBB BCCC CCCC CCCC C', 'tn': 'TNkk BBSS SCCC CCCC CCCC CCCC',
    'tr': 'TRkk BBBB BRCC CCCC CCCC CCCC CC', 'ae': 'AEkk BBBC CCCC CCCC CCCC CCC',
    'gb': 'GBkk BBBB SSSS SSCC CCCC CC',
}


def _format_iban(iban_str):
    '''
    This function removes all characters from given 'iban_str' that isn't a alpha numeric and converts it to upper case.
    '''
    res = ""
    if iban_str:
        for char in iban_str:
            if char.isalnum():
                res += char.upper()
    return res


def _pretty_iban(iban_str):
    "return iban_str in groups of four characters separated by a single space"
    res = []
    while iban_str:
        res.append(iban_str[:4])
        iban_str = iban_str[4:]
    return ' '.join(res)


class res_partner_bank(osv.osv):
    # Extends the base bank-account model to normalize, pretty-print and
    # validate accounts whose type ('state' field) is 'iban'.
    _inherit = "res.partner.bank"

    def create(self, cr, uid, vals, context=None):
        #overwrite to format the iban number correctly
        # (strip separators, upper-case, then re-group in blocks of four)
        if (vals.get('state',False)=='iban') and vals.get('acc_number', False):
            vals['acc_number'] = _format_iban(vals['acc_number'])
            vals['acc_number'] = _pretty_iban(vals['acc_number'])
        return super(res_partner_bank, self).create(cr, uid, vals, context)

    def write(self, cr, uid, ids, vals, context=None):
        #overwrite to format the iban number correctly
        # (same normalization as create())
        if (vals.get('state',False)=='iban') and vals.get('acc_number', False):
            vals['acc_number'] = _format_iban(vals['acc_number'])
            vals['acc_number'] = _pretty_iban(vals['acc_number'])
        return super(res_partner_bank, self).write(cr, uid, ids, vals, context)

    def is_iban_valid(self, cr, uid, iban, context=None):
        """ Check if IBAN is valid or not
            @param iban: IBAN as string
            @return: True if IBAN is valid, False otherwise
        """
        if not iban:
            return False
        iban = _format_iban(iban).lower()
        # Length check against the per-country reference pattern, when known.
        if iban[:2] in _ref_iban and len(iban) != len(_format_iban(_ref_iban[iban[:2]])):
            return False
        #the four first digits have to be shifted to the end
        iban = iban[4:] + iban[:4]
        #letters have to be transformed into numbers (a = 10, b = 11, ...)
        # ord(char) - 87 maps 'a' (97) to 10, 'b' to 11, etc.
        iban2 = ""
        for char in iban:
            if char.isalpha():
                iban2 += str(ord(char)-87)
            else:
                iban2 += char
        #iban is correct if modulo 97 == 1
        return int(iban2) % 97 == 1

    def check_iban(self, cr, uid, ids, context=None):
        '''
        Check the IBAN number: constraint hook, validates every record whose
        account type is 'iban'; other account types are skipped.
        '''
        for bank_acc in self.browse(cr, uid, ids, context=context):
            if bank_acc.state != 'iban':
                continue
            if not self.is_iban_valid(cr, uid, bank_acc.acc_number, context=context):
                return False
        return True

    def _construct_constraint_msg(self, cr, uid, ids, context=None):
        # Build the (translated message, interpolation args) pair shown when
        # the check_iban constraint fails, tailored to the country prefix.
        def default_iban_check(iban_cn):
            return iban_cn and iban_cn[0] in string.ascii_lowercase and iban_cn[1] in string.ascii_lowercase
        iban_country = self.browse(cr, uid, ids)[0].acc_number and self.browse(cr, uid, ids)[0].acc_number[:2].lower()
        if default_iban_check(iban_country):
            if iban_country in _ref_iban:
                return _('The IBAN does not seem to be correct. You should have entered something like this %s'), \
                    ('%s \nWhere B = National bank code, S = Branch code,'\
                     ' C = Account No, K = Check digit' % _ref_iban[iban_country])
            return _('This IBAN does not pass the validation check, please verify it'), ()
        return _('The IBAN is invalid, it should begin with the country code'), ()

    def _check_bank(self, cr, uid, ids, context=None):
        # IBAN accounts require a BIC/SWIFT code on the linked bank.
        for partner_bank in self.browse(cr, uid, ids, context=context):
            if partner_bank.state == 'iban' and not partner_bank.bank.bic:
                return False
        return True

    def get_bban_from_iban(self, cr, uid, ids, context=None):
        '''
        This function returns the bank account number computed from the iban account number, thanks to the mapping_list dictionary that contains the rules associated to its country.
        Returns {record_id: bban_or_False}; False when the country has no rule.
        '''
        res = {}
        mapping_list = {
            #TODO add rules for others countries
            # Each rule strips the country/check prefix specific to that country.
            'be': lambda x: x[4:],
            'fr': lambda x: x[14:],
            'ch': lambda x: x[9:],
            'gb': lambda x: x[14:],
        }
        for record in self.browse(cr, uid, ids, context=context):
            if not record.acc_number:
                res[record.id] = False
                continue
            res[record.id] = False
            for code, function in mapping_list.items():
                if record.acc_number.lower().startswith(code):
                    res[record.id] = function(record.acc_number)
                    break
        return res

    _columns = {
        # Deprecated: we keep it for backward compatibility, to be removed in v7
        # We use acc_number instead of IBAN since v6.1, but we keep this field
        # to not break community modules.
        'iban': fields.related('acc_number', string='IBAN', size=34, readonly=True, help="International Bank Account Number", type="char"),
    }

    _constraints = [
        (check_iban, _construct_constraint_msg, ["iban", "acc_number", "state"]),
        (_check_bank, '\nPlease define BIC/Swift code on bank for bank type IBAN Account to make valid payments', ['bic'])
    ]

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
alexus37/AugmentedRealityChess
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/arrays/ctypesarrays.py
9
4980
"""ctypes sized data-arrays as a data-formatmechanism XXX we have to use _ctypes.Array as the type descriminator, would be nice to have it available from the public module """ REGISTRY_NAME = 'ctypesarrays' import ctypes, _ctypes from OpenGL.raw.GL import _types from OpenGL.arrays import _arrayconstants as GL_1_1 from OpenGL import constant from OpenGL.arrays import formathandler from OpenGL._bytes import bytes,unicode import operator class CtypesArrayHandler( formathandler.FormatHandler ): """Ctypes Array-type-specific data-type handler for OpenGL""" @classmethod def from_param( cls, value, typeCode=None ): return ctypes.byref( value ) dataPointer = staticmethod( ctypes.addressof ) HANDLED_TYPES = (_ctypes.Array, ) isOutput = True @classmethod def voidDataPointer( cls, value ): """Given value in a known data-pointer type, return void_p for pointer""" return ctypes.byref( value ) @classmethod def zeros( cls, dims, typeCode ): """Return Numpy array of zeros in given size""" type = GL_TYPE_TO_ARRAY_MAPPING[ typeCode ] for dim in dims: type *= int(dim) return type() # should expicitly set to 0s @classmethod def ones( cls, dims, typeCode='d' ): """Return numpy array of ones in given size""" raise NotImplementedError( """Haven't got a good ones implementation yet""" ) ## type = GL_TYPE_TO_ARRAY_MAPPING[ typeCode ] ## for dim in dims: ## type *= dim ## return type() # should expicitly set to 0s @classmethod def arrayToGLType( cls, value ): """Given a value, guess OpenGL type of the corresponding pointer""" result = ARRAY_TO_GL_TYPE_MAPPING.get( value._type_ ) if result is not None: return result raise TypeError( """Don't know GL type for array of type %r, known types: %s\nvalue:%s"""%( value._type_, list(ARRAY_TO_GL_TYPE_MAPPING.keys()), value, ) ) @classmethod def arraySize( cls, value, typeCode = None ): """Given a data-value, calculate dimensions for the array""" try: return value.__class__.__component_count__ except AttributeError as err: dims = 1 for length in 
cls.dims( value ): dims *= length value.__class__.__component_count__ = dims return dims @classmethod def arrayByteCount( cls, value, typeCode = None ): """Given a data-value, calculate number of bytes required to represent""" return ctypes.sizeof( value ) @classmethod def types( cls, value ): """Produce iterable producing all composite types""" dimObject = value while dimObject is not None: yield dimObject dimObject = getattr( dimObject, '_type_', None ) if isinstance( dimObject, (bytes,unicode)): dimObject = None @classmethod def dims( cls, value ): """Produce iterable of all dimensions""" try: return value.__class__.__dimensions__ except AttributeError as err: dimensions = [] for base in cls.types( value ): length = getattr( base, '_length_', None) if length is not None: dimensions.append( length ) dimensions = tuple( dimensions ) value.__class__.__dimensions__ = dimensions return dimensions @classmethod def asArray( cls, value, typeCode=None ): """Convert given value to an array value of given typeCode""" return value @classmethod def unitSize( cls, value, typeCode=None ): """Determine unit size of an array (if possible)""" try: return value.__class__.__min_dimension__ except AttributeError as err: dim = cls.dims( value )[-1] value.__class__.__min_dimension__ = dim return dim @classmethod def dimensions( cls, value, typeCode=None ): """Determine dimensions of the passed array value (if possible)""" return tuple( cls.dims(value) ) ARRAY_TO_GL_TYPE_MAPPING = { _types.GLdouble: GL_1_1.GL_DOUBLE, _types.GLfloat: GL_1_1.GL_FLOAT, _types.GLint: GL_1_1.GL_INT, _types.GLuint: GL_1_1.GL_UNSIGNED_INT, _types.GLshort: GL_1_1.GL_SHORT, _types.GLushort: GL_1_1.GL_UNSIGNED_SHORT, _types.GLchar: GL_1_1.GL_CHAR, _types.GLbyte: GL_1_1.GL_BYTE, _types.GLubyte: GL_1_1.GL_UNSIGNED_BYTE, } GL_TYPE_TO_ARRAY_MAPPING = { GL_1_1.GL_DOUBLE: _types.GLdouble, GL_1_1.GL_FLOAT: _types.GLfloat, GL_1_1.GL_INT: _types.GLint, GL_1_1.GL_UNSIGNED_INT: _types.GLuint, GL_1_1.GL_SHORT: 
_types.GLshort, GL_1_1.GL_UNSIGNED_SHORT: _types.GLushort, GL_1_1.GL_CHAR: _types.GLchar, GL_1_1.GL_BYTE: _types.GLbyte, GL_1_1.GL_UNSIGNED_BYTE: _types.GLubyte, }
mit
jwheare/digest
lib/reportlab/graphics/widgets/markers.py
1
8838
#Copyright ReportLab Europe Ltd. 2000-2004 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/markers.py """ This modules defines a collection of markers used in charts. """ __version__=''' $Id: markers.py 3140 2007-09-13 10:17:45Z rgbecker $ ''' from types import FunctionType, ClassType from reportlab.graphics.shapes import Rect, Line, Circle, Polygon, Drawing, Group from reportlab.graphics.widgets.signsandsymbols import SmileyFace from reportlab.graphics.widgetbase import Widget from reportlab.lib.validators import isNumber, isColorOrNone, OneOf, Validator from reportlab.lib.attrmap import AttrMap, AttrMapValue from reportlab.lib.colors import black from reportlab.graphics.widgets.flags import Flag from math import sin, cos, pi import copy, new _toradians = pi/180.0 class Marker(Widget): '''A polymorphic class of markers''' _attrMap = AttrMap(BASE=Widget, kind = AttrMapValue( OneOf(None, 'Square', 'Diamond', 'Circle', 'Cross', 'Triangle', 'StarSix', 'Pentagon', 'Hexagon', 'Heptagon', 'Octagon', 'StarFive', 'FilledSquare', 'FilledCircle', 'FilledDiamond', 'FilledCross', 'FilledTriangle','FilledStarSix', 'FilledPentagon', 'FilledHexagon', 'FilledHeptagon', 'FilledOctagon', 'FilledStarFive', 'Smiley','ArrowHead', 'FilledArrowHead'), desc='marker type name'), size = AttrMapValue(isNumber,desc='marker size'), x = AttrMapValue(isNumber,desc='marker x coordinate'), y = AttrMapValue(isNumber,desc='marker y coordinate'), dx = AttrMapValue(isNumber,desc='marker x coordinate adjustment'), dy = AttrMapValue(isNumber,desc='marker y coordinate adjustment'), angle = AttrMapValue(isNumber,desc='marker rotation'), fillColor = AttrMapValue(isColorOrNone, desc='marker fill colour'), strokeColor = AttrMapValue(isColorOrNone, desc='marker stroke colour'), strokeWidth = AttrMapValue(isNumber, desc='marker stroke width'), arrowBarbDx = AttrMapValue(isNumber, desc='arrow only the delta x for the 
barbs'), arrowHeight = AttrMapValue(isNumber, desc='arrow only height'), ) def __init__(self,*args,**kw): self.setProperties(kw) self._setKeywords( kind = None, strokeColor = black, strokeWidth = 0.1, fillColor = None, size = 5, x = 0, y = 0, dx = 0, dy = 0, angle = 0, arrowBarbDx = -1.25, arrowHeight = 1.875, ) def clone(self): return new.instance(self.__class__,self.__dict__.copy()) def _Smiley(self): x, y = self.x+self.dx, self.y+self.dy d = self.size/2.0 s = SmileyFace() s.fillColor = self.fillColor s.strokeWidth = self.strokeWidth s.strokeColor = self.strokeColor s.x = x-d s.y = y-d s.size = d*2 return s def _Square(self): x, y = self.x+self.dx, self.y+self.dy d = self.size/2.0 s = Rect(x-d,y-d,2*d,2*d,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth) return s def _Diamond(self): d = self.size/2.0 return self._doPolygon((-d,0,0,d,d,0,0,-d)) def _Circle(self): x, y = self.x+self.dx, self.y+self.dy s = Circle(x,y,self.size/2.0,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth) return s def _Cross(self): x, y = self.x+self.dx, self.y+self.dy s = float(self.size) h, s = s/2, s/6 return self._doPolygon((-s,-h,-s,-s,-h,-s,-h,s,-s,s,-s,h,s,h,s,s,h,s,h,-s,s,-s,s,-h)) def _Triangle(self): x, y = self.x+self.dx, self.y+self.dy r = float(self.size)/2 c = 30*_toradians s = sin(30*_toradians)*r c = cos(c)*r return self._doPolygon((0,r,-c,-s,c,-s)) def _StarSix(self): r = float(self.size)/2 c = 30*_toradians s = sin(c)*r c = cos(c)*r z = s/2 g = c/2 return self._doPolygon((0,r,-z,s,-c,s,-s,0,-c,-s,-z,-s,0,-r,z,-s,c,-s,s,0,c,s,z,s)) def _StarFive(self): R = float(self.size)/2 r = R*sin(18*_toradians)/cos(36*_toradians) P = [] angle = 90 for i in xrange(5): for radius in R, r: theta = angle*_toradians P.append(radius*cos(theta)) P.append(radius*sin(theta)) angle = angle + 36 return self._doPolygon(P) def _Pentagon(self): return self._doNgon(5) def _Hexagon(self): return self._doNgon(6) def _Heptagon(self): 
return self._doNgon(7) def _Octagon(self): return self._doNgon(8) def _ArrowHead(self): s = self.size h = self.arrowHeight b = self.arrowBarbDx return self._doPolygon((0,0,b,-h,s,0,b,h)) def _doPolygon(self,P): x, y = self.x+self.dx, self.y+self.dy if x or y: P = map(lambda i,P=P,A=[x,y]: P[i] + A[i&1], range(len(P))) return Polygon(P, strokeWidth =self.strokeWidth, strokeColor=self.strokeColor, fillColor=self.fillColor) def _doFill(self): old = self.fillColor if old is None: self.fillColor = self.strokeColor r = (self.kind and getattr(self,'_'+self.kind[6:]) or Group)() self.fillColor = old return r def _doNgon(self,n): P = [] size = float(self.size)/2 for i in xrange(n): r = (2.*i/n+0.5)*pi P.append(size*cos(r)) P.append(size*sin(r)) return self._doPolygon(P) _FilledCircle = _doFill _FilledSquare = _doFill _FilledDiamond = _doFill _FilledCross = _doFill _FilledTriangle = _doFill _FilledStarSix = _doFill _FilledPentagon = _doFill _FilledHexagon = _doFill _FilledHeptagon = _doFill _FilledOctagon = _doFill _FilledStarFive = _doFill _FilledArrowHead = _doFill def draw(self): if self.kind: m = getattr(self,'_'+self.kind) if self.angle: _x, _dx, _y, _dy = self.x, self.dx, self.y, self.dy self.x, self.dx, self.y, self.dy = 0,0,0,0 try: m = m() finally: self.x, self.dx, self.y, self.dy = _x, _dx, _y, _dy if not isinstance(m,Group): _m, m = m, Group() m.add(_m) if self.angle: m.rotate(self.angle) x, y = _x+_dx, _y+_dy if x or y: m.shift(x,y) else: m = m() else: m = Group() return m def uSymbol2Symbol(uSymbol,x,y,color): if type(uSymbol) == FunctionType: symbol = uSymbol(x, y, 5, color) elif type(uSymbol) == ClassType and issubclass(uSymbol,Widget): size = 10. 
symbol = uSymbol() symbol.x = x - (size/2) symbol.y = y - (size/2) try: symbol.size = size symbol.color = color except: pass elif isinstance(uSymbol,Marker) or isinstance(uSymbol,Flag): symbol = uSymbol.clone() if isinstance(uSymbol,Marker): symbol.fillColor = symbol.fillColor or color symbol.x, symbol.y = x, y else: symbol = None return symbol class _isSymbol(Validator): def test(self,x): return callable(x) or isinstance(x,Marker) or isinstance(x,Flag) \ or (type(x)==ClassType and issubclass(x,Widget)) isSymbol = _isSymbol() def makeMarker(name,**kw): if Marker._attrMap['kind'].validate(name): m = apply(Marker,(),kw) m.kind = name elif name[-5:]=='_Flag' and Flag._attrMap['kind'].validate(name[:-5]): m = apply(Flag,(),kw) m.kind = name[:-5] m.size = 10 else: raise ValueError, "Invalid marker name %s" % name return m if __name__=='__main__': D = Drawing() D.add(Marker()) D.save(fnRoot='Marker',formats=['pdf'], outDir='/tmp')
bsd-3-clause
karamcnair/zulip
zerver/lib/unminify.py
5
1693
from __future__ import absolute_import import re import os.path import sourcemap from six.moves import map class SourceMap(object): '''Map (line, column) pairs from generated to source file.''' def __init__(self, sourcemap_dir): self._dir = sourcemap_dir self._indices = {} def _index_for(self, minified_src): '''Return the source map index for minified_src, loading it if not already loaded.''' if minified_src not in self._indices: with open(os.path.join(self._dir, minified_src + '.map')) as fp: self._indices[minified_src] = sourcemap.load(fp) return self._indices[minified_src] def annotate_stacktrace(self, stacktrace): out = '' for ln in stacktrace.splitlines(): out += ln + '\n' match = re.search(r'/static/min/(.+)(\.[0-9a-f]+)\.js:(\d+):(\d+)', ln) if match: # Get the appropriate source map for the minified file. minified_src = match.groups()[0] + '.js' index = self._index_for(minified_src) gen_line, gen_col = list(map(int, match.groups()[2:4])) # The sourcemap lib is 0-based, so subtract 1 from line and col. try: result = index.lookup(line=gen_line-1, column=gen_col-1) out += (' = %s line %d column %d\n' % (result.src, result.src_line+1, result.src_col+1)) except IndexError: out += ' [Unable to look up in source map]\n' if ln.startswith(' at'): out += '\n' return out
apache-2.0
RockySteveJobs/python-for-android
python-modules/twisted/twisted/conch/mixin.py
61
1365
# -*- test-case-name: twisted.conch.test.test_mixin -*- # Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """ Experimental optimization This module provides a single mixin class which allows protocols to collapse numerous small writes into a single larger one. @author: Jp Calderone """ from twisted.internet import reactor class BufferingMixin: """Mixin which adds write buffering. """ _delayedWriteCall = None bytes = None DELAY = 0.0 def schedule(self): return reactor.callLater(self.DELAY, self.flush) def reschedule(self, token): token.reset(self.DELAY) def write(self, bytes): """Buffer some bytes to be written soon. Every call to this function delays the real write by C{self.DELAY} seconds. When the delay expires, all collected bytes are written to the underlying transport using L{ITransport.writeSequence}. """ if self._delayedWriteCall is None: self.bytes = [] self._delayedWriteCall = self.schedule() else: self.reschedule(self._delayedWriteCall) self.bytes.append(bytes) def flush(self): """Flush the buffer immediately. """ self._delayedWriteCall = None self.transport.writeSequence(self.bytes) self.bytes = None
apache-2.0
zzeleznick/zDjango
venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.py
294
6173
# -*- coding: utf-8 -*- """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import os import re import time import hashlib import logging from base64 import b64encode from .compat import urlparse, str from .cookies import extract_cookies_to_jar from .utils import parse_dict_header log = logging.getLogger(__name__) CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): """Returns a Basic Auth string.""" return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1') class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password self.last_nonce = '' self.nonce_count = 0 self.chal = {} self.pos = None def build_digest_header(self, method, url): realm = self.chal['realm'] nonce = self.chal['nonce'] qop = self.chal.get('qop') algorithm = self.chal.get('algorithm') opaque = self.chal.get('opaque') if algorithm is None: _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def 
md5_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) path = p_parsed.path if p_parsed.query: path += '?' + p_parsed.query A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self.last_nonce: self.nonce_count += 1 else: self.nonce_count = 1 ncvalue = '%08x' % self.nonce_count s = str(self.nonce_count).encode('utf-8') s += nonce.encode('utf-8') s += time.ctime().encode('utf-8') s += os.urandom(8) cnonce = (hashlib.sha1(s).hexdigest()[:16]) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2) if _algorithm == 'MD5-SESS': HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if qop is None: respdig = KD(HA1, "%s:%s" % (nonce, HA2)) elif qop == 'auth' or 'auth' in qop.split(','): respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self.last_nonce = nonce # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: base += ', opaque="%s"' % opaque if algorithm: base += ', algorithm="%s"' % algorithm if entdig: base += ', digest="%s"' % entdig if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return 'Digest %s' % (base) def handle_401(self, r, **kwargs): """Takes the given response and tries digest-auth, if needed.""" if self.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. 
r.request.body.seek(self.pos) num_401_calls = getattr(self, 'num_401_calls', 1) s_auth = r.headers.get('www-authenticate', '') if 'digest' in s_auth.lower() and num_401_calls < 2: setattr(self, 'num_401_calls', num_401_calls + 1) pat = re.compile(r'digest ', flags=re.IGNORECASE) self.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.raw.release_conn() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers['Authorization'] = self.build_digest_header( prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r setattr(self, 'num_401_calls', 1) return r def __call__(self, r): # If we have a saved nonce, skip the 401 if self.last_nonce: r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self.pos = r.body.tell() except AttributeError: pass r.register_hook('response', self.handle_401) return r
mit
phobson/statsmodels
statsmodels/tsa/statespace/tests/results/results_structural.py
7
7399
""" Results for SARIMAX tests Results from R, KFAS library using script `test_ucm.R`. See also Stata time series documentation. Author: Chad Fulton License: Simplified-BSD """ from numpy import pi irregular = { 'models': [ {'irregular': True}, {'level': 'irregular'}, {'level': 'ntrend'}, ], 'params': [36.74687342], 'llf': -653.8562525, 'kwargs': {} } # this model will issue a warning that there is no stochastic component, and # will then add an irregular component. Thus it's output will be just like # the "deterministic constant" model. fixed_intercept = { 'models': [ {'level': True}, {'level': 'fixed intercept'}, ], 'params': [2.127438969], 'llf': -365.5289923, 'kwargs': {} } deterministic_constant = { 'models': [ {'irregular': True, 'level': True}, {'level': 'deterministic constant'}, {'level': 'dconstant'}, ], 'params': [2.127438969], 'llf': -365.5289923, 'kwargs': {} } local_level = { 'models': [ {'irregular': True, 'level': True, 'stochastic_level': True}, {'level': 'local level'}, {'level': 'llevel'} ], 'params': [4.256647886e-06, 1.182078808e-01], 'llf': -70.97242557, 'kwargs': {} } random_walk = { 'models': [ {'level': True, 'stochastic_level': True}, {'level': 'random walk'}, {'level': 'rwalk'}, ], 'params': [0.1182174646], 'llf': -70.96771641, 'kwargs': {} } # this model will issue a warning that there is no stochastic component, and # will then add an irregular component. Thus it's output will be just like # the "deterministic trend" model. 
fixed_slope = { 'models': [ {'level': True, 'trend': True}, {'level': 'fixed slope'}, ], 'params': [2.134137554], 'llf': -370.7758666, 'kwargs': {} } deterministic_trend = { 'models': [ {'irregular': True, 'level': True, 'trend': True}, {'level': 'deterministic trend'}, {'level': 'dtrend'}, ], 'params': [2.134137554], 'llf': -370.7758666, 'kwargs': {} } local_linear_deterministic_trend = { 'models': [ {'irregular': True, 'level': True, 'stochastic_level': True, 'trend': True}, {'level': 'local linear deterministic trend'}, {'level': 'lldtrend'}, ], 'params': [4.457592057e-06, 1.184455029e-01], 'llf': -73.47291031, 'kwargs': {} } random_walk_with_drift = { 'models': [ {'level': True, 'stochastic_level': True, 'trend': True}, {'level': 'random walk with drift'}, {'level': 'rwdrift'}, ], 'params': [0.1184499547], 'llf': -73.46798576, 'kwargs': {} } local_linear_trend = { 'models': [ {'irregular': True, 'level': True, 'stochastic_level': True, 'trend': True, 'stochastic_trend': True}, {'level': 'local linear trend'}, {'level': 'lltrend'} ], 'params': [1.339852549e-06, 1.008704925e-02, 6.091760810e-02], 'llf': -31.15640107, 'kwargs': {} } smooth_trend = { 'models': [ {'irregular': True, 'level': True, 'trend': True, 'stochastic_trend': True}, {'level': 'smooth trend'}, {'level': 'strend'}, ], 'params': [0.0008824099119, 0.0753064234342], 'llf': -31.92261408, 'kwargs': {} } random_trend = { 'models': [ {'level': True, 'trend': True, 'stochastic_trend': True}, {'level': 'random trend'}, {'level': 'rtrend'}, ], 'params': [0.08054724989], 'llf': -32.05607557, 'kwargs': {} } cycle = { 'models': [{'irregular': True, 'cycle': True, 'stochastic_cycle': True, 'damped_cycle': True}], 'params': [37.57197224, 0.1, 2*pi/10, 1], 'llf': -672.3102588, 'kwargs': { # Required due to the way KFAS estimated loglikelihood which P1inf is # set in the R code 'loglikelihood_burn': 0 } } seasonal = { 'models': [{'irregular': True, 'seasonal': 4}], 'params': [38.1704278, 0.1], 'llf': 
-655.3337155, 'kwargs': {}, 'rtol': 1e-6 } reg = { # Note: The test needs to fill in exog=np.log(dta['realgdp']) 'models': [ {'irregular': True, 'exog': True, 'mle_regression': False}, {'level': 'irregular', 'exog': True, 'mle_regression': False}, {'level': 'ntrend', 'exog': True, 'mle_regression': False}, {'level': 'ntrend', 'exog': 'numpy', 'mle_regression': False}, ], 'params': [2.215447924], 'llf': -379.6233483, 'kwargs': { # Required due to the way KFAS estimated loglikelihood which P1inf is # set in the R code 'loglikelihood_burn': 0 } } rtrend_ar1 = { 'models': [ {'level': True, 'trend': True, 'stochastic_trend': True, 'autoregressive': 1}, {'level': 'random trend', 'autoregressive': 1}, {'level': 'rtrend', 'autoregressive': 1} ], 'params': [0.0609, 0.0097, 0.9592], 'llf': -31.15629379, 'kwargs': {} } lltrend_cycle_seasonal_reg_ar1 = { # Note: The test needs to fill in exog=np.log(dta['realgdp']) 'models': [ # Complete specification {'irregular': True, 'level': True, 'stochastic_level': True, 'trend': True, 'stochastic_trend': True, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False}, # Verbose string specification {'level': 'local linear trend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False}, # Abbreviated string specification {'level': 'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False}, # Numpy exog dataset {'level': 'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': 'numpy', 'mle_regression': False,}, # Annual frequency dataset {'level': 'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False, 'freq':'AS'}, # Quarterly frequency dataset {'level': 
'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False, 'freq':'QS'}, # Monthly frequency dataset {'level': 'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False, 'freq':'MS'}, # Minutely frequency dataset {'level': 'lltrend', 'autoregressive': 1, 'cycle': True, 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1, 'exog': True, 'mle_regression': False, 'freq':'T', 'cycle_period_bounds': (1.5*12, 12*12)}, ], 'params': [0.0001, 0.01, 0.06, 0.0001, 0.0001, 0.1, 2*pi / 10, 0.2], 'llf': -168.5258709, 'kwargs': { # Required due to the way KFAS estimated loglikelihood which P1inf is # set in the R code 'loglikelihood_burn': 0 } }
bsd-3-clause
themartorana/django-hoptoad
hoptoad/api/htv1.py
1
5326
import traceback import urllib2 import yaml from django.views.debug import get_safe_settings from django.conf import settings from hoptoad import get_hoptoad_settings PROTECTED_PARAMS = frozenset(get_hoptoad_settings() .get('HOPTOAD_PROTECTED_PARAMS', [])) def _parse_environment(request): """Return an environment mapping for a notification from the given request. """ env = dict((str(k), str(v)) for (k, v) in get_safe_settings().items()) env.update(dict((str(k), str(v)) for (k, v) in request.META.items())) env['REQUEST_URI'] = request.build_absolute_uri() return env def _parse_traceback(trace): """Return the given traceback string formatted for a notification.""" p_traceback = ["%s:%d:in '%s'" % (filename, lineno, funcname) for filename, lineno, funcname, _ in traceback.extract_tb(trace)] p_traceback.reverse() return p_traceback def _parse_message(exc): """Return a message for a notification from the given exception.""" return '%s: %s' % (exc.__class__.__name__, unicode(exc)) def _parse_request(request): """Return a request mapping for a notification from the given request.""" data = [] for (k, v) in request.POST.items(): try: data.append((str(k), str(v.encode('ascii', 'replace')))) except UnicodeEncodeError: data.append((str(k), repr(v))) if not data: for (k, v) in request.GET.items(): try: data.append((str(k), str(v.encode('ascii', 'replace')))) except UnicodeEncodeError: data.append((str(k), repr(v))) data = dict(data) for k in PROTECTED_PARAMS.intersection(data.keys()): data[k] = '********************' return data def _parse_session(session): """Return a request mapping for a notification from the given session.""" try: session_keys = session.keys() except Exception: # It is possible, especially if you're using a 100% ACID-compliant # database, that an exception was thrown and the database has # stopped processing any further transactions without a rollback # issued. 
# # It shouldn't be the job of a middleware instance to issue a # rollback, so, we will just return an empty dictionary with the # error messages return {"SessionInfo": "Couldn't extract session because the database " "had a failed transaction. "} else: if not session_keys: return {"SessionInfo": "No session information could be extracted"} try: session_items = session.items() except Exception: # Same reasoning as above for the session keys return {"SessionInfo": "Couldn't extract session because the database " "had a failed transaction. "} else: return dict((str(k), str(v)) for (k, v) in session_items) def _generate_payload(request, exc=None, trace=None, message=None, error_class=None): """Generate a YAML payload for a Hoptoad notification. Parameters: request -- A Django HTTPRequest. This is required. Keyword parameters: exc -- A Python Exception object. If this is not given the mess parameter must be. trace -- A Python Traceback object. This is not required. message -- A string representing the error message. If this is not given, the exc parameter must be. error_class -- A string representing the error class. If this is not given the excc parameter must be. """ p_message = message if message else _parse_message(exc) p_error_class = error_class if error_class else exc.__class__.__name__ p_traceback = _parse_traceback(trace) if trace else [] p_environment = _parse_environment(request) p_request = _parse_request(request) p_session = _parse_session(request.session) return yaml.dump({'notice': {'api_key': settings.HOPTOAD_API_KEY, 'error_class': p_error_class, 'error_message': p_message, 'backtrace': p_traceback, 'request': {'url': request.build_absolute_uri(), 'params': p_request}, 'session': {'key': '', 'data': p_session}, 'environment': p_environment, }}, default_flow_style=False) def _ride_the_toad(payload, timeout): """Send a notification (an HTTP POST request) to Hoptoad. 
Parameters: payload -- the YAML payload for the request from _generate_payload() timeout -- the maximum timeout, in seconds, or None to use the default """ headers = {'Content-Type': 'application/x-yaml', 'Accept': 'text/xml, application/xml', } r = urllib2.Request('http://hoptoadapp.com/notices', payload, headers) try: if timeout: urllib2.urlopen(r, timeout=timeout) else: urllib2.urlopen(r) except urllib2.URLError: pass def report(payload, timeout): return _ride_the_toad(payload, timeout)
mit
alheinecke/tensorflow-xsmm
tensorflow/python/training/moving_averages_test.py
73
15366
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for moving_averages.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import moving_averages class MovingAveragesTest(test.TestCase): def testAssignMovingAverageWithoutZeroDebias(self): with self.test_session(): var = variables.Variable([10.0, 11.0]) val = constant_op.constant([1.0, 2.0], dtypes.float32) decay = 0.25 assign = moving_averages.assign_moving_average( var, val, decay, zero_debias=False) variables.global_variables_initializer().run() self.assertAllClose([10.0, 11.0], var.eval()) assign.op.run() self.assertAllClose( [10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)], var.eval()) def testAssignMovingAverage(self): with self.test_session(): var = variables.Variable([0.0, 0.0]) val = constant_op.constant([1.0, 2.0], dtypes.float32) decay = 0.25 assign = 
moving_averages.assign_moving_average(var, val, decay) variables.global_variables_initializer().run() self.assertAllClose([0.0, 0.0], var.eval()) assign.op.run() self.assertAllClose([ 1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25) ], var.eval()) def testWeightedMovingAverage(self): with self.test_session() as sess: decay = 0.5 weight = array_ops.placeholder(dtypes.float32, []) val = array_ops.placeholder(dtypes.float32, []) wma = moving_averages.weighted_moving_average(val, decay, weight) variables.global_variables_initializer().run() # Get the first weighted moving average. val_1 = 3.0 weight_1 = 4.0 wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1}) numerator_1 = val_1 * weight_1 * (1.0 - decay) denominator_1 = weight_1 * (1.0 - decay) self.assertAllClose(numerator_1 / denominator_1, wma_array) # Get the second weighted moving average. val_2 = 11.0 weight_2 = 22.0 wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2}) numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay) denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay) self.assertAllClose(numerator_2 / denominator_2, wma_array) def _Repeat(value, dim): if dim == 1: return value return [value] * dim class ExponentialMovingAverageTest(test.TestCase): def _CheckDecay(self, ema, actual_decay, dim): def _Scale(dk, steps): if ema._zero_debias: return 1 - dk**steps else: return 1 tens = _Repeat(10.0, dim) thirties = _Repeat(30.0, dim) var0 = variables.Variable(tens, name="v0") var1 = variables.Variable(thirties, name="v1") variables.global_variables_initializer().run() # Note that tensor2 is not a Variable but just a plain Tensor resulting # from the sum operation. 
tensor2 = var0 + var1 update = ema.apply([var0, var1, tensor2]) avg0 = ema.average(var0) avg1 = ema.average(var1) avg2 = ema.average(tensor2) self.assertItemsEqual([var0, var1], variables.moving_average_variables()) self.assertFalse(avg0 in variables.trainable_variables()) self.assertFalse(avg1 in variables.trainable_variables()) self.assertFalse(avg2 in variables.trainable_variables()) variables.global_variables_initializer().run() self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name) self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name) self.assertEqual("add/ExponentialMovingAverage:0", avg2.name) # Check initial values. self.assertAllClose(tens, var0.eval()) self.assertAllClose(thirties, var1.eval()) self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval()) # Check that averages are initialized correctly. self.assertAllClose(tens, avg0.eval()) self.assertAllClose(thirties, avg1.eval()) # Note that averages of Tensor's initialize to zeros_like since no value # of the Tensor is known because the Op has not been run (yet). self.assertAllClose(_Repeat(0.0, dim), avg2.eval()) # Update the averages and check. update.run() dk = actual_decay expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim) self.assertAllClose(expected, avg0.eval()) expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim) self.assertAllClose(expected, avg1.eval()) expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk) / _Scale(dk, 1), dim) self.assertAllClose(expected, avg2.eval()) # Again, update the averages and check. 
update.run() expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk), dim) self.assertAllClose(expected, avg0.eval()) expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk), dim) self.assertAllClose(expected, avg1.eval()) expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk + (10.0 + 30.0) * (1 - dk)) / _Scale(dk, 2), dim) self.assertAllClose(expected, avg2.eval()) def testAverageVariablesNoNumUpdates_Scalar(self): with self.test_session(): ema = moving_averages.ExponentialMovingAverage(0.25) self._CheckDecay(ema, actual_decay=0.25, dim=1) def testAverageVariablesNoNumUpdates_Scalar_Debias(self): with self.test_session(): ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True) self._CheckDecay(ema, actual_decay=0.25, dim=1) def testAverageVariablesNoNumUpdates_Vector(self): with self.test_session(): ema = moving_averages.ExponentialMovingAverage(0.25) self._CheckDecay(ema, actual_decay=0.25, dim=5) def testAverageVariablesNoNumUpdates_Vector_Debias(self): with self.test_session(): ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True) self._CheckDecay(ema, actual_decay=0.25, dim=5) def testAverageVariablesNumUpdates_Scalar(self): with self.test_session(): # With num_updates 1, the decay applied is 0.1818 ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1) self._CheckDecay(ema, actual_decay=0.181818, dim=1) def testAverageVariablesNumUpdates_Scalar_Debias(self): with self.test_session(): # With num_updates 1, the decay applied is 0.1818 ema = moving_averages.ExponentialMovingAverage( 0.25, num_updates=1, zero_debias=True) self._CheckDecay(ema, actual_decay=0.181818, dim=1) def testAverageVariablesNumUpdates_Vector(self): with self.test_session(): # With num_updates 1, the decay applied is 0.1818 ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1) self._CheckDecay(ema, actual_decay=0.181818, dim=5) def testAverageVariablesNumUpdates_Vector_Debias(self): with 
self.test_session(): # With num_updates 1, the decay applied is 0.1818 ema = moving_averages.ExponentialMovingAverage( 0.25, num_updates=1, zero_debias=True) self._CheckDecay(ema, actual_decay=0.181818, dim=5) def testAverageVariablesWithControlDeps(self): with self.test_session() as sess: v0 = variables.Variable(0, name="v0") add_to_v0 = v0.assign_add(1) v1 = variables.Variable([10.0], name="v1") assign_to_v1 = v1.assign([20.0]) ema = moving_averages.ExponentialMovingAverage(0.25) with ops.control_dependencies([add_to_v0]): ema_op = ema.apply([v1]) # the moving average of v1 should not have any control inputs v1_avg = ema.average(v1) self.assertEqual([], v1_avg.initializer.control_inputs) self.assertEqual([], v1_avg.value().op.control_inputs) self.assertEqual([], v1_avg.value().op.control_inputs) # We should be able to initialize v1_avg before v0. sess.run(v1_avg.initializer) sess.run(v0.initializer) self.assertEqual([10.0], sess.run(v1_avg)) # running ema_op should add to v0 (in addition to updating v1_avg) sess.run(assign_to_v1) sess.run(ema_op) self.assertEqual(1, sess.run(v0)) self.assertEqual([17.5], sess.run(v1_avg)) def averageVariablesNamesHelper(self, zero_debias): with self.test_session(): v0 = variables.Variable(10.0, name="v0") v1 = variables.Variable(30.0, name="v1") # Add a non-trainable variable. 
v2 = variables.Variable(20.0, name="v2", trainable=False) tensor2 = v0 + v1 ema = moving_averages.ExponentialMovingAverage( 0.25, zero_debias=zero_debias, name="foo") self.assertEqual("v0/foo", ema.average_name(v0)) self.assertEqual("v1/foo", ema.average_name(v1)) self.assertEqual("add/foo", ema.average_name(tensor2)) ema.apply([v0, v1, tensor2]) vars_to_restore = ema.variables_to_restore() # vars_to_restore should contain the following: # {v0/foo : v0, # v1/foo : v1, # add/foo : add/foo, # v2 : v2} expected_names = [ ema.average_name(v0), ema.average_name(v1), ema.average_name(tensor2), v2.op.name ] if zero_debias: # vars_to_restore should also contain the following: # {add/foo/biased: add/foo/biased, # add/foo/local_step: add/foo/local_step} expected_names += [ ema.average_name(tensor2) + "/biased", ema.average_name(tensor2) + "/local_step" ] self.assertEqual(sorted(vars_to_restore.keys()), sorted(expected_names)) self.assertEqual(ema.average_name(v0), ema.average(v0).op.name) self.assertEqual(ema.average_name(v1), ema.average(v1).op.name) self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name) def testAverageVariablesNames(self): self.averageVariablesNamesHelper(zero_debias=True) def testAverageVariablesNamesNoDebias(self): self.averageVariablesNamesHelper(zero_debias=False) def averageVariablesNamesRespectScopeHelper(self, zero_debias): # See discussion on #2740. with self.test_session(): with variable_scope.variable_scope("scope1"): v0 = variables.Variable(10.0, name="v0") v1 = variables.Variable(30.0, name="v1") # Add a non-trainable variable. 
v2 = variables.Variable(20.0, name="v2", trainable=False) tensor2 = v0 + v1 with variable_scope.variable_scope("scope2"): ema = moving_averages.ExponentialMovingAverage( 0.25, zero_debias=zero_debias, name="foo") self.assertEqual("scope2/scope1/v0/foo", ema.average_name(v0)) self.assertEqual("scope2/scope1/v1/foo", ema.average_name(v1)) self.assertEqual("scope2/scope1/add/foo", ema.average_name(tensor2)) ema.apply([v0, v1, tensor2]) vars_to_restore = ema.variables_to_restore() # vars_to_restore should contain the following: # {scope2/scope1/v0/foo : v0, # scope2/scope1/v1/foo : v1, # scope2/scope1/add/foo : add/foo, # scope1/v2 : v2} expected_names = [ ema.average_name(v0), ema.average_name(v1), ema.average_name(tensor2), v2.op.name ] if zero_debias: # vars_to_restore should also contain the following: # {scope2/scope2/scope1/add/foo/biased: add/foo/biased, # scope2/scope2/scope1/add/foo/local_step: add/foo/local_step} sc = "scope2/" expected_names += [ sc + ema.average_name(tensor2) + "/biased", sc + ema.average_name(tensor2) + "/local_step" ] self.assertEqual(sorted(vars_to_restore.keys()), sorted(expected_names)) self.assertEqual(ema.average_name(v0), ema.average(v0).op.name) self.assertEqual(ema.average_name(v1), ema.average(v1).op.name) self.assertEqual( ema.average_name(tensor2), ema.average(tensor2).op.name) def testAverageVariablesNamesRespectScope(self): self.averageVariablesNamesRespectScopeHelper(zero_debias=True) def testAverageVariablesNamesRespectScopeNoDebias(self): self.averageVariablesNamesRespectScopeHelper(zero_debias=False) def testSubsetAverageVariablesNames(self): with self.test_session(): v0 = variables.Variable(10.0, name="v0") v1 = variables.Variable(30.0, name="v1") # Add a non-trainable variable. 
v2 = variables.Variable(20.0, name="v2", trainable=False) tensor2 = v0 + v1 ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg") self.assertEqual("v0/foo_avg", ema.average_name(v0)) self.assertEqual("v1/foo_avg", ema.average_name(v1)) self.assertEqual("add/foo_avg", ema.average_name(tensor2)) vars_to_restore = ema.variables_to_restore([v0, tensor2]) # vars_to_restore should contain the following: # {v0/foo_avg : v0, # add/foo_avg : add # v1 : v1, # v2 : v2} self.assertEqual( sorted(vars_to_restore.keys()), sorted([ ema.average_name(v0), ema.average_name(tensor2), v1.op.name, v2.op.name ])) ema.apply([v0, v1, tensor2]) self.assertEqual(ema.average_name(v0), ema.average(v0).op.name) self.assertEqual(ema.average_name(v1), ema.average(v1).op.name) self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name) def testAverageVariablesDeviceAssignment(self): with ops.device("/job:dev_v0"): v0 = variables.Variable(10.0, name="v0") with ops.device("/job:dev_v1"): v1 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="v1", container="", shared_name="") v1.set_shape([1]) tensor2 = v0 + v1 ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg") with ops.device("/job:default"): ema.apply([v0, v1, tensor2]) self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device) self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device) # However, the colocation property is maintained. self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups()) self.assertDeviceEqual("/job:default", ema.average(tensor2).device) if __name__ == "__main__": test.main()
apache-2.0
nathaniel-mahieu/bitcoin
qa/rpc-tests/getblocktemplate_longpoll.py
103
2976
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

import threading


class LongpollThread(threading.Thread):
    """Background thread issuing a single long-polling getblocktemplate call."""

    def __init__(self, node):
        threading.Thread.__init__(self)
        # Remember the node's current longpollid before the thread starts.
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # A JSON-RPC connection cannot be shared between two threads, so
        # open a dedicated proxy for this thread.
        self.node = get_rpc_proxy(node.url, 1, timeout=600)

    def run(self):
        # Blocks until the node decides the current template is stale.
        self.node.getblocktemplate({'longpollid': self.longpollid})


class GetBlockTemplateLPTest(BitcoinTestFramework):
    '''
    Test longpolling with getblocktemplate.
    '''

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def run_test(self):
        print("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        first_id = self.nodes[0].getblocktemplate()['longpollid']
        # Nothing happened on the node between the two calls, so the
        # longpollid must not have changed.
        assert(self.nodes[0].getblocktemplate()['longpollid'] == first_id)

        # Test 1: with no chain or mempool activity, the poll keeps waiting.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        poller.join(5)  # give it 5 seconds; it should still be blocked
        assert(poller.is_alive())

        # Test 2: a block mined on another node terminates the poll.
        self.nodes[1].generate(1)
        poller.join(5)
        assert(not poller.is_alive())

        # Test 3: a block mined by the polled node itself terminates it too.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        self.nodes[0].generate(1)
        poller.join(5)
        assert(not poller.is_alive())

        # Test 4: a new transaction entering the mempool terminates the poll.
        poller = LongpollThread(self.nodes[0])
        poller.start()
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # After one minute the mempool is probed every 10 seconds, so within
        # 80 seconds the poll should have returned.
        poller.join(60 + 20)
        assert(not poller.is_alive())


if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
mit
dunkhong/grr
grr/core/grr_response_core/lib/rdfvalues/chipsec_types.py
2
1327
#!/usr/bin/env python """RDFValues used to communicate with Chipsec.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_proto import chipsec_pb2 class DumpFlashImageRequest(rdf_structs.RDFProtoStruct): """A request to Chipsec to dump the flash image (BIOS).""" protobuf = chipsec_pb2.DumpFlashImageRequest class DumpFlashImageResponse(rdf_structs.RDFProtoStruct): """A response from Chipsec to dump the flash image (BIOS).""" protobuf = chipsec_pb2.DumpFlashImageResponse rdf_deps = [ rdf_paths.PathSpec, ] class ACPITableData(rdf_structs.RDFProtoStruct): """Response from Chipsec for one ACPI table.""" protobuf = chipsec_pb2.ACPITableData rdf_deps = [ rdfvalue.RDFBytes, ] class DumpACPITableRequest(rdf_structs.RDFProtoStruct): """A request to Chipsec to dump an ACPI table.""" protobuf = chipsec_pb2.DumpACPITableRequest class DumpACPITableResponse(rdf_structs.RDFProtoStruct): """A response from Chipsec to dump an ACPI table.""" protobuf = chipsec_pb2.DumpACPITableResponse rdf_deps = [ ACPITableData, ]
apache-2.0
fabianmurariu/FrameworkBenchmarks
frameworks/Python/wheezyweb/app.py
20
4676
import os
import sys
from functools import partial
from operator import attrgetter
from random import randint

import bleach

from wheezy.http import HTTPResponse
from wheezy.http import WSGIApplication
from wheezy.routing import url
from wheezy.web.handlers import BaseHandler
from wheezy.web.middleware import bootstrap_defaults
from wheezy.web.middleware import path_routing_middleware_factory
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.loader import FileLoader

from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column
from sqlalchemy.types import String, Integer, Unicode
from sqlalchemy.orm import sessionmaker

from meinheld import server

DBDRIVER = 'mysql'
DBHOSTNAME = os.environ.get('DBHOST', 'localhost')
DATABASE_URI = '%s://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % (DBDRIVER, DBHOSTNAME)

Base = declarative_base()
db_engine = create_engine(DATABASE_URI)
Session = sessionmaker(bind=db_engine)
db_session = Session()

if sys.version_info[0] == 3:
    xrange = range

# The template engine is stateless; build it once at import time instead of
# once per /fortune request (perf fix).
template_engine = Engine(loader=FileLoader(["views"]),
                         extensions=[CoreExtension()])


def getQueryNum(queryString):
    """Parse the 'queries' request parameter.

    Returns its integer value, or 1 when the parameter is missing or not a
    valid integer (as the benchmark spec requires).
    """
    try:
        # Bug fix: the original parsed the string twice and crashed with an
        # uncaught TypeError when the parameter was absent (None).
        return int(queryString)
    except (TypeError, ValueError):
        return 1


def _num_queries(queries):
    """Parse and clamp the requested query count to the mandated [1, 500]."""
    num = getQueryNum(queries)
    if num < 1:
        return 1
    if num > 500:
        return 500
    return num


class Fortune(Base):
    __tablename__ = "Fortune"
    id = Column(Integer, primary_key=True)
    message = Column(String)

    def serialize(self):
        """Return this row as a plain dict.

        Bug fix: the original returned self.randomNumber, which does not
        exist on Fortune (copy-paste from World) and raised AttributeError.
        """
        return {
            'id': self.id,
            'message': self.message,
        }


class World(Base):
    __tablename__ = "World"
    id = Column(Integer, primary_key=True)
    randomNumber = Column(Integer)

    def serialize(self):
        """Return this row as a plain dict."""
        return {
            'id': self.id,
            'randomNumber': self.randomNumber,
        }


class JsonHandler(BaseHandler):
    """Benchmark 'json' test: serialize a static payload."""

    def get(self):
        response = self.json_response({"message": "Hello, World!"})
        response.headers = [("Content-Type", "application/json; charset=UTF-8")]
        return response


class DbHandler(BaseHandler):
    """Benchmark 'db' test: fetch one random World row."""

    def get(self):
        db_engine.connect()
        wid = randint(1, 10000)
        world = db_session.query(World).get(wid).serialize()
        return self.json_response(world)


class QueriesHandler(BaseHandler):
    """Benchmark 'queries' test: fetch N random World rows."""

    def get(self):
        num_queries = _num_queries(self.request.get_param("queries"))
        rp = partial(randint, 1, 10000)
        get = db_session.query(World).get
        worlds = [get(rp()).serialize() for _ in xrange(num_queries)]
        return self.json_response(worlds)


class UpdatesHandler(BaseHandler):
    """Benchmark 'updates' test: update N random World rows."""

    def get(self):
        num_queries = _num_queries(self.request.get_param("queries"))
        worlds = []
        rp = partial(randint, 1, 10000)
        ids = [rp() for _ in xrange(num_queries)]
        ids.sort()  # To avoid deadlock
        for id in ids:
            world = db_session.query(World).get(id)
            world.randomNumber = rp()
            worlds.append(world.serialize())
        db_session.commit()
        return self.json_response(worlds)


class FortuneHandler(BaseHandler):
    """Benchmark 'fortunes' test: render all fortunes, HTML-escaped."""

    def get(self):
        fortunes = db_session.query(Fortune).all()
        fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
        fortunes.sort(key=attrgetter("message"))
        template = template_engine.get_template("fortune.html")
        for f in fortunes:
            # Escape untrusted DB content before it reaches the template.
            f.message = bleach.clean(f.message)
        template_html = template.render({"fortunes": fortunes})
        response = HTTPResponse()
        response.write(template_html)
        return response


def plaintext(request):
    """Benchmark 'plaintext' test."""
    response = HTTPResponse()
    response.headers = [("Content-Type", "text/plain; charset=UTF-8")]
    response.write("Hello, world!")
    return response


all_urls = [
    url("plaintext", plaintext, name="plaintext"),
    url("json", JsonHandler, name="json"),
    url("db", DbHandler, name="db"),
    url("queries", QueriesHandler, name="queries"),
    url("updates", UpdatesHandler, name="updates"),
    url("fortune", FortuneHandler, name="fortune")
]

options = {}

app = WSGIApplication(
    middleware=[
        bootstrap_defaults(url_mapping=all_urls),
        path_routing_middleware_factory
    ],
    options=options
)


if __name__ == '__main__':
    server.listen(("127.0.0.1", 8080))
    server.run(app)
bsd-3-clause
kjchalup/neural_networks
neural_networks/mtn.py
1
7773
""" Multi-task networks. """ import numpy as np import tensorflow as tf from neural_networks import nn from neural_networks import scalers class MTN(nn.NN): def __init__(self, x_dim, y_dim, arch=[128, 128], ntype='plain', **kwargs): """ A multi-task network. The output is a concatenation of the outputs for all n_task tasks. Let the tasks have output dimensionalities y1, ..., yn. The input then consists of: 1) A task-flag section: a bit vector of length sum(yi), containing zeros everywhere except for coordinates corresponding to the task of the current input (where the bits are set to 1). 2) The true input, which must have the same dimensionality for all tasks. These two input parts should be concatenated. """ super().__init__(x_dim, y_dim, arch=arch, ntype=ntype, **kwargs) def define_loss(self): x_dim = self.x_tf.get_shape().as_list()[1] y_dim = self.y_tf.get_shape().as_list()[1] return tf.losses.mean_squared_error( self.y_tf, self.y_pred * self.x_tf[:, :y_dim]) def define_scalers(self): xscale = scalers.HalfScaler(ignore_dim=self.y_dim) yscale = scalers.StandardScaler() return xscale, yscale def make_data(x, y, n_data, noise_std, degree=3): """ Create a 'dataset' outputs: a random polynomial over noisy MNIST labels. Args: x (n_samples, 28**2): MNIST images. y (n_samples, 1): MNIST labels. n_data (int): Extract a subset of this size. noise_std (float): Standard deviation of Gaussian noise to be added to the labels. degree (int): Degree of the polynomial whose random coefficients will define this dataset. Returns: x, y: The dataset. 
""" y_orig = np.array(y) while True: n_samples = x.shape[0] data_ids = np.random.choice(n_samples, n_data, replace=False) coeffs = np.random.rand(degree) * 2 y = np.sum(np.array([coeffs[i] * y_orig**i for i in range(degree)]), axis=0).reshape(-1, 1) y += np.random.rand(*y.shape) * noise_std yield (x[data_ids], y[data_ids]) def concatenate_tasks(X, Y, task_start, task_end, samples, n_test): """ Given a list of X and Y data, extract sublists and concatenate into one dataset. Args: X (List((n_samples, x_dim))): Input data. Y (List((n_samples, y_dim))): Output data. task_start (int): First dataset to extract. task_end (int): Last dataset to extract. samples (int): Number of training samples to extract from each dataset. n_test (int): Number of test samples to extract from each data. Returns: X_multi, Y_multi: Training data, concatenated datasets between task_start and task_end. X_test, Y_test: As above, but test data. """ n_tasks = task_end - task_start X_multi = np.zeros((samples * n_tasks, n_tasks + dim)) Y_multi = np.zeros((samples * n_tasks, n_tasks)) X_test = np.zeros((n_test * n_tasks, n_tasks + dim)) Y_test = np.zeros((n_test * n_tasks, n_tasks)) for task_id_id, task_id in enumerate(range(task_start, task_end)): X_multi[task_id_id*samples : (task_id_id+1)*samples, task_id_id:task_id_id+1] = 1. data = np.array(X[task_id]) X_multi[task_id_id*samples : (task_id_id+1)*samples, n_tasks:] = data[:samples] Y_multi[task_id_id*samples : (task_id_id+1)*samples, task_id_id:task_id_id+1] = Y[task_id_id][:samples] X_test[task_id_id*n_test : (task_id_id+1)*n_test, task_id_id:task_id_id+1] = 1. data = np.array(X[task_id]) X_test[task_id_id*n_test : (task_id_id+1)*n_test, n_tasks:] = data[samples:] Y_test[task_id_id*n_test : (task_id_id+1)*n_test, task_id_id:task_id+1] = Y[task_id_id][samples:] return X_multi, Y_multi, X_test, Y_test if __name__=="__main__": """ Check that everything works as expected. 
""" print('===============================================================') print('Evaluating MTN. Takes about 30min on a Titan X machine.') print('===============================================================') import numpy as np from tensorflow.examples.tutorials.mnist import input_data import matplotlib.pyplot as plt # Load MNIST data. mnist = input_data.read_data_sets("MNIST_data/", one_hot=False) mY = mnist.train.labels mX = mnist.train.images.reshape(mY.shape[0], -1) dim = mX.shape[1] # Fix task and nn parameters. n_task_list = [1, 2, 4, 8, 16, 32] max_tasks = max(n_task_list) samples = 100 noise_std = .1 n_test = 10000 kwargs = { 'arch': [32]*30, #[32] * 30, 'ntype': 'highway', 'batch_size': 32, 'lr': 1e-4, 'valsize': .3, 'epochs': 10000 } kwargs = {} # Make data: X is a list of datasets, each with the same coordinates # but potentially 1) different sample sizes and # 2) different output tasks in Y. np.random.seed(1) data = make_data(mX, mY, samples + n_test, noise_std) X, Y = zip(*[next(data) for _ in range(max_tasks)]) errs_mtn = np.zeros((len(n_task_list), max_tasks)) for n_tasks_id, n_tasks in enumerate(n_task_list): print('=' * 70) print('Starting {}-split training'.format(n_tasks)) print('=' * 70) # Run multi-task prediction on a group of n_tasks tasks. for task_start in range(0, max_tasks, n_tasks): print('task_start = {}'.format(task_start)) X_multi, Y_multi, X_test, Y_test = concatenate_tasks( X, Y, task_start, task_start + n_tasks, samples, n_test) # Create the Tensorflow graph. mtnet = MTN(x_dim=X_multi.shape[1], y_dim=n_tasks, **kwargs) with tf.Session() as sess: # Define the Tensorflow session, and its initializer op. sess.run(tf.global_variables_initializer()) # Fit the net. 
mtnet.fit(X_multi, Y_multi, sess=sess, nn_verbose=True, **kwargs) # Run the prediction on the task-group for task_id in range(n_tasks): mtnpred = mtnet.predict( X_test[task_id*n_test:(task_id+1)*n_test], sess=sess) mtnpred = mtnpred[:, task_id:task_id+1] errs_mtn[n_tasks_id, task_start+task_id] = ( np.sqrt(np.mean(( mtnpred - Y_test[task_id*n_test:(task_id+1)*n_test, task_id:task_id+1])**2))) # Reset the neural net graph. tf.reset_default_graph() print('Done\n.') vmax = np.max(errs_mtn) plt.figure(figsize=(10, 10)) plt.subplot(2, 1, 1) barw = .8 / len(n_task_list) for n_tasks_id in range(len(n_task_list)): plt.title('MTN performance as number of tasks grows'.format( n_task_list[n_tasks_id])) plt.xlabel('task ID') plt.ylabel('error') plt.bar(np.arange(max_tasks) + n_tasks_id * barw, width=barw, height=errs_mtn[n_tasks_id], label='{}'.format(n_task_list[n_tasks_id])) plt.ylim([0, vmax]) plt.legend(loc=0) plt.subplot(2, 1, 2) plt.xlabel('n_tasks') plt.ylabel('average error') plt.plot(n_task_list, errs_mtn.mean(axis=1), 'o') plt.savefig('res.png')
gpl-3.0
saurabh6790/med_test_lib
webnotes/tests/test_db.py
34
1323
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import unittest import webnotes from webnotes.test_runner import make_test_records class TestDB(unittest.TestCase): def test_get_value(self): from webnotes.utils import now_datetime import time webnotes.conn.sql("""delete from `tabProfile` where name not in ('Administrator', 'Guest')""") now = now_datetime() self.assertEquals(webnotes.conn.get_value("Profile", {"name": ["=", "Administrator"]}), "Administrator") self.assertEquals(webnotes.conn.get_value("Profile", {"name": ["like", "Admin%"]}), "Administrator") self.assertEquals(webnotes.conn.get_value("Profile", {"name": ["!=", "Guest"]}), "Administrator") self.assertEquals(webnotes.conn.get_value("Profile", {"modified": ["<", now]}), "Administrator") self.assertEquals(webnotes.conn.get_value("Profile", {"modified": ["<=", now]}), "Administrator") time.sleep(2) if "Profile" in webnotes.test_objects: del webnotes.test_objects["Profile"] make_test_records("Profile") self.assertEquals("test1@example.com", webnotes.conn.get_value("Profile", {"modified": [">", now]})) self.assertEquals("test1@example.com", webnotes.conn.get_value("Profile", {"modified": [">=", now]}))
mit
ternaris/marv-robotics
code/marv/marv_node/tests/test_push_false_values.py
1
1710
# Copyright 2016 - 2020 Ternaris. # SPDX-License-Identifier: AGPL-3.0-only # pylint: disable=invalid-name import pytest from ..testing import make_dataset, marv, run_nodes class Falsish: def __bool__(self): return False def __eq__(self, other): return type(other) is type(self) def __repr__(self): return '<FALSISH>' FALSISH = Falsish() class DontBoolMe: def __bool__(self): raise Exception def __eq__(self, other): return type(other) is type(self) def __repr__(self): return '<DONTBOOLME>' DONTBOOLME = DontBoolMe() @marv.node() def source(): # yield marv.push(None) -- So far, this means we're done yield marv.push(0) yield marv.push(0.0) yield marv.push(False) yield marv.push('') yield marv.push(FALSISH) yield marv.push(DONTBOOLME) @marv.node() @marv.input('stream', default=source) def consumer1(stream): yield marv.push(42) while True: msg = yield marv.pull(stream) if msg is None: break yield marv.push(msg) @marv.node() @marv.input('stream', default=source) def consumer2(stream): yield marv.push(42) while True: msg, = yield marv.pull_all(stream) if msg is None: break yield marv.push(msg) DATASET = make_dataset() async def test(): with pytest.raises(Exception): if DONTBOOLME: pass nodes = [source, consumer1, consumer2] streams = await run_nodes(DATASET, nodes) assert streams == [ [0, 0.0, False, '', FALSISH, DONTBOOLME], [42, 0, 0.0, False, '', FALSISH, DONTBOOLME], [42, 0, 0.0, False, '', FALSISH, DONTBOOLME], ]
agpl-3.0
ilayn/scipy
scipy/fftpack/helper.py
12
3307
import operator from numpy.fft.helper import fftshift, ifftshift, fftfreq import scipy.fft._pocketfft.helper as _helper import numpy as np __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'] def rfftfreq(n, d=1.0): """DFT sample frequencies (for usage with rfft, irfft). The returned float array contains the frequency bins in cycles/unit (with zero at the start) given a window length `n` and a sample spacing `d`:: f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd Parameters ---------- n : int Window length. d : scalar, optional Sample spacing. Default is 1. Returns ------- out : ndarray The array of length `n`, containing the sample frequencies. Examples -------- >>> from scipy import fftpack >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> sig_fft = fftpack.rfft(sig) >>> n = sig_fft.size >>> timestep = 0.1 >>> freq = fftpack.rfftfreq(n, d=timestep) >>> freq array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ]) """ n = operator.index(n) if n < 0: raise ValueError("n = %s is not valid. " "n must be a nonnegative integer." % n) return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d) def next_fast_len(target): """ Find the next fast size of input data to `fft`, for zero-padding, etc. SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this returns the next composite of the prime factors 2, 3, and 5 which is greater than or equal to `target`. (These are also known as 5-smooth numbers, regular numbers, or Hamming numbers.) Parameters ---------- target : int Length to start searching from. Must be a positive integer. Returns ------- out : int The first 5-smooth number greater than or equal to `target`. Notes ----- .. 
versionadded:: 0.18.0 Examples -------- On a particular machine, an FFT of prime length takes 133 ms: >>> from scipy import fftpack >>> rng = np.random.default_rng() >>> min_len = 10007 # prime length is worst case for speed >>> a = rng.standard_normal(min_len) >>> b = fftpack.fft(a) Zero-padding to the next 5-smooth length reduces computation time to 211 us, a speedup of 630 times: >>> fftpack.helper.next_fast_len(min_len) 10125 >>> b = fftpack.fft(a, 10125) Rounding up to the next power of 2 is not optimal, taking 367 us to compute, 1.7 times as long as the 5-smooth size: >>> b = fftpack.fft(a, 16384) """ # Real transforms use regular sizes so this is backwards compatible return _helper.good_size(target, True) def _good_shape(x, shape, axes): """Ensure that shape argument is valid for scipy.fftpack scipy.fftpack does not support len(shape) < x.ndim when axes is not given. """ if shape is not None and axes is None: shape = _helper._iterable_of_int(shape, 'shape') if len(shape) != np.ndim(x): raise ValueError("when given, axes and shape arguments" " have to be of the same length") return shape
bsd-3-clause
3dfxsoftware/cbss-addons
account_move_folio/wizard/fill_folio_gap.py
1
4828
# -*- encoding: utf-8 -*-

from openerp.osv import fields, osv
import re
import logging
_logger = logging.getLogger(__name__)


class account_move_folio_fill_gap(osv.TransientModel):
    _name = 'account.move.folio.fill.gap'
    _description = "Fill Gap in Journal Entry Folios"
    _columns = {
        'sure': fields.boolean('Check this box'),
    }

    def data_save(self, cr, uid, ids, context=None):
        """ This function fill the Gaps in Journal Entry Folios
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: list of wizard ids
        """
        context = context or {}
        journal_obj = self.pool.get('account.journal')
        ir_seq_obj = self.pool.get('ir.sequence')
        folio_obj = self.pool.get('account.move.folio')
        # Run statistics, logged at the end.
        WRONG = 0
        MATCH = 0
        FOLIOS = 0
        GAPS = 0
        journal_ids = journal_obj.search(cr, uid, [], context=context)
        if not journal_ids:
            _logger.info('NO JOURNALS FOR THIS COMPANY')
            return {'type': 'ir.actions.act_window_close'}
        for j_id in journal_ids:
            journal_brw = journal_obj.browse(cr, uid, j_id, context=context)
            seq_brw = journal_brw.sequence_id
            expr = ''
            if seq_brw.prefix:
                # Turn the sequence prefix into a regex; any segment with a
                # 'year' placeholder matches four digits.
                expr = seq_brw.prefix.split('/')
                expr = map(lambda t: t if 'year' not in t else 4 * '[0-9]', expr)
                expr = '^' + '/'.join(expr)
            folio_ids = folio_obj.search(cr, uid, [('journal_id', '=', j_id)], context=context)
            if not folio_ids:
                _logger.info('NO FOLIOS FOR THIS JOURNAL %s' % j_id)
                continue
            _logger.info('%s FOLIOS TO PROCESS IN JOURNAL %s' % (len(folio_ids), j_id))
            # Collect the numeric part of every folio name matching the
            # sequence's prefix pattern.
            folio_set = set()
            for f_id in folio_ids:
                folio_brw = folio_obj.browse(cr, uid, f_id, context=context)
                m = re.match(expr, folio_brw.name)
                matched = False
                if m is not None:
                    # Strip the matched prefix and keep the numeric tail.
                    expr2 = re.compile('^' + m.group())
                    expr2 = expr2.sub('', folio_brw.name)
                    try:
                        folio_set.add(int(expr2))
                        matched = True
                    except ValueError:
                        # Bug fix: a non-numeric remainder used to raise an
                        # uncaught ValueError and abort the whole wizard;
                        # count it as WRONG instead.
                        pass
                if matched:
                    MATCH += 1
                else:
                    _logger.info('%s <= WRONG' % folio_brw.name)
                    WRONG += 1
                FOLIOS += 1
            # Numbers the sequence has handed out so far, minus those seen
            # on folios, are the gaps to fill.
            actual_folio_set = set(range(1, seq_brw.number_next))
            gap_folio_set = []
            if folio_set:
                gap_folio_set = actual_folio_set - folio_set
                _logger.info('GAP FOLIO SET %s' % gap_folio_set)
            for gap in gap_folio_set:
                d = ir_seq_obj._interpolation_dict()
                try:
                    interpolated_prefix = ir_seq_obj._interpolate(seq_brw.prefix, d)
                    interpolated_suffix = ir_seq_obj._interpolate(seq_brw.suffix, d)
                except ValueError:
                    _logger.info("Invalid prefix or suffix for sequence '%s'" % seq_brw.name)
                    # Bug fix: the original fell through and used the
                    # (possibly unbound) interpolated values below, raising
                    # NameError. Skip this gap instead.
                    continue
                gap_name = interpolated_prefix + '%%0%sd' % seq_brw.padding % gap + interpolated_suffix
                _logger.info('GAP NAME %s' % gap_name)
                # Walk forward to the next existing folio (skipping other
                # gaps) so the filler record can copy its date and period.
                flag = True
                next_gap = gap
                next_gap_id = False
                while flag:
                    next_gap += 1
                    if next_gap in gap_folio_set:
                        continue
                    else:
                        flag = False
                    if next_gap >= seq_brw.number_next:
                        break
                    else:
                        next_gap_name = interpolated_prefix + '%%0%sd' % seq_brw.padding % next_gap + interpolated_suffix
                        next_gap_id = folio_obj.search(cr, uid, [('name', '=', next_gap_name), ('journal_id', '=', j_id)], context=context)
                        next_gap_id = next_gap_id and next_gap_id[0] or False
                period_id = False
                date = False
                if next_gap_id:
                    ngval_brw = folio_obj.browse(cr, uid, next_gap_id, context=context)
                    date = ngval_brw.date or False
                    period_id = ngval_brw.period_id and ngval_brw.period_id.id or False
                folio_obj.create(cr, uid, {
                    'name': gap_name,
                    'journal_id': j_id,
                    'date': date,
                    'period_id': period_id,
                })
            GAPS += len(gap_folio_set)
        _logger.info('MATCHES: %s' % MATCH)
        _logger.info('WRONGS: %s' % WRONG)
        _logger.info('FOLIOS: %s' % FOLIOS)
        _logger.info('GAPS: %s' % GAPS)
        return {'type': 'ir.actions.act_window_close'}
gpl-2.0
lowitty/selenium
libs/windows/Crypto/PublicKey/ElGamal.py
124
13212
# # ElGamal.py : ElGamal encryption/decryption and signatures # # Part of the Python Cryptography Toolkit # # Originally written by: A.M. Kuchling # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """ElGamal public-key algorithm (randomized encryption and signature). Signature algorithm ------------------- The security of the ElGamal signature scheme is based (like DSA) on the discrete logarithm problem (DLP_). Given a cyclic group, a generator *g*, and an element *h*, it is hard to find an integer *x* such that *g^x = h*. The group is the largest multiplicative sub-group of the integers modulo *p*, with *p* prime. The signer holds a value *x* (*0<x<p-1*) as private key, and its public key (*y* where *y=g^x mod p*) is distributed. The ElGamal signature is twice as big as *p*. Encryption algorithm -------------------- The security of the ElGamal encryption scheme is based on the computational Diffie-Hellman problem (CDH_). 
Given a cyclic group, a generator *g*, and two integers *a* and *b*, it is difficult to find the element *g^{ab}* when only *g^a* and *g^b* are known, and not *a* and *b*. As before, the group is the largest multiplicative sub-group of the integers modulo *p*, with *p* prime. The receiver holds a value *a* (*0<a<p-1*) as private key, and its public key (*b* where *b*=g^a*) is given to the sender. The ElGamal ciphertext is twice as big as *p*. Domain parameters ----------------- For both signature and encryption schemes, the values *(p,g)* are called *domain parameters*. They are not sensitive but must be distributed to all parties (senders and receivers). Different signers can share the same domain parameters, as can different recipients of encrypted messages. Security -------- Both DLP and CDH problem are believed to be difficult, and they have been proved such (and therefore secure) for more than 30 years. The cryptographic strength is linked to the magnitude of *p*. In 2012, a sufficient size for *p* is deemed to be 2048 bits. For more information, see the most recent ECRYPT_ report. Even though ElGamal algorithms are in theory reasonably secure for new designs, in practice there are no real good reasons for using them. The signature is four times larger than the equivalent DSA, and the ciphertext is two times larger than the equivalent RSA. Functionality ------------- This module provides facilities for generating new ElGamal keys and for constructing them from known components. ElGamal keys allows you to perform basic signing, verification, encryption, and decryption. 
>>> from Crypto import Random >>> from Crypto.Random import random >>> from Crypto.PublicKey import ElGamal >>> from Crypto.Util.number import GCD >>> from Crypto.Hash import SHA >>> >>> message = "Hello" >>> key = ElGamal.generate(1024, Random.new().read) >>> h = SHA.new(message).digest() >>> while 1: >>> k = random.StrongRandom().randint(1,key.p-1) >>> if GCD(k,key.p-1)==1: break >>> sig = key.sign(h,k) >>> ... >>> if key.verify(h,sig): >>> print "OK" >>> else: >>> print "Incorrect signature" .. _DLP: http://www.cosic.esat.kuleuven.be/publications/talk-78.pdf .. _CDH: http://en.wikipedia.org/wiki/Computational_Diffie%E2%80%93Hellman_assumption .. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf """ __revision__ = "$Id$" __all__ = ['generate', 'construct', 'error', 'ElGamalobj'] from Crypto.PublicKey.pubkey import * from Crypto.Util import number class error (Exception): pass # Generate an ElGamal key with N bits def generate(bits, randfunc, progress_func=None): """Randomly generate a fresh, new ElGamal key. The key will be safe for use for both encryption and signature (although it should be used for **only one** purpose). :Parameters: bits : int Key length, or size (in bits) of the modulus *p*. Recommended value is 2048. randfunc : callable Random number generation function; it should accept a single integer N and return a string of random data N bytes long. progress_func : callable Optional function that will be called with a short string containing the key parameter currently being generated; it's useful for interactive applications where a user is waiting for a key to be generated. :attention: You should always use a cryptographically secure random number generator, such as the one defined in the ``Crypto.Random`` module; **don't** just use the current time and the ``random`` module. :Return: An ElGamal key object (`ElGamalobj`). 
""" obj=ElGamalobj() # Generate a safe prime p # See Algorithm 4.86 in Handbook of Applied Cryptography if progress_func: progress_func('p\n') while 1: q = bignum(getPrime(bits-1, randfunc)) obj.p = 2*q+1 if number.isPrime(obj.p, randfunc=randfunc): break # Generate generator g # See Algorithm 4.80 in Handbook of Applied Cryptography # Note that the order of the group is n=p-1=2q, where q is prime if progress_func: progress_func('g\n') while 1: # We must avoid g=2 because of Bleichenbacher's attack described # in "Generating ElGamal signatures without knowning the secret key", # 1996 # obj.g = number.getRandomRange(3, obj.p, randfunc) safe = 1 if pow(obj.g, 2, obj.p)==1: safe=0 if safe and pow(obj.g, q, obj.p)==1: safe=0 # Discard g if it divides p-1 because of the attack described # in Note 11.67 (iii) in HAC if safe and divmod(obj.p-1, obj.g)[1]==0: safe=0 # g^{-1} must not divide p-1 because of Khadir's attack # described in "Conditions of the generator for forging ElGamal # signature", 2011 ginv = number.inverse(obj.g, obj.p) if safe and divmod(obj.p-1, ginv)[1]==0: safe=0 if safe: break # Generate private key x if progress_func: progress_func('x\n') obj.x=number.getRandomRange(2, obj.p-1, randfunc) # Generate public key y if progress_func: progress_func('y\n') obj.y = pow(obj.g, obj.x, obj.p) return obj def construct(tup): """Construct an ElGamal key from a tuple of valid ElGamal components. The modulus *p* must be a prime. The following conditions must apply: - 1 < g < p-1 - g^{p-1} = 1 mod p - 1 < x < p-1 - g^x = y mod p :Parameters: tup : tuple A tuple of long integers, with 3 or 4 items in the following order: 1. Modulus (*p*). 2. Generator (*g*). 3. Public key (*y*). 4. Private key (*x*). Optional. :Return: An ElGamal key object (`ElGamalobj`). 
""" obj=ElGamalobj() if len(tup) not in [3,4]: raise ValueError('argument for construct() wrong length') for i in range(len(tup)): field = obj.keydata[i] setattr(obj, field, tup[i]) return obj class ElGamalobj(pubkey): """Class defining an ElGamal key. :undocumented: __getstate__, __setstate__, __repr__, __getattr__ """ #: Dictionary of ElGamal parameters. #: #: A public key will only have the following entries: #: #: - **y**, the public key. #: - **g**, the generator. #: - **p**, the modulus. #: #: A private key will also have: #: #: - **x**, the private key. keydata=['p', 'g', 'y', 'x'] def encrypt(self, plaintext, K): """Encrypt a piece of data with ElGamal. :Parameter plaintext: The piece of data to encrypt with ElGamal. It must be numerically smaller than the module (*p*). :Type plaintext: byte string or long :Parameter K: A secret number, chosen randomly in the closed range *[1,p-2]*. :Type K: long (recommended) or byte string (not recommended) :Return: A tuple with two items. Each item is of the same type as the plaintext (string or long). :attention: selection of *K* is crucial for security. Generating a random number larger than *p-1* and taking the modulus by *p-1* is **not** secure, since smaller values will occur more frequently. Generating a random number systematically smaller than *p-1* (e.g. *floor((p-1)/8)* random bytes) is also **not** secure. In general, it shall not be possible for an attacker to know the value of any bit of K. :attention: The number *K* shall not be reused for any other operation and shall be discarded immediately. """ return pubkey.encrypt(self, plaintext, K) def decrypt(self, ciphertext): """Decrypt a piece of data with ElGamal. :Parameter ciphertext: The piece of data to decrypt with ElGamal. :Type ciphertext: byte string, long or a 2-item tuple as returned by `encrypt` :Return: A byte string if ciphertext was a byte string or a tuple of byte strings. A long otherwise. 
""" return pubkey.decrypt(self, ciphertext) def sign(self, M, K): """Sign a piece of data with ElGamal. :Parameter M: The piece of data to sign with ElGamal. It may not be longer in bit size than *p-1*. :Type M: byte string or long :Parameter K: A secret number, chosen randomly in the closed range *[1,p-2]* and such that *gcd(k,p-1)=1*. :Type K: long (recommended) or byte string (not recommended) :attention: selection of *K* is crucial for security. Generating a random number larger than *p-1* and taking the modulus by *p-1* is **not** secure, since smaller values will occur more frequently. Generating a random number systematically smaller than *p-1* (e.g. *floor((p-1)/8)* random bytes) is also **not** secure. In general, it shall not be possible for an attacker to know the value of any bit of K. :attention: The number *K* shall not be reused for any other operation and shall be discarded immediately. :attention: M must be be a cryptographic hash, otherwise an attacker may mount an existential forgery attack. :Return: A tuple with 2 longs. """ return pubkey.sign(self, M, K) def verify(self, M, signature): """Verify the validity of an ElGamal signature. :Parameter M: The expected message. :Type M: byte string or long :Parameter signature: The ElGamal signature to verify. :Type signature: A tuple with 2 longs as return by `sign` :Return: True if the signature is correct, False otherwise. 
""" return pubkey.verify(self, M, signature) def _encrypt(self, M, K): a=pow(self.g, K, self.p) b=( M*pow(self.y, K, self.p) ) % self.p return ( a,b ) def _decrypt(self, M): if (not hasattr(self, 'x')): raise TypeError('Private key not available in this object') ax=pow(M[0], self.x, self.p) plaintext=(M[1] * inverse(ax, self.p ) ) % self.p return plaintext def _sign(self, M, K): if (not hasattr(self, 'x')): raise TypeError('Private key not available in this object') p1=self.p-1 if (GCD(K, p1)!=1): raise ValueError('Bad K value: GCD(K,p-1)!=1') a=pow(self.g, K, self.p) t=(M-self.x*a) % p1 while t<0: t=t+p1 b=(t*inverse(K, p1)) % p1 return (a, b) def _verify(self, M, sig): if sig[0]<1 or sig[0]>self.p-1: return 0 v1=pow(self.y, sig[0], self.p) v1=(v1*pow(sig[0], sig[1], self.p)) % self.p v2=pow(self.g, M, self.p) if v1==v2: return 1 return 0 def size(self): return number.size(self.p) - 1 def has_private(self): if hasattr(self, 'x'): return 1 else: return 0 def publickey(self): return construct((self.p, self.g, self.y)) object=ElGamalobj
mit
alexlo03/ansible
test/units/modules/network/ios/test_ios_system.py
68
5372
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.ios import ios_system from units.modules.utils import set_module_args from .ios_module import TestIosModule, load_fixture class TestIosSystemModule(TestIosModule): module = ios_system def setUp(self): super(TestIosSystemModule, self).setUp() self.mock_get_config = patch('ansible.modules.network.ios.ios_system.get_config') self.get_config = self.mock_get_config.start() self.mock_load_config = patch('ansible.modules.network.ios.ios_system.load_config') self.load_config = self.mock_load_config.start() def tearDown(self): super(TestIosSystemModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() def load_fixtures(self, commands=None): self.get_config.return_value = load_fixture('ios_system_config.cfg') self.load_config.return_value = None def test_ios_system_hostname_changed(self): set_module_args(dict(hostname='foo')) commands = ['hostname foo'] self.execute_module(changed=True, commands=commands) def test_ios_system_domain_name(self): set_module_args(dict(domain_name=['test.com'])) commands = ['ip domain name test.com', 'no ip domain name eng.example.net', 'no ip domain name vrf management 
eng.example.net'] self.execute_module(changed=True, commands=commands) def test_ios_system_domain_name_complex(self): set_module_args(dict(domain_name=[{'name': 'test.com', 'vrf': 'test'}, {'name': 'eng.example.net'}])) commands = ['ip domain name vrf test test.com', 'no ip domain name vrf management eng.example.net'] self.execute_module(changed=True, commands=commands) def test_ios_system_domain_search(self): set_module_args(dict(domain_search=['ansible.com', 'redhat.com'])) commands = ['no ip domain list vrf management example.net', 'no ip domain list example.net', 'no ip domain list example.com', 'ip domain list ansible.com', 'ip domain list redhat.com'] self.execute_module(changed=True, commands=commands, sort=False) def test_ios_system_domain_search_complex(self): set_module_args(dict(domain_search=[{'name': 'ansible.com', 'vrf': 'test'}])) commands = ['no ip domain list vrf management example.net', 'no ip domain list example.net', 'no ip domain list example.com', 'ip domain list vrf test ansible.com'] self.execute_module(changed=True, commands=commands, sort=False) def test_ios_system_lookup_source(self): set_module_args(dict(lookup_source='Ethernet1')) commands = ['ip domain lookup source-interface Ethernet1'] self.execute_module(changed=True, commands=commands) def test_ios_system_name_servers(self): name_servers = ['8.8.8.8', '8.8.4.4'] set_module_args(dict(name_servers=name_servers)) commands = ['no ip name-server vrf management 8.8.8.8', 'ip name-server 8.8.4.4'] self.execute_module(changed=True, commands=commands, sort=False) def rest_ios_system_name_servers_complex(self): name_servers = dict(server='8.8.8.8', vrf='test') set_module_args(dict(name_servers=name_servers)) commands = ['no name-server 8.8.8.8', 'no name-server vrf management 8.8.8.8', 'ip name-server vrf test 8.8.8.8'] self.execute_module(changed=True, commands=commands, sort=False) def test_ios_system_state_absent(self): set_module_args(dict(state='absent')) commands = ['no hostname', 'no 
ip domain lookup source-interface GigabitEthernet0/0', 'no ip domain list vrf management', 'no ip domain list', 'no ip domain name vrf management', 'no ip domain name', 'no ip name-server vrf management', 'no ip name-server'] self.execute_module(changed=True, commands=commands) def test_ios_system_no_change(self): set_module_args(dict(hostname='ios01')) self.execute_module(commands=[]) def test_ios_system_missing_vrf(self): name_servers = dict(server='8.8.8.8', vrf='missing') set_module_args(dict(name_servers=name_servers)) self.execute_module(failed=True)
gpl-3.0
fabianvf/osf.io
website/addons/box/tests/test_models.py
17
15800
# -*- coding: utf-8 -*- import mock from nose.tools import * # noqa (PEP8 asserts) from box import BoxClientException from framework.auth import Auth from framework.exceptions import HTTPError from website.addons.box.model import ( BoxUserSettings, BoxNodeSettings, BoxFile ) from tests.base import OsfTestCase from tests.factories import UserFactory, ProjectFactory from website.addons.box.tests.factories import ( BoxOAuthSettings, BoxUserSettingsFactory, BoxNodeSettingsFactory, ) from website.addons.base import exceptions class TestFileGuid(OsfTestCase): def setUp(self): super(OsfTestCase, self).setUp() self.user = UserFactory() self.project = ProjectFactory(creator=self.user) self.project.add_addon('box', auth=Auth(self.user)) self.node_addon = self.project.get_addon('box') def test_provider(self): assert_equal('box', BoxFile().provider) def test_correct_path(self): guid = BoxFile(node=self.project, path='1234567890/foo/bar') assert_equals(guid.path, '1234567890/foo/bar') assert_equals(guid.waterbutler_path, '/1234567890/foo/bar') @mock.patch('website.addons.base.requests.get') def test_unique_identifier(self, mock_get): uid = '#!' mock_response = mock.Mock(ok=True, status_code=200) mock_get.return_value = mock_response mock_response.json.return_value = { 'data': { 'extra': { 'etag': uid }, } } guid = BoxFile(node=self.project, path='1234567890/foo/bar') guid.enrich() assert_equals(uid, guid.unique_identifier) @mock.patch('website.addons.base.requests.get') def test_unique_identifier_version(self, mock_get): uid = '#!' 
mock_response = mock.Mock(ok=True, status_code=200) mock_get.return_value = mock_response mock_response.json.return_value = { 'data': { 'extra': {}, 'version': uid } } guid = BoxFile(node=self.project, path='1234567890/foo/bar') guid.enrich() assert_equals(uid, guid.unique_identifier) def test_node_addon_get_or_create(self): guid, created = self.node_addon.find_or_create_file_guid('1234567890/foo/bar') assert_true(created) assert_equal(guid.path, '1234567890/foo/bar') assert_equal(guid.waterbutler_path, '/1234567890/foo/bar') def test_node_addon_get_or_create_finds(self): guid1, created1 = self.node_addon.find_or_create_file_guid('/foo/bar') guid2, created2 = self.node_addon.find_or_create_file_guid('/foo/bar') assert_true(created1) assert_false(created2) assert_equals(guid1, guid2) class TestUserSettingsModel(OsfTestCase): def setUp(self): super(TestUserSettingsModel, self).setUp() self.user = UserFactory() def test_fields(self): oauth_settings = BoxOAuthSettings(user_id='foo', username='bar', access_token='defined') oauth_settings.save() user_settings = BoxUserSettings(owner=self.user, oauth_settings=oauth_settings) user_settings.save() retrieved = BoxUserSettings.load(user_settings._id) assert_true(retrieved.owner) assert_true(retrieved.user_id) assert_true(retrieved.username) assert_true(retrieved.access_token) def test_has_auth(self): oauth_settings = BoxOAuthSettings(user_id='foo', username='bar') oauth_settings.save() user_settings = BoxUserSettingsFactory(oauth_settings=oauth_settings) assert_false(user_settings.has_auth) user_settings.access_token = '12345' user_settings.save() assert_true(user_settings.has_auth) @mock.patch('website.addons.box.model.requests') def test_clear_clears_associated_node_settings(self, mock_requests): node_settings = BoxNodeSettingsFactory() user_settings = BoxUserSettingsFactory() node_settings.user_settings = user_settings node_settings.save() user_settings.clear() user_settings.save() # Node settings no longer associated with 
user settings assert_is(node_settings.folder_id, None) assert_is(node_settings.user_settings, None) mock_requests.post.assert_called_once() @mock.patch('website.addons.box.model.requests') def test_clear(self, mock_requests): node_settings = BoxNodeSettingsFactory() user_settings = BoxUserSettingsFactory() node_settings.user_settings = user_settings node_settings.save() assert_true(user_settings.access_token) user_settings.clear() user_settings.save() assert_false(user_settings.user_id) assert_false(user_settings.access_token) mock_requests.post.assert_called_once() @mock.patch('website.addons.box.model.requests') def test_clear_wo_oauth_settings(self, mock_requests): user_settings = BoxUserSettingsFactory() user_settings.oauth_settings = None user_settings.save() node_settings = BoxNodeSettingsFactory() node_settings.user_settings = user_settings node_settings.save() assert_false(user_settings.oauth_settings) user_settings.clear() user_settings.save() assert_false(user_settings.user_id) assert_false(user_settings.access_token) assert_false(mock_requests.post.called) @mock.patch('website.addons.box.model.requests') def test_delete(self, mock_requests): user_settings = BoxUserSettingsFactory() assert_true(user_settings.has_auth) user_settings.delete() user_settings.save() assert_false(user_settings.user_id) assert_true(user_settings.deleted) assert_false(user_settings.access_token) mock_requests.post.assert_called_once() @mock.patch('website.addons.box.model.requests') def test_delete_clears_associated_node_settings(self, mock_requests): node_settings = BoxNodeSettingsFactory() user_settings = BoxUserSettingsFactory() node_settings.user_settings = user_settings node_settings.save() user_settings.delete() user_settings.save() # Node settings no longer associated with user settings assert_false(node_settings.deleted) assert_is(node_settings.folder_id, None) mock_requests.post.assert_called_once() assert_is(node_settings.user_settings, None) class 
TestBoxNodeSettingsModel(OsfTestCase): def setUp(self): super(TestBoxNodeSettingsModel, self).setUp() self.user = UserFactory() self.user.add_addon('box') self.user.save() self.oauth = BoxOAuthSettings(user_id='not sleep', access_token='seems legit') self.oauth.save() self.user_settings = self.user.get_addon('box') self.user_settings.oauth_settings = self.oauth self.user_settings.save() self.project = ProjectFactory() self.node_settings = BoxNodeSettingsFactory( user_settings=self.user_settings, folder_id='1234567890', owner=self.project ) def test_complete_true(self): assert_true(self.node_settings.has_auth) assert_true(self.node_settings.complete) def test_complete_false(self): self.node_settings.folder_id = None assert_true(self.node_settings.has_auth) assert_false(self.node_settings.complete) def test_complete_auth_false(self): self.node_settings.user_settings = None assert_false(self.node_settings.has_auth) assert_false(self.node_settings.complete) def test_fields(self): node_settings = BoxNodeSettings(user_settings=self.user_settings) node_settings.save() assert_true(node_settings.user_settings) assert_equal(node_settings.user_settings.owner, self.user) assert_true(hasattr(node_settings, 'folder_id')) assert_true(hasattr(node_settings, 'user_settings')) def test_folder_defaults_to_none(self): node_settings = BoxNodeSettings(user_settings=self.user_settings) node_settings.save() assert_is_none(node_settings.folder_id) def test_has_auth(self): self.user_settings.access_token = None settings = BoxNodeSettings(user_settings=self.user_settings) settings.save() assert_false(settings.has_auth) settings.user_settings.access_token = '123abc' settings.user_settings.save() assert_true(settings.has_auth) def test_to_json(self): settings = self.node_settings user = UserFactory() result = settings.to_json(user) assert_equal(result['addon_short_name'], 'box') def test_delete(self): assert_true(self.node_settings.user_settings) assert_true(self.node_settings.folder_id) 
old_logs = self.project.logs self.node_settings.delete() self.node_settings.save() assert_is(self.node_settings.user_settings, None) assert_is(self.node_settings.folder_id, None) assert_true(self.node_settings.deleted) assert_equal(self.project.logs, old_logs) def test_deauthorize(self): assert_true(self.node_settings.user_settings) assert_true(self.node_settings.folder_id) self.node_settings.deauthorize(auth=Auth(self.user)) self.node_settings.save() assert_is(self.node_settings.user_settings, None) assert_is(self.node_settings.folder_id, None) last_log = self.project.logs[-1] assert_equal(last_log.action, 'box_node_deauthorized') params = last_log.params assert_in('node', params) assert_in('project', params) assert_in('folder_id', params) @mock.patch("website.addons.box.model.BoxNodeSettings._update_folder_data") def test_set_folder(self, mock_update_folder): folder_id = '1234567890' self.node_settings.set_folder(folder_id, auth=Auth(self.user)) self.node_settings.save() # Folder was set assert_equal(self.node_settings.folder_id, folder_id) # Log was saved last_log = self.project.logs[-1] assert_equal(last_log.action, 'box_folder_selected') def test_set_user_auth(self): node_settings = BoxNodeSettingsFactory() user_settings = BoxUserSettingsFactory() node_settings.set_user_auth(user_settings) node_settings.save() assert_true(node_settings.has_auth) assert_equal(node_settings.user_settings, user_settings) # A log was saved last_log = node_settings.owner.logs[-1] assert_equal(last_log.action, 'box_node_authorized') log_params = last_log.params assert_equal(log_params['folder_id'], node_settings.folder_id) assert_equal(log_params['node'], node_settings.owner._primary_key) assert_equal(last_log.user, user_settings.owner) def test_serialize_credentials(self): self.user_settings.access_token = 'secret' self.user_settings.save() credentials = self.node_settings.serialize_waterbutler_credentials() expected = {'token': self.node_settings.user_settings.access_token} 
assert_equal(credentials, expected) def test_serialize_credentials_not_authorized(self): self.node_settings.user_settings = None self.node_settings.save() with assert_raises(exceptions.AddonError): self.node_settings.serialize_waterbutler_credentials() def test_serialize_settings(self): settings = self.node_settings.serialize_waterbutler_settings() expected = {'folder': self.node_settings.folder_id} assert_equal(settings, expected) def test_serialize_settings_not_configured(self): self.node_settings.folder_id = None self.node_settings.save() with assert_raises(exceptions.AddonError): self.node_settings.serialize_waterbutler_settings() @mock.patch('website.addons.box.model.BoxUserSettings.fetch_access_token') def test_serialize_waterbutler_credentials_reraises_box_client_exception_as_http_error(self, mock_fetch_access_token): mock_fetch_access_token.side_effect = BoxClientException(status_code=400, message='Oops') with assert_raises(HTTPError): self.node_settings.serialize_waterbutler_credentials() def test_create_log(self): action = 'file_added' path = 'pizza.nii' nlog = len(self.project.logs) self.node_settings.create_waterbutler_log( auth=Auth(user=self.user), action=action, metadata={'path': path, 'materialized': path}, ) self.project.reload() assert_equal(len(self.project.logs), nlog + 1) assert_equal( self.project.logs[-1].action, 'box_{0}'.format(action), ) assert_equal( self.project.logs[-1].params['path'], path ) class TestNodeSettingsCallbacks(OsfTestCase): def setUp(self): super(TestNodeSettingsCallbacks, self).setUp() # Create node settings with auth self.user_settings = BoxUserSettingsFactory(access_token='123abc') self.node_settings = BoxNodeSettingsFactory( user_settings=self.user_settings, ) self.project = self.node_settings.owner self.user = self.user_settings.owner def test_after_fork_by_authorized_box_user(self): fork = ProjectFactory() clone, message = self.node_settings.after_fork( node=self.project, fork=fork, user=self.user_settings.owner ) 
assert_equal(clone.user_settings, self.user_settings) def test_after_fork_by_unauthorized_box_user(self): fork = ProjectFactory() user = UserFactory() clone, message = self.node_settings.after_fork( node=self.project, fork=fork, user=user, save=True ) # need request context for url_for assert_is(clone.user_settings, None) def test_before_fork(self): node = ProjectFactory() message = self.node_settings.before_fork(node, self.user) assert_true(message) def test_before_remove_contributor_message(self): message = self.node_settings.before_remove_contributor( self.project, self.user) assert_true(message) assert_in(self.user.fullname, message) assert_in(self.project.project_or_component, message) def test_after_remove_authorized_box_user_not_self(self): message = self.node_settings.after_remove_contributor( self.project, self.user_settings.owner) self.node_settings.save() assert_is_none(self.node_settings.user_settings) assert_true(message) assert_in("You can re-authenticate", message) def test_after_remove_authorized_box_user_self(self): auth = Auth(user=self.user_settings.owner) message = self.node_settings.after_remove_contributor( self.project, self.user_settings.owner, auth) self.node_settings.save() assert_is_none(self.node_settings.user_settings) assert_true(message) assert_not_in("You can re-authenticate", message) def test_after_delete(self): self.project.remove_node(Auth(user=self.project.creator)) # Ensure that changes to node settings have been saved self.node_settings.reload() assert_true(self.node_settings.user_settings is None) assert_true(self.node_settings.folder_id is None)
apache-2.0
AlmogCohen/flask-admin
flask_admin/model/filters.py
18
8827
import time
import datetime

from flask_admin._compat import text_type
from flask_admin.babel import lazy_gettext


class BaseFilter(object):
    """
        Base filter class.

        Concrete back-ends implement `apply` (adds criteria to a query) and
        `operation` (human readable operation name, e.g. u'equals').
    """
    def __init__(self, name, options=None, data_type=None):
        """
            Constructor.

            :param name:
                Displayed name
            :param options:
                List of fixed options. If provided, will use drop down instead of textbox.
                May also be a callable returning such a list.
            :param data_type:
                Client-side widget type to use.
        """
        self.name = name
        self.options = options
        self.data_type = data_type

    def get_options(self, view):
        """
            Return list of predefined options, or `None` for a free-form filter.

            Override to customize behavior.

            :param view:
                Associated administrative view class.
        """
        options = self.options

        if options:
            # Options may be supplied lazily as a callable.
            if callable(options):
                options = options()

            return [(v, text_type(n)) for v, n in options]

        return None

    def validate(self, value):
        """
            Validate value.

            If value is valid, returns `True` and `False` otherwise.

            :param value:
                Value to validate
        """
        # Useful for filters with date conversions: see if the conversion
        # in clean() raises ValueError.
        try:
            self.clean(value)
            return True
        except ValueError:
            return False

    def clean(self, value):
        """
            Parse value into python format. Occurs before .apply()

            :param value:
                Value to parse
        """
        return value

    def apply(self, query):
        """
            Apply search criteria to the query and return new query.

            :param query:
                Query
        """
        raise NotImplementedError()

    def operation(self):
        """
            Return readable operation name.

            For example: u'equals'
        """
        raise NotImplementedError()

    def __unicode__(self):
        return self.name


# Customized filters
class BaseBooleanFilter(BaseFilter):
    """
        Base boolean filter, uses fixed list of options.
    """
    def __init__(self, name, options=None, data_type=None):
        # The `options` argument is deliberately ignored: booleans always
        # present a fixed Yes/No drop down.
        super(BaseBooleanFilter, self).__init__(name,
                                                (('1', lazy_gettext(u'Yes')),
                                                 ('0', lazy_gettext(u'No'))),
                                                data_type)

    def validate(self, value):
        return value in ('0', '1')


class BaseIntFilter(BaseFilter):
    """
        Base Int filter. Adds validation and changes value to python int.

        Avoid using int(float(value)) to also allow using decimals, because
        it causes precision issues with large numbers.
    """
    def clean(self, value):
        return int(value)


class BaseFloatFilter(BaseFilter):
    """
        Base Float filter. Adds validation and changes value to python float.
    """
    def clean(self, value):
        return float(value)


class BaseIntListFilter(BaseFilter):
    """
        Base Integer list filter. Adds validation for int "In List" filter.

        Avoid using int(float(value)) to also allow using decimals, because
        it causes precision issues with large numbers.
    """
    def clean(self, value):
        # Comma-separated list; empty entries (e.g. "1,,2") are ignored.
        return [int(v.strip()) for v in value.split(',') if v.strip()]


class BaseFloatListFilter(BaseFilter):
    """
        Base Float list filter. Adds validation for float "In List" filter.
    """
    def clean(self, value):
        return [float(v.strip()) for v in value.split(',') if v.strip()]


class BaseDateFilter(BaseFilter):
    """
        Base Date filter. Uses client-side date picker control.
    """
    def __init__(self, name, options=None, data_type=None):
        super(BaseDateFilter, self).__init__(name,
                                             options,
                                             data_type='datepicker')

    def clean(self, value):
        return datetime.datetime.strptime(value, '%Y-%m-%d').date()


class BaseDateBetweenFilter(BaseFilter):
    """
        Base Date Between filter. Consolidates logic for validation and clean.
        Apply method is different for each back-end.
    """
    def clean(self, value):
        return [datetime.datetime.strptime(part, '%Y-%m-%d')
                for part in value.split(' to ')]

    def operation(self):
        return lazy_gettext('between')

    def validate(self, value):
        try:
            value = [datetime.datetime.strptime(part, '%Y-%m-%d')
                     for part in value.split(' to ')]
            # if " to " is missing, fail validation
            # sqlalchemy's .between() will not work if end date is before start date
            if (len(value) == 2) and (value[0] <= value[1]):
                return True
            else:
                return False
        except ValueError:
            return False


class BaseDateTimeFilter(BaseFilter):
    """
        Base DateTime filter. Uses client-side date time picker control.
    """
    def __init__(self, name, options=None, data_type=None):
        super(BaseDateTimeFilter, self).__init__(name,
                                                 options,
                                                 data_type='datetimepicker')

    def clean(self, value):
        # datetime filters will not work in SQLite + SQLAlchemy if value not converted to datetime
        return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')


class BaseDateTimeBetweenFilter(BaseFilter):
    """
        Base DateTime Between filter. Consolidates logic for validation and
        clean. Apply method is different for each back-end.
    """
    def clean(self, value):
        return [datetime.datetime.strptime(part, '%Y-%m-%d %H:%M:%S')
                for part in value.split(' to ')]

    def operation(self):
        return lazy_gettext('between')

    def validate(self, value):
        try:
            value = [datetime.datetime.strptime(part, '%Y-%m-%d %H:%M:%S')
                     for part in value.split(' to ')]
            if (len(value) == 2) and (value[0] <= value[1]):
                return True
            else:
                return False
        except ValueError:
            return False


class BaseTimeFilter(BaseFilter):
    """
        Base Time filter. Uses client-side time picker control.
    """
    def __init__(self, name, options=None, data_type=None):
        super(BaseTimeFilter, self).__init__(name,
                                             options,
                                             data_type='timepicker')

    def clean(self, value):
        # time filters will not work in SQLite + SQLAlchemy if value not converted to time
        timetuple = time.strptime(value, '%H:%M:%S')
        return datetime.time(timetuple.tm_hour,
                             timetuple.tm_min,
                             timetuple.tm_sec)


class BaseTimeBetweenFilter(BaseFilter):
    """
        Base Time Between filter. Consolidates logic for validation and clean.
        Apply method is different for each back-end.
    """
    def clean(self, value):
        timetuples = [time.strptime(part, '%H:%M:%S')
                      for part in value.split(' to ')]
        return [datetime.time(timetuple.tm_hour,
                              timetuple.tm_min,
                              timetuple.tm_sec)
                for timetuple in timetuples]

    def operation(self):
        return lazy_gettext('between')

    def validate(self, value):
        try:
            timetuples = [time.strptime(part, '%H:%M:%S')
                          for part in value.split(' to ')]
            if (len(timetuples) == 2) and (timetuples[0] <= timetuples[1]):
                return True
            else:
                return False
        except ValueError:
            # BUG FIX: a stray `raise` here re-raised the ValueError instead
            # of reporting the value as invalid, unlike the date/datetime
            # "between" filters. Invalid input now fails validation cleanly.
            return False


def convert(*args):
    """
        Decorator for field to filter conversion routine.

        See :mod:`flask_admin.contrib.sqla.filters` for usage example.
    """
    def _inner(func):
        func._converter_for = list(map(lambda x: x.lower(), args))
        return func
    return _inner


class BaseFilterConverter(object):
    """
        Base filter converter.

        Derive from this class to implement custom field to filter conversion
        logic.
    """
    def __init__(self):
        self.converters = dict()

        # Register every method decorated with @convert under each of its
        # (lower-cased) type names.
        for p in dir(self):
            attr = getattr(self, p)

            if hasattr(attr, '_converter_for'):
                for type_name in attr._converter_for:
                    self.converters[type_name] = attr
bsd-3-clause
kartikdhar/djangotest
virt1/lib/python2.7/site-packages/pip/baseparser.py
149
9643
"""Base option parser setup"""
from __future__ import absolute_import

import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool

from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
    legacy_config_file, config_basename, running_under_virtualenv,
    site_config_files
)
from pip.utils import appdirs, get_terminal_size

# Matches the "PIP_" prefix of pip's environment variables, case-insensitively.
_environ_prefix_re = re.compile(r"^PIP_", re.I)


class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
    """A prettier/less verbose help formatter for optparse."""

    def __init__(self, *args, **kwargs):
        # help position must be aligned with __init__.parseopts.description
        kwargs['max_help_position'] = 30
        kwargs['indent_increment'] = 1
        kwargs['width'] = get_terminal_size()[0] - 2
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option_strings(self, option):
        return self._format_option_strings(option, ' <%s>', ', ')

    def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
        """
        Return a comma-separated list of option strings and metavars.

        :param option:  tuple of (short opt, long opt), e.g: ('-f', '--format')
        :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
        :param optsep:  separator
        """
        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, optsep)

        if option.takes_value():
            metavar = option.metavar or option.dest.lower()
            opts.append(mvarfmt % metavar.lower())

        return ''.join(opts)

    def format_heading(self, heading):
        # The generic top-level "Options" heading is suppressed entirely.
        if heading == 'Options':
            return ''
        return heading + ':\n'

    def format_usage(self, usage):
        """
        Ensure there is only one newline between usage and the first heading
        if there is no description.
        """
        msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), "  ")
        return msg

    def format_description(self, description):
        # leave full control over description to us
        if description:
            # The main parser labels its description "Commands"; subcommand
            # parsers label it "Description".
            if hasattr(self.parser, 'main'):
                label = 'Commands'
            else:
                label = 'Description'

            # some doc strings have initial newlines, some don't
            description = description.lstrip('\n')
            # some doc strings have final newlines and spaces, some don't
            description = description.rstrip()
            # dedent, then reindent
            description = self.indent_lines(textwrap.dedent(description), "  ")
            description = '%s:\n%s\n' % (label, description)
            return description
        else:
            return ''

    def format_epilog(self, epilog):
        # leave full control over epilog to us
        if epilog:
            return epilog
        else:
            return ''

    def indent_lines(self, text, indent):
        """Prefix every line of *text* with *indent*."""
        new_lines = [indent + line for line in text.split('\n')]
        return "\n".join(new_lines)


class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
    """Custom help formatter for use in ConfigOptionParser that updates
    the defaults before expanding them, allowing them to show up correctly
    in the help listing"""

    def expand_default(self, option):
        if self.parser is not None:
            self.parser.update_defaults(self.parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)


class CustomOptionParser(optparse.OptionParser):

    def insert_option_group(self, idx, *args, **kwargs):
        """Insert an OptionGroup at a given position."""
        group = self.add_option_group(*args, **kwargs)

        # add_option_group always appends; move the new group into place.
        self.option_groups.pop()
        self.option_groups.insert(idx, group)

        return group

    @property
    def option_list_all(self):
        """Get a list of all options, including those in option groups."""
        res = self.option_list[:]
        for i in self.option_groups:
            res.extend(i.option_list)

        return res


class ConfigOptionParser(CustomOptionParser):
    """Custom option parser which updates its defaults by checking the
    configuration files and environmental variables"""

    # When True, user-level config files and PIP_* env vars are ignored.
    isolated = False

    def __init__(self, *args, **kwargs):
        self.config = configparser.RawConfigParser()
        self.name = kwargs.pop('name')
        self.isolated = kwargs.pop("isolated", False)
        self.files = self.get_config_files()
        if self.files:
            self.config.read(self.files)
        assert self.name
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        """Return the list of config file paths to read, lowest priority first."""
        # the files returned by this method will be parsed in order with the
        # first files listed being overridden by later files in standard
        # ConfigParser fashion
        config_file = os.environ.get('PIP_CONFIG_FILE', False)
        if config_file == os.devnull:
            # Pointing PIP_CONFIG_FILE at os.devnull disables config entirely.
            return []

        # at the base we have any site-wide configuration
        files = list(site_config_files)

        # per-user configuration next
        if not self.isolated:
            if config_file and os.path.exists(config_file):
                files.append(config_file)
            else:
                # This is the legacy config file, we consider it to be a lower
                # priority than the new file location.
                files.append(legacy_config_file)

                # This is the new config file, we consider it to be a higher
                # priority than the legacy file.
                files.append(
                    os.path.join(
                        appdirs.user_config_dir("pip"),
                        config_basename,
                    )
                )

        # finally virtualenv configuration first trumping others
        if running_under_virtualenv():
            venv_config_file = os.path.join(
                sys.prefix,
                config_basename,
            )
            if os.path.exists(venv_config_file):
                files.append(venv_config_file)

        return files

    def check_default(self, option, key, val):
        """Validate *val* for *option*; exit the process on invalid config."""
        try:
            return option.check_value(key, val)
        except optparse.OptionValueError as exc:
            print("An error occurred during configuration: %s" % exc)
            sys.exit(3)

    def update_defaults(self, defaults):
        """Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists)."""
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        for section in ('global', self.name):
            config.update(
                self.normalize_keys(self.get_config_section(section))
            )
        # 2. environmental variables
        if not self.isolated:
            config.update(self.normalize_keys(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue

                # Coerce string config values to the type the option expects.
                if option.action in ('store_true', 'store_false', 'count'):
                    val = strtobool(val)
                if option.action == 'append':
                    val = val.split()
                    val = [self.check_default(option, key, v) for v in val]
                else:
                    val = self.check_default(option, key, val)

                defaults[option.dest] = val
        return defaults

    def normalize_keys(self, items):
        """Return a config dictionary with normalized keys regardless of
        whether the keys were specified in environment variables or in config
        files"""
        normalized = {}
        for key, val in items:
            key = key.replace('_', '-')
            if not key.startswith('--'):
                # only prefer long opts
                key = '--%s' % key
            normalized[key] = val
        return normalized

    def get_config_section(self, name):
        """Get a section of a configuration"""
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self):
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if _environ_prefix_re.search(key):
                # Yield the variable name with the PIP_ prefix stripped.
                yield (_environ_prefix_re.sub("", key).lower(), val)

    def get_default_values(self):
        """Overridding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, string_types):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)

    def error(self, msg):
        # Print usage before exiting with status 2, matching optparse style.
        self.print_usage(sys.stderr)
        self.exit(2, "%s\n" % msg)
mit
arenadata/ambari
ambari-server/src/test/python/TestStackSelect.py
1
8343
# !/usr/bin/env python

'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements.  See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

from mock.mock import patch
from mock.mock import MagicMock

from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions import stack_select
from resource_management.libraries.script import Script

from unittest import TestCase

Logger.initialize_logger()


class TestStackSelect(TestCase):
    """Unit tests for stack_select.select_packages orchestration handling."""

    def test_missing_role_information_throws_exception(self):
        """
        Tests that missing the service & role throws an exception
        :return:
        """
        version = "2.5.9.9-9999"

        command_json = TestStackSelect._get_incomplete_cluster_simple_upgrade_json()
        Script.config = command_json

        self.assertRaises(Fail, stack_select.select_packages, version)

    @patch.object(stack_select, "get_supported_packages")
    @patch("resource_management.libraries.functions.stack_select.select")
    def test_select_package_for_standard_orchestration(self, stack_select_select_mock, get_supported_packages_mock):
        """
        Tests that for a STANDARD orchestration upgrade, all packages mapped
        to the command's service & role (both "foo-master" and "foo-client")
        are passed to the stack-select tool.
        :return:
        """
        get_supported_packages_mock.return_value = TestStackSelect._get_supported_packages()

        version = "2.5.9.9-9999"

        command_json = TestStackSelect._get_cluster_simple_upgrade_json()
        Script.config = dict()
        Script.config.update(command_json)
        Script.config.update( { "configurations" : { "cluster-env" : {} }, "hostLevelParams": {} } )
        Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages()
        Script.config["hostLevelParams"] = { "stack_name" : "HDP" }

        stack_select.select_packages(version)

        self.assertEqual(len(stack_select_select_mock.call_args_list), 2)
        self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))
        self.assertEqual(stack_select_select_mock.call_args_list[1][0], ("foo-client", version))

    @patch.object(stack_select, "get_supported_packages")
    @patch("resource_management.libraries.functions.stack_select.select")
    def test_select_package_for_patch_orchestration(self, stack_select_select_mock, get_supported_packages_mock):
        """
        Tests that for PATCH and MAINT orchestration upgrades, only the
        packages in the corresponding section ("foo-master") are passed to
        the stack-select tool.
        :return:
        """
        get_supported_packages_mock.return_value = TestStackSelect._get_supported_packages()

        version = "2.5.9.9-9999"

        command_json = TestStackSelect._get_cluster_simple_upgrade_json()
        command_json["upgradeSummary"]["orchestration"] = "PATCH"

        Script.config = dict()
        Script.config.update(command_json)
        Script.config.update( { "configurations" : { "cluster-env" : {} }, "hostLevelParams": {} } )
        Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages()
        Script.config["hostLevelParams"] = { "stack_name" : "HDP" }

        stack_select.select_packages(version)

        self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
        self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))

        stack_select_select_mock.reset_mock()

        # MAINT orchestration behaves like PATCH for package selection.
        command_json["upgradeSummary"]["orchestration"] = "MAINT"
        stack_select.select_packages(version)

        self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
        self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-master", version))

    @patch.object(stack_select, "get_supported_packages")
    @patch("resource_management.libraries.functions.stack_select.select")
    def test_legacy_package_fallback(self, stack_select_select_mock, get_supported_packages_mock):
        """
        Tests that if the package specified by the JSON isn't supported by the
        stack-select tool, the fallback legacy value is used.
        :return:
        """
        get_supported_packages_mock.return_value = ["foo-legacy"]

        version = "2.5.9.9-9999"

        command_json = TestStackSelect._get_cluster_simple_upgrade_json()

        Script.config = dict()
        Script.config.update(command_json)
        Script.config.update( { "configurations" : { "cluster-env" : {} }, "hostLevelParams": {} } )
        Script.config["configurations"]["cluster-env"]["stack_packages"] = self._get_stack_packages_with_legacy()
        Script.config["hostLevelParams"] = { "stack_name" : "HDP" }

        stack_select.select_packages(version)

        self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
        self.assertEqual(stack_select_select_mock.call_args_list[0][0], ("foo-legacy", version))

    @staticmethod
    def _get_incomplete_cluster_simple_upgrade_json():
        """
        A command missing the role and service name during an upgrade.
        :return:
        """
        return {
            "roleCommand":"ACTIONEXECUTE",
            "hostLevelParams": {
                "stack_name": "HDP",
                "stack_version": "2.4",
            },
            "commandParams": {
                "source_stack": "2.4",
                "target_stack": "2.5",
                "upgrade_direction": "upgrade",
                "version": "2.5.9.9-9999"
            },
            "upgradeSummary": {
                "services":{
                    "HDFS":{
                        "sourceRepositoryId":1,
                        "sourceStackId":"HDP-2.4",
                        "sourceVersion":"2.4.0.0-1234",
                        "targetRepositoryId":2,
                        "targetStackId":"HDP-2.5",
                        "targetVersion":"2.5.9.9-9999"
                    }
                },
                "direction":"UPGRADE",
                "type":"rolling_upgrade",
                "isRevert":False,
                "orchestration":"STANDARD"
            }
        }

    @staticmethod
    def _get_cluster_simple_upgrade_json():
        """
        A restart command during an upgrade.
        :return:
        """
        return {
            "roleCommand":"ACTIONEXECUTE",
            "serviceName": "FOO_SERVICE",
            "role": "FOO_MASTER",
            "hostLevelParams": {
                "stack_name": "HDP",
                "stack_version": "2.4",
            },
            "commandParams": {
                "source_stack": "2.4",
                "target_stack": "2.5",
                "upgrade_direction": "upgrade",
                "version": "2.5.9.9-9999"
            },
            "upgradeSummary": {
                "services":{
                    "HDFS":{
                        "sourceRepositoryId":1,
                        "sourceStackId":"HDP-2.4",
                        "sourceVersion":"2.4.0.0-1234",
                        "targetRepositoryId":2,
                        "targetStackId":"HDP-2.5",
                        "targetVersion":"2.5.9.9-9999"
                    }
                },
                "direction":"UPGRADE",
                "type":"rolling_upgrade",
                "isRevert":False,
                "orchestration":"STANDARD"
            }
        }

    @staticmethod
    def _get_stack_packages():
        """Serialized cluster-env stack_packages mapping for FOO_SERVICE/FOO_MASTER."""
        import json
        return json.dumps( {
            "HDP": {
                "stack-select": {
                    "FOO_SERVICE": {
                        "FOO_MASTER": {
                            "STACK-SELECT-PACKAGE": "foo-master",
                            "INSTALL": [
                                "foo-master",
                                "foo-client"
                            ],
                            "PATCH": [
                                "foo-master"
                            ],
                            "STANDARD": [
                                "foo-master",
                                "foo-client"
                            ]
                        }
                    }
                }
            }
        } )

    @staticmethod
    def _get_stack_packages_with_legacy():
        """Same mapping as _get_stack_packages, with a LEGACY fallback entry."""
        import json
        return json.dumps( {
            "HDP": {
                "stack-select": {
                    "FOO_SERVICE": {
                        "FOO_MASTER": {
                            "LEGACY":"foo-legacy",
                            "STACK-SELECT-PACKAGE": "foo-master",
                            "INSTALL": [
                                "foo-master"
                            ],
                            "PATCH": [
                                "foo-master"
                            ],
                            "STANDARD": [
                                "foo-master"
                            ]
                        }
                    }
                }
            }
        } )

    @staticmethod
    def _get_supported_packages():
        # Packages the mocked stack-select tool claims to support.
        return ["foo-master", "foo-client"]
apache-2.0
olivergs/lotroassist
src/plugins/moneycount/__init__.py
1
2633
# -*- coding: utf-8 -*- ############################################################################### # (C) 2010 Oliver Gutiérrez <ogutsua@gmail.com> # LOTROAssist Money count plugin ############################################################################### # Python Imports import re # GTK Imports import gtk # EVOGTK imports from evogtk.gui import GUIClass # Plugin class class Plugin(GUIClass): """ LOTROAssist money count plugin class """ metadata={ 'PLUGIN_NAME': 'Money Count', 'PLUGIN_CODENAME': 'lootbag', 'PLUGIN_VERSION': '0.1', 'PLUGIN_DESC': 'Lord Of The Rings Online Assistant plugin for money counting', 'PLUGIN_COPYRIGHT': '(C) 2010 Oliver Gutiérrez <ogutsua@gmail.com>', 'PLUGIN_WEBSITE': 'http://www.evosistemas.com', 'PLUGIN_DOCK': 'status', } def initialize(self): """ Initialization function """ self.regexp=re.compile(r'You( sold \d+ items for | looted | received |\'ve earned |r share was )((?P<gold>\d+) gold coin(s)*( and )*)*((?P<silver>\d+) silver piece(s)*( and )*)*((?P<copper>\d+) copper coin(s)*)*( for quest completion| in the mail)*\.$') # TODO: lotroassist: purchasing # You purchased 43 Travelling Rations for 68 silver pieces and 80 copper coins. def clearMoney(self,widget): """ Clear money """ self.ui.lblCopperCoins=0 self.ui.lblSilverCoins=0 self.ui.lblGoldCoins=0 def newLine(self,line): """ New line analysing function """ # Analyze log line resp=self.regexp.search(line) if resp: # Set values carry=0 copper=resp.group('copper') silver=resp.group('silver') gold=resp.group('gold') if not copper: copper=0 if not silver: silver=0 if not gold: gold=0 # Calculate copper coins val=int(self.ui.lblCopperCoins)+int(copper) if val >= 100: carry=1 val=val%100 self.ui.lblCopperCoins=val # Calculate silver coins silver=int(silver)+carry carry=0 val=int(self.ui.lblSilverCoins)+silver if val >= 1000: carry=1 val=val%1000 self.ui.lblSilverCoins=val # Calculate gold coins gold=int(gold)+carry self.ui.lblGoldCoins=int(self.ui.lblGoldCoins)+gold return True
mit
RevanProdigalKnight/sublimetext-codeformatter
CodeFormatter.py
1
5396
# @author Avtandil Kikabidze
# @copyright Copyright (c) 2008-2015, Avtandil Kikabidze aka LONGMAN (akalongman@gmail.com)
# @link http://longman.me
# @license The MIT License (MIT)

import os
import sys
import sublime
import sublime_plugin

# Detect the running Sublime Text major version (ST3 reports > 3000;
# dev builds may report an empty string).
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3

reloader_name = 'codeformatter.reloader'

# ST3 loads each package as a module, so it needs an extra prefix
if st_version == 3:
    reloader_name = 'CodeFormatter.' + reloader_name
    from imp import reload

# Force a reload of the helper module so plugin updates take effect
# without restarting the editor.
if reloader_name in sys.modules:
    reload(sys.modules[reloader_name])

try:
    # Python 3 (ST3): package-relative imports
    from .codeformatter import reloader
    from .codeformatter.formatter import Formatter
except ValueError:
    # Python 2 (ST2): plain imports
    from codeformatter import reloader
    from codeformatter.formatter import Formatter

# fix for ST2: `print` is only reachable through __builtins__ here
cprint = globals()["__builtins__"]["print"]

debug_mode = False


def plugin_loaded():
    """Called by Sublime Text once the plugin API is ready.

    Reads plugin settings and, on non-Windows platforms, makes the bundled
    phpbeautifier phar executable so the formatter can invoke it.
    """
    # BUG FIX: without `global`, the assignment below created a
    # function-local `debug_mode` and the module-level flag never changed.
    global debug_mode

    cprint('CodeFormatter: Plugin Initialized')
    settings = sublime.load_settings('CodeFormatter.sublime-settings')
    debug_mode = settings.get('codeformatter_debug', False)

    #if debug_mode:
        #from pprint import pprint
        #pprint(settings)
        #debug_write("Debug mode enabled")
        #debug_write("Platform "+sublime.platform()+" "+sublime.arch())
        #debug_write("Sublime Version "+sublime.version())
        #debug_write("Settings "+pprint(settings))

    if sublime.platform() != "windows":
        import stat
        path = sublime.packages_path()+"/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar"
        st = os.stat(path)
        # Ensure the phar has the executable bit set.
        os.chmod(path, st.st_mode | stat.S_IEXEC)


# ST2 has no plugin_loaded() hook, so call it at import time.
if st_version == 2:
    plugin_loaded()


class CodeFormatterCommand(sublime_plugin.TextCommand):
    """Text command that formats the whole view via the Formatter backend."""

    def run(self, edit, syntax=False, saving=False):
        """Format the current view in place.

        :param syntax: optional syntax override passed to Formatter
        :param saving: True when triggered from the pre-save hook; failures
                       are then silent (return False) instead of popping
                       error dialogs.
        """
        if self.view.is_scratch():
            return show_error("File is scratch")

        file_name = self.view.file_name()
        # if not file_name:
        #     return show_error("File does not exist.")
        # if not os.path.exists(file_name):
        #     return show_error("File "+file_name+" does not exist.")

        formatter = Formatter(self.view, file_name, syntax, saving)
        if not formatter.exists():
            if saving:
                return False
            return show_error("Formatter for this file type ("+formatter.syntax+") not found.")

        if saving and not formatter.formatOnSaveEnabled():
            return False

        # Format the entire buffer as UTF-8 bytes.
        file_text = sublime.Region(0, self.view.size())
        file_text_utf = self.view.substr(file_text).encode('utf-8')
        if len(file_text_utf) == 0:
            return show_error("No code found.")

        stdout, stderr = formatter.format(file_text_utf)

        if len(stderr) == 0 and len(stdout) > 0:
            self.view.replace(edit, file_text, stdout)
        else:
            show_error("Format error:\n"+stderr)


class CodeFormatterEventListener(sublime_plugin.EventListener):
    """Runs the formatter automatically before every save."""

    def on_pre_save(self, view):
        args = {}
        args['saving'] = True
        view.run_command('code_formatter', args)


class CodeFormatterShowPhpTransformationsCommand(sublime_plugin.TextCommand):
    """Shows the list of PHP transformations supported by fmt.phar."""

    def run(self, edit, syntax=False):
        import subprocess, re

        platform = sublime.platform()
        settings = sublime.load_settings('CodeFormatter.sublime-settings')

        opts = settings.get('codeformatter_php_options')
        php_path = "php"
        if "php_path" in opts and opts["php_path"]:
            php_path = opts["php_path"]

        cmd = []
        cmd.append(str(php_path))
        cmd.append(sublime.packages_path()+"/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar")
        cmd.append("--list")

        stderr = ""
        stdout = ""
        try:
            if platform == "windows":
                # Suppress the console window that would otherwise flash up.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except Exception as e:
            stderr = str(e)

        if not stderr and not stdout:
            stderr = "Error while gethering list of php transformations"

        if len(stderr) == 0 and len(stdout) > 0:
            text = stdout.decode('utf-8')
            text = re.sub("Usage:.*?PASSNAME", "Available PHP Tranformations:", text)
            window = self.view.window()
            pt = window.get_output_panel("paneltranformations")
            pt.set_read_only(False)
            pt.insert(edit, pt.size(), text)
            window.run_command("show_panel", {"panel": "output.paneltranformations"})
        else:
            show_error("Formatter error:\n"+stderr)


def console_write(text, prefix=False):
    """Write *text* to stdout, optionally prefixed with the plugin name."""
    if prefix:
        sys.stdout.write('CodeFormatter: ')
    sys.stdout.write(text+"\n")


def debug_write(text, prefix=False):
    # NOTE: the `prefix` parameter is accepted for call-site symmetry with
    # console_write but debug output is always prefixed.
    console_write(text, True)


def show_error(text):
    """Pop a Sublime error dialog with the plugin name as header."""
    sublime.error_message(u'CodeFormatter\n\n%s' % text)
mit
samhodgkinson/azure-quickstart-templates
splunk-on-ubuntu/scripts/dorestore.py
104
2089
#      The MIT License (MIT)
#
#      Copyright (c) 2016 Microsoft. All rights reserved.
#
#      Permission is hereby granted, free of charge, to any person obtaining a copy
#      of this software and associated documentation files (the "Software"), to deal
#      in the Software without restriction, including without limitation the rights
#      to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#      copies of the Software, and to permit persons to whom the Software is
#      furnished to do so, subject to the following conditions:
#
#      The above copyright notice and this permission notice shall be included in
#      all copies or substantial portions of the Software.
#
#      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#      IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#      OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#      THE SOFTWARE.

from azure.storage import CloudStorageAccount
import config, time

# Connect to the storage account named in config.py and get a blob client.
storage_account = CloudStorageAccount(
    account_name=config.STORAGE_ACCOUNT_NAME,
    account_key=config.STORAGE_ACCOUNT_KEY,
)
blob_service = storage_account.create_block_blob_service()

# The last time a backup was dropped into the folder, it was named
# 'splunketccfg.tar'. This is (almost) always the one to restore.
container_name = 'backups'
restore_file_name = 'splunketccfg.tar'
OUTPUT_FILE = 'splunketccfg.tar'

# Download the backup locally when present; otherwise report its absence.
if blob_service.exists(container_name, restore_file_name):
    blob_service.get_blob_to_path(container_name, restore_file_name, OUTPUT_FILE)
else:
    print('Backup file does not exist')
mit
jeid64/dcos
packages/adminrouter/extra/src/test-harness/tests/open/test_master.py
3
2954
# Copyright (C) Mesosphere, Inc. See LICENSE file for details. import logging import pytest from generic_test_code.common import ( assert_endpoint_response, generic_correct_upstream_dest_test, ) from generic_test_code.open import assert_iam_queried_for_uid from util import SearchCriteria, iam_denies_all_requests log = logging.getLogger(__name__) authed_endpoints = [ '/acs/api/v1/foo/bar', '/capabilities', '/cosmos/service/foo/bar', '/dcos-history-service/foo/bar', '/exhibitor/foo/bar', '/marathon/v2/apps', '/mesos/master/state-summary', '/mesos_dns/foo/bar', '/metadata', '/navstar/lashup/key', '/package/foo/bar', '/pkgpanda/foo/bar', '/pkgpanda/active.buildinfo.full.json', '/service/nginx-alwaysthere/foo/bar', '/service/nginx-alwaysthere/foo/bar', '/slave/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1', '/system/health/v1/foo/bar', '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/logs/v1/foo/bar', '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/metrics/v0/foo/bar', '/system/v1/leader/marathon/foo/bar', '/system/v1/leader/mesos/foo/bar', '/system/v1/logs/v1/foo/bar', '/system/v1/metrics/foo/bar', ] class TestAuthEnforcementOpen: @pytest.mark.parametrize("path", authed_endpoints) def test_if_unknown_user_is_forbidden_access( self, mocker, master_ar_process, path, valid_user_header): log_messages = { 'User not found: `bozydar`': SearchCriteria(1, True)} with iam_denies_all_requests(mocker): with assert_iam_queried_for_uid(mocker, 'bozydar'): assert_endpoint_response( master_ar_process, path, 401, headers=valid_user_header, assert_stderr=log_messages) @pytest.mark.parametrize("path", authed_endpoints) def test_if_known_user_is_permitted_access( self, mocker, master_ar_process, path, valid_user_header): is_auth_location = path.startswith("/acs/api/v1") with assert_iam_queried_for_uid( mocker, 'bozydar', expect_two_iam_calls=is_auth_location): assert_endpoint_response( master_ar_process, path, 200, headers=valid_user_header, ) class TestHealthEndpointOpen: 
def test_if_request_is_sent_to_correct_upstream(self, master_ar_process, valid_user_header): generic_correct_upstream_dest_test(master_ar_process, valid_user_header, '/system/health/v1/foo/bar', 'http://127.0.0.1:1050', )
apache-2.0
robertwb/incubator-beam
sdks/python/apache_beam/runners/interactive/pipeline_fragment_test.py
5
5445
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tests for apache_beam.runners.interactive.pipeline_fragment.""" import unittest from unittest.mock import patch import apache_beam as beam from apache_beam.options.pipeline_options import StandardOptions from apache_beam.runners.interactive import interactive_beam as ib from apache_beam.runners.interactive import interactive_environment as ie from apache_beam.runners.interactive import interactive_runner as ir from apache_beam.runners.interactive import pipeline_fragment as pf from apache_beam.runners.interactive.testing.mock_ipython import mock_get_ipython from apache_beam.runners.interactive.testing.pipeline_assertion import assert_pipeline_equal from apache_beam.runners.interactive.testing.pipeline_assertion import assert_pipeline_proto_equal from apache_beam.testing.test_stream import TestStream @unittest.skipIf( not ie.current_env().is_interactive_ready, '[interactive] dependency is not installed.') class PipelineFragmentTest(unittest.TestCase): def setUp(self): # Assume a notebook frontend is connected to the mocked ipython kernel. 
ie.current_env()._is_in_ipython = True ie.current_env()._is_in_notebook = True @patch('IPython.get_ipython', new_callable=mock_get_ipython) def test_build_pipeline_fragment(self, cell): with cell: # Cell 1 p = beam.Pipeline(ir.InteractiveRunner()) p_expected = beam.Pipeline(ir.InteractiveRunner()) # Watch local scope now to allow interactive beam to track the pipelines. ib.watch(locals()) with cell: # Cell 2 # pylint: disable=range-builtin-not-iterating init = p | 'Init' >> beam.Create(range(10)) init_expected = p_expected | 'Init' >> beam.Create(range(10)) with cell: # Cell 3 square = init | 'Square' >> beam.Map(lambda x: x * x) _ = init | 'Cube' >> beam.Map(lambda x: x**3) _ = init_expected | 'Square' >> beam.Map(lambda x: x * x) # Watch every PCollection has been defined so far in local scope. ib.watch(locals()) fragment = pf.PipelineFragment([square]).deduce_fragment() assert_pipeline_equal(self, p_expected, fragment) @patch('IPython.get_ipython', new_callable=mock_get_ipython) def test_user_pipeline_intact_after_deducing_pipeline_fragment(self, cell): with cell: # Cell 1 p = beam.Pipeline(ir.InteractiveRunner()) # Watch the pipeline `p` immediately without calling locals(). ib.watch({'p': p}) with cell: # Cell 2 # pylint: disable=range-builtin-not-iterating init = p | 'Init' >> beam.Create(range(10)) with cell: # Cell 3 square = init | 'Square' >> beam.Map(lambda x: x * x) with cell: # Cell 4 cube = init | 'Cube' >> beam.Map(lambda x: x**3) # Watch every PCollection has been defined so far in local scope without # calling locals(). 
ib.watch({'init': init, 'square': square, 'cube': cube}) user_pipeline_proto_before_deducing_fragment = p.to_runner_api( return_context=False) _ = pf.PipelineFragment([square]).deduce_fragment() user_pipeline_proto_after_deducing_fragment = p.to_runner_api( return_context=False) assert_pipeline_proto_equal( self, user_pipeline_proto_before_deducing_fragment, user_pipeline_proto_after_deducing_fragment) @patch('IPython.get_ipython', new_callable=mock_get_ipython) def test_pipeline_fragment_produces_correct_data(self, cell): with cell: # Cell 1 p = beam.Pipeline(ir.InteractiveRunner()) ib.watch({'p': p}) with cell: # Cell 2 # pylint: disable=range-builtin-not-iterating init = p | 'Init' >> beam.Create(range(5)) with cell: # Cell 3 square = init | 'Square' >> beam.Map(lambda x: x * x) _ = init | 'Cube' >> beam.Map(lambda x: x**3) ib.watch(locals()) result = pf.PipelineFragment([square]).run() self.assertEqual([0, 1, 4, 9, 16], list(result.get(square))) def test_fragment_does_not_prune_teststream(self): """Tests that the fragment does not prune the TestStream composite parts. """ options = StandardOptions(streaming=True) p = beam.Pipeline(ir.InteractiveRunner(), options) test_stream = p | TestStream(output_tags=['a', 'b']) # pylint: disable=unused-variable a = test_stream['a'] | 'a' >> beam.Map(lambda _: _) b = test_stream['b'] | 'b' >> beam.Map(lambda _: _) fragment = pf.PipelineFragment([b]).deduce_fragment() # If the fragment does prune the TestStreawm composite parts, then the # resulting graph is invalid and the following call will raise an exception. fragment.to_runner_api() if __name__ == '__main__': unittest.main()
apache-2.0
Kazade/NeHe-Website
google_appengine/lib/django_1_2/django/contrib/localflavor/cl/forms.py
201
3196
""" Chile specific form helpers. """ from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import RegexField, Select from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode class CLRegionSelect(Select): """ A Select widget that uses a list of Chilean Regions (Regiones) as its choices. """ def __init__(self, attrs=None): from cl_regions import REGION_CHOICES super(CLRegionSelect, self).__init__(attrs, choices=REGION_CHOICES) class CLRutField(RegexField): """ Chilean "Rol Unico Tributario" (RUT) field. This is the Chilean national identification number. Samples for testing are available from https://palena.sii.cl/cvc/dte/ee_empresas_emisoras.html """ default_error_messages = { 'invalid': _('Enter a valid Chilean RUT.'), 'strict': _('Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.'), 'checksum': _('The Chilean RUT is not valid.'), } def __init__(self, *args, **kwargs): if 'strict' in kwargs: del kwargs['strict'] super(CLRutField, self).__init__(r'^(\d{1,2}\.)?\d{3}\.\d{3}-[\dkK]$', error_message=self.default_error_messages['strict'], *args, **kwargs) else: # In non-strict mode, accept RUTs that validate but do not exist in # the real world. super(CLRutField, self).__init__(r'^[\d\.]{1,11}-?[\dkK]$', *args, **kwargs) def clean(self, value): """ Check and clean the Chilean RUT. """ super(CLRutField, self).clean(value) if value in EMPTY_VALUES: return u'' rut, verificador = self._canonify(value) if self._algorithm(rut) == verificador: return self._format(rut, verificador) else: raise ValidationError(self.error_messages['checksum']) def _algorithm(self, rut): """ Takes RUT in pure canonical form, calculates the verifier digit. """ suma = 0 multi = 2 for r in rut[::-1]: suma += int(r) * multi multi += 1 if multi == 8: multi = 2 return u'0123456789K0'[11 - suma % 11] def _canonify(self, rut): """ Turns the RUT into one normalized format. 
Returns a (rut, verifier) tuple. """ rut = smart_unicode(rut).replace(' ', '').replace('.', '').replace('-', '') return rut[:-1], rut[-1].upper() def _format(self, code, verifier=None): """ Formats the RUT from canonical form to the common string representation. If verifier=None, then the last digit in 'code' is the verifier. """ if verifier is None: verifier = code[-1] code = code[:-1] while len(code) > 3 and '.' not in code[:3]: pos = code.find('.') if pos == -1: new_dot = -3 else: new_dot = pos - 3 code = code[:new_dot] + '.' + code[new_dot:] return u'%s-%s' % (code, verifier)
bsd-3-clause
abhikumar22/MYBLOG
blg/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
510
1413
from __future__ import absolute_import, division, unicode_literals from xml.dom import Node from . import base class TreeWalker(base.NonRecursiveTreeWalker): def getNodeDetails(self, node): if node.nodeType == Node.DOCUMENT_TYPE_NODE: return base.DOCTYPE, node.name, node.publicId, node.systemId elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): return base.TEXT, node.nodeValue elif node.nodeType == Node.ELEMENT_NODE: attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) if attr.namespaceURI: attrs[(attr.namespaceURI, attr.localName)] = attr.value else: attrs[(None, attr.name)] = attr.value return (base.ELEMENT, node.namespaceURI, node.nodeName, attrs, node.hasChildNodes()) elif node.nodeType == Node.COMMENT_NODE: return base.COMMENT, node.nodeValue elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): return (base.DOCUMENT,) else: return base.UNKNOWN, node.nodeType def getFirstChild(self, node): return node.firstChild def getNextSibling(self, node): return node.nextSibling def getParentNode(self, node): return node.parentNode
gpl-3.0
danifss/sar-1516-proj-g1
rendezvous/core/models.py
1
2172
from __future__ import unicode_literals from django.db import models ### USER class User(models.Model): userID = models.AutoField(primary_key=True) username = models.CharField(max_length=100, unique=True) email = models.CharField(max_length=150, unique=True) password = models.CharField(max_length=100) userSalt = models.CharField(max_length=64, blank=True) firstName = models.CharField(max_length=100) lastName = models.CharField(max_length=100) createdOn = models.DateTimeField(auto_now_add=True) def __unicode__(self): return u'ID {0} - {1} - ({2}/{3}/{4})'.format( self.userID, self.username, self.createdOn.day, self.createdOn.month, self.createdOn.year ) ### SERVICE (Available services in this network) class Service(models.Model): serviceID = models.AutoField(primary_key=True) name = models.CharField(max_length=200, unique=True) nickname = models.CharField(max_length=50, unique=True) description = models.CharField(max_length=250, blank=True) ip = models.CharField(max_length=15, unique=True) port = models.IntegerField() createdOn = models.DateTimeField(auto_now_add=True) def __unicode__(self): return u'ID {0} - {1} - ({2}/{3}/{4})'.format( self.serviceID, self.name, self.createdOn.day, self.createdOn.month, self.createdOn.year ) ### BROKER (Others broker PSW on the network) # class Broker(models.Model): # brokerID = models.AutoField(primary_key=True) # # user = models.ForeignKey(User) # name = models.CharField(max_length=50, unique=True) # ip = models.CharField(max_length=15, unique=True) # port = models.IntegerField() # description = models.CharField(max_length=250, blank=True) # createdOn = models.DateTimeField(auto_now_add=True) # # def __unicode__(self): # return u'ID {0} - {1} - ({2}/{3}/{4})'.format( # self.brokerID, # self.name, # self.createdOn.day, # self.createdOn.month, # self.createdOn.year # )
mit
jagguli/intellij-community
python/lib/Lib/Cookie.py
91
25261
#!/usr/bin/env python # #### # Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu> # # All Rights Reserved # # Permission to use, copy, modify, and distribute this software # and its documentation for any purpose and without fee is hereby # granted, provided that the above copyright notice appear in all # copies and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Timothy O'Malley not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS # SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR # ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # #### # # Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp # by Timothy O'Malley <timo@alum.mit.edu> # # Cookie.py is a Python module for the handling of HTTP # cookies as a Python dictionary. See RFC 2109 for more # information on cookies. # # The original idea to treat Cookies as a dictionary came from # Dave Mitchell (davem@magnet.com) in 1995, when he released the # first version of nscookie.py. # #### r""" Here's a sample session to show how to use this module. At the moment, this is the only documentation. The Basics ---------- Importing is easy.. >>> import Cookie Most of the time you start by creating a cookie. Cookies come in three flavors, each with slightly different encoding semantics, but more on that later. >>> C = Cookie.SimpleCookie() >>> C = Cookie.SerialCookie() >>> C = Cookie.SmartCookie() [Note: Long-time users of Cookie.py will remember using Cookie.Cookie() to create an Cookie object. 
Although deprecated, it is still supported by the code. See the Backward Compatibility notes for more information.] Once you've created your Cookie, you can add values just as if it were a dictionary. >>> C = Cookie.SmartCookie() >>> C["fig"] = "newton" >>> C["sugar"] = "wafer" >>> C.output() 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer' Notice that the printable representation of a Cookie is the appropriate format for a Set-Cookie: header. This is the default behavior. You can change the header and printed attributes by using the .output() function >>> C = Cookie.SmartCookie() >>> C["rocky"] = "road" >>> C["rocky"]["path"] = "/cookie" >>> print C.output(header="Cookie:") Cookie: rocky=road; Path=/cookie >>> print C.output(attrs=[], header="Cookie:") Cookie: rocky=road The load() method of a Cookie extracts cookies from a string. In a CGI script, you would use this method to extract the cookies from the HTTP_COOKIE environment variable. >>> C = Cookie.SmartCookie() >>> C.load("chips=ahoy; vienna=finger") >>> C.output() 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger' The load() method is darn-tootin smart about identifying cookies within a string. Escaped quotation marks, nested semicolons, and other such trickeries do not confuse it. >>> C = Cookie.SmartCookie() >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') >>> print C Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" Each element of the Cookie also supports all of the RFC 2109 Cookie attributes. Here's an example which sets the Path attribute. >>> C = Cookie.SmartCookie() >>> C["oreo"] = "doublestuff" >>> C["oreo"]["path"] = "/" >>> print C Set-Cookie: oreo=doublestuff; Path=/ Each dictionary element has a 'value' attribute, which gives you back the value associated with the key. 
>>> C = Cookie.SmartCookie() >>> C["twix"] = "none for you" >>> C["twix"].value 'none for you' A Bit More Advanced ------------------- As mentioned before, there are three different flavors of Cookie objects, each with different encoding/decoding semantics. This section briefly discusses the differences. SimpleCookie The SimpleCookie expects that all values should be standard strings. Just to be sure, SimpleCookie invokes the str() builtin to convert the value to a string, when the values are set dictionary-style. >>> C = Cookie.SimpleCookie() >>> C["number"] = 7 >>> C["string"] = "seven" >>> C["number"].value '7' >>> C["string"].value 'seven' >>> C.output() 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' SerialCookie The SerialCookie expects that all values should be serialized using cPickle (or pickle, if cPickle isn't available). As a result of serializing, SerialCookie can save almost any Python object to a value, and recover the exact same object when the cookie has been returned. (SerialCookie can yield some strange-looking cookie values, however.) >>> C = Cookie.SerialCookie() >>> C["number"] = 7 >>> C["string"] = "seven" >>> C["number"].value 7 >>> C["string"].value 'seven' >>> C.output() 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."' Be warned, however, if SerialCookie cannot de-serialize a value (because it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION. SmartCookie The SmartCookie combines aspects of each of the other two flavors. When setting a value in a dictionary-fashion, the SmartCookie will serialize (ala cPickle) the value *if and only if* it isn't a Python string. String objects are *not* serialized. Similarly, when the load() method parses out values, it attempts to de-serialize the value. If it fails, then it fallsback to treating the value as a string. 
>>> C = Cookie.SmartCookie() >>> C["number"] = 7 >>> C["string"] = "seven" >>> C["number"].value 7 >>> C["string"].value 'seven' >>> C.output() 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven' Backwards Compatibility ----------------------- In order to keep compatibilty with earlier versions of Cookie.py, it is still possible to use Cookie.Cookie() to create a Cookie. In fact, this simply returns a SmartCookie. >>> C = Cookie.Cookie() >>> print C.__class__.__name__ SmartCookie Finis. """ #" # ^ # |----helps out font-lock # # Import our required modules # import string try: from cPickle import dumps, loads except ImportError: from pickle import dumps, loads import re, warnings __all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie", "SmartCookie","Cookie"] _nulljoin = ''.join _semispacejoin = '; '.join _spacejoin = ' '.join # # Define an exception visible to External modules # class CookieError(Exception): pass # These quoting routines conform to the RFC2109 specification, which in # turn references the character definitions from RFC2068. They provide # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is # quoted with a preceeding '\' slash. # # These are taken from RFC2068 and RFC2109. 
# _LegalChars is the list of chars which don't require "'s # _Translator hash-table for fast quoting # _LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~" _Translator = { '\000' : '\\000', '\001' : '\\001', '\002' : '\\002', '\003' : '\\003', '\004' : '\\004', '\005' : '\\005', '\006' : '\\006', '\007' : '\\007', '\010' : '\\010', '\011' : '\\011', '\012' : '\\012', '\013' : '\\013', '\014' : '\\014', '\015' : '\\015', '\016' : '\\016', '\017' : '\\017', '\020' : '\\020', '\021' : '\\021', '\022' : '\\022', '\023' : '\\023', '\024' : '\\024', '\025' : '\\025', '\026' : '\\026', '\027' : '\\027', '\030' : '\\030', '\031' : '\\031', '\032' : '\\032', '\033' : '\\033', '\034' : '\\034', '\035' : '\\035', '\036' : '\\036', '\037' : '\\037', '"' : '\\"', '\\' : '\\\\', '\177' : '\\177', '\200' : '\\200', '\201' : '\\201', '\202' : '\\202', '\203' : '\\203', '\204' : '\\204', '\205' : '\\205', '\206' : '\\206', '\207' : '\\207', '\210' : '\\210', '\211' : '\\211', '\212' : '\\212', '\213' : '\\213', '\214' : '\\214', '\215' : '\\215', '\216' : '\\216', '\217' : '\\217', '\220' : '\\220', '\221' : '\\221', '\222' : '\\222', '\223' : '\\223', '\224' : '\\224', '\225' : '\\225', '\226' : '\\226', '\227' : '\\227', '\230' : '\\230', '\231' : '\\231', '\232' : '\\232', '\233' : '\\233', '\234' : '\\234', '\235' : '\\235', '\236' : '\\236', '\237' : '\\237', '\240' : '\\240', '\241' : '\\241', '\242' : '\\242', '\243' : '\\243', '\244' : '\\244', '\245' : '\\245', '\246' : '\\246', '\247' : '\\247', '\250' : '\\250', '\251' : '\\251', '\252' : '\\252', '\253' : '\\253', '\254' : '\\254', '\255' : '\\255', '\256' : '\\256', '\257' : '\\257', '\260' : '\\260', '\261' : '\\261', '\262' : '\\262', '\263' : '\\263', '\264' : '\\264', '\265' : '\\265', '\266' : '\\266', '\267' : '\\267', '\270' : '\\270', '\271' : '\\271', '\272' : '\\272', '\273' : '\\273', '\274' : '\\274', '\275' : '\\275', '\276' : '\\276', '\277' : '\\277', '\300' : '\\300', '\301' : '\\301', 
'\302' : '\\302', '\303' : '\\303', '\304' : '\\304', '\305' : '\\305', '\306' : '\\306', '\307' : '\\307', '\310' : '\\310', '\311' : '\\311', '\312' : '\\312', '\313' : '\\313', '\314' : '\\314', '\315' : '\\315', '\316' : '\\316', '\317' : '\\317', '\320' : '\\320', '\321' : '\\321', '\322' : '\\322', '\323' : '\\323', '\324' : '\\324', '\325' : '\\325', '\326' : '\\326', '\327' : '\\327', '\330' : '\\330', '\331' : '\\331', '\332' : '\\332', '\333' : '\\333', '\334' : '\\334', '\335' : '\\335', '\336' : '\\336', '\337' : '\\337', '\340' : '\\340', '\341' : '\\341', '\342' : '\\342', '\343' : '\\343', '\344' : '\\344', '\345' : '\\345', '\346' : '\\346', '\347' : '\\347', '\350' : '\\350', '\351' : '\\351', '\352' : '\\352', '\353' : '\\353', '\354' : '\\354', '\355' : '\\355', '\356' : '\\356', '\357' : '\\357', '\360' : '\\360', '\361' : '\\361', '\362' : '\\362', '\363' : '\\363', '\364' : '\\364', '\365' : '\\365', '\366' : '\\366', '\367' : '\\367', '\370' : '\\370', '\371' : '\\371', '\372' : '\\372', '\373' : '\\373', '\374' : '\\374', '\375' : '\\375', '\376' : '\\376', '\377' : '\\377' } _idmap = ''.join(chr(x) for x in xrange(256)) def _quote(str, LegalChars=_LegalChars, idmap=_idmap, translate=string.translate): # # If the string does not need to be double-quoted, # then just return the string. Otherwise, surround # the string in doublequotes and precede quote (with a \) # special characters. # if "" == translate(str, idmap, LegalChars): return str else: return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"' # end _quote _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") def _unquote(str): # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if len(str) < 2: return str if str[0] != '"' or str[-1] != '"': return str # We have to assume that we must decode this string. # Down to work. # Remove the "s str = str[1:-1] # Check for special sequences. 
Examples: # \012 --> \n # \" --> " # i = 0 n = len(str) res = [] while 0 <= i < n: Omatch = _OctalPatt.search(str, i) Qmatch = _QuotePatt.search(str, i) if not Omatch and not Qmatch: # Neither matched res.append(str[i:]) break # else: j = k = -1 if Omatch: j = Omatch.start(0) if Qmatch: k = Qmatch.start(0) if Qmatch and ( not Omatch or k < j ): # QuotePatt matched res.append(str[i:k]) res.append(str[k+1]) i = k+2 else: # OctalPatt matched res.append(str[i:j]) res.append( chr( int(str[j+1:j+4], 8) ) ) i = j+4 return _nulljoin(res) # end _unquote # The _getdate() routine is used to set the expiration time in # the cookie's HTTP header. By default, _getdate() returns the # current time in the appropriate "expires" format for a # Set-Cookie header. The one optional argument is an offset from # now, in seconds. For example, an offset of -3600 means "one hour ago". # The offset may be a floating point number. # _weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] _monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): from time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) # # A class to hold ONE key,value pair. # In a cookie, each such pair may have several attributes. # so this class is used to keep the attributes associated # with the appropriate key,value pair. # This class also includes a coded_value attribute, which # is used to hold the network representation of the # value. This is most useful when Python objects are # pickled for network transit. 
# class Morsel(dict): # RFC 2109 lists these attributes as reserved: # path comment domain # max-age secure version # # For historical reasons, these attributes are also reserved: # expires # # This dictionary provides a mapping from the lowercase # variant on the left to the appropriate traditional # formatting on the right. _reserved = { "expires" : "expires", "path" : "Path", "comment" : "Comment", "domain" : "Domain", "max-age" : "Max-Age", "secure" : "secure", "version" : "Version", } def __init__(self): # Set defaults self.key = self.value = self.coded_value = None # Set default attributes for K in self._reserved: dict.__setitem__(self, K, "") # end __init__ def __setitem__(self, K, V): K = K.lower() if not K in self._reserved: raise CookieError("Invalid Attribute %s" % K) dict.__setitem__(self, K, V) # end __setitem__ def isReservedKey(self, K): return K.lower() in self._reserved # end isReservedKey def set(self, key, val, coded_val, LegalChars=_LegalChars, idmap=_idmap, translate=string.translate): # First we verify that the key isn't a reserved word # Second we make sure it only contains legal characters if key.lower() in self._reserved: raise CookieError("Attempt to set a reserved key: %s" % key) if "" != translate(key, idmap, LegalChars): raise CookieError("Illegal key value: %s" % key) # It's a good key, so save it. 
self.key = key self.value = val self.coded_value = coded_val # end set def output(self, attrs=None, header = "Set-Cookie:"): return "%s %s" % ( header, self.OutputString(attrs) ) __str__ = output def __repr__(self): return '<%s: %s=%s>' % (self.__class__.__name__, self.key, repr(self.value) ) def js_output(self, attrs=None): # Print javascript return """ <script type="text/javascript"> <!-- begin hiding document.cookie = \"%s\"; // end hiding --> </script> """ % ( self.OutputString(attrs), ) # end js_output() def OutputString(self, attrs=None): # Build up our result # result = [] RA = result.append # First, the key=value pair RA("%s=%s" % (self.key, self.coded_value)) # Now add any defined attributes if attrs is None: attrs = self._reserved items = self.items() items.sort() for K,V in items: if V == "": continue if K not in attrs: continue if K == "expires" and type(V) == type(1): RA("%s=%s" % (self._reserved[K], _getdate(V))) elif K == "max-age" and type(V) == type(1): RA("%s=%d" % (self._reserved[K], V)) elif K == "secure": RA(str(self._reserved[K])) else: RA("%s=%s" % (self._reserved[K], V)) # Return the result return _semispacejoin(result) # end OutputString # end Morsel class # # Pattern for finding cookie # # This used to be strict parsing based on the RFC2109 and RFC2068 # specifications. I have since discovered that MSIE 3.0x doesn't # follow the character rules outlined in those specs. As a # result, the parsing rules here are less strict. # _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile( r"(?x)" # This is a Verbose pattern r"(?P<key>" # Start of group 'key' ""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy r")" # End of group 'key' r"\s*=\s*" # Equal Sign r"(?P<val>" # Start of group 'val' r'"(?:[^\\"]|\\.)*"' # Any doublequoted string r"|" # or ""+ _LegalCharsPatt +"*" # Any word or empty string r")" # End of group 'val' r"\s*;?" 
# Probably ending in a semi-colon ) # At long last, here is the cookie class. # Using this class is almost just like using a dictionary. # See this module's docstring for example usage. # class BaseCookie(dict): # A container class for a set of Morsels # def value_decode(self, val): """real_value, coded_value = value_decode(STRING) Called prior to setting a cookie's value from the network representation. The VALUE is the value read from HTTP header. Override this function to modify the behavior of cookies. """ return val, val # end value_encode def value_encode(self, val): """real_value, coded_value = value_encode(VALUE) Called prior to setting a cookie's value from the dictionary representation. The VALUE is the value being assigned. Override this function to modify the behavior of cookies. """ strval = str(val) return strval, strval # end value_encode def __init__(self, input=None): if input: self.load(input) # end __init__ def __set(self, key, real_value, coded_value): """Private method for setting a cookie's value""" M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) # end __set def __setitem__(self, key, value): """Dictionary style assignment.""" rval, cval = self.value_encode(value) self.__set(key, rval, cval) # end __setitem__ def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): """Return a string suitable for HTTP.""" result = [] items = self.items() items.sort() for K,V in items: result.append( V.output(attrs, header) ) return sep.join(result) # end output __str__ = output def __repr__(self): L = [] items = self.items() items.sort() for K,V in items: L.append( '%s=%s' % (K,repr(V.value) ) ) return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L)) def js_output(self, attrs=None): """Return a string suitable for JavaScript.""" result = [] items = self.items() items.sort() for K,V in items: result.append( V.js_output(attrs) ) return _nulljoin(result) # end js_output def load(self, rawdata): 
"""Load cookies from a string (presumably HTTP_COOKIE) or from a dictionary. Loading cookies from a dictionary 'd' is equivalent to calling: map(Cookie.__setitem__, d.keys(), d.values()) """ if type(rawdata) == type(""): self.__ParseString(rawdata) else: self.update(rawdata) return # end load() def __ParseString(self, str, patt=_CookiePattern): i = 0 # Our starting point n = len(str) # Length of string M = None # current morsel while 0 <= i < n: # Start looking for a cookie match = patt.search(str, i) if not match: break # No more cookies K,V = match.group("key"), match.group("val") i = match.end(0) # Parse the key, value in case it's metainfo if K[0] == "$": # We ignore attributes which pertain to the cookie # mechanism as a whole. See RFC 2109. # (Does anyone care?) if M: M[ K[1:] ] = V elif K.lower() in Morsel._reserved: if M: M[ K ] = _unquote(V) else: rval, cval = self.value_decode(V) self.__set(K, rval, cval) M = self[K] # end __ParseString # end BaseCookie class class SimpleCookie(BaseCookie): """SimpleCookie SimpleCookie supports strings as cookie values. When setting the value using the dictionary assignment notation, SimpleCookie calls the builtin str() to convert the value to a string. Values received from HTTP are kept as strings. """ def value_decode(self, val): return _unquote( val ), val def value_encode(self, val): strval = str(val) return strval, _quote( strval ) # end SimpleCookie class SerialCookie(BaseCookie): """SerialCookie SerialCookie supports arbitrary objects as cookie values. All values are serialized (using cPickle) before being sent to the client. All incoming values are assumed to be valid Pickle representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE FORMAT, THEN AN EXCEPTION WILL BE RAISED. Note: Large cookie values add overhead because they must be retransmitted on every HTTP transaction. Note: HTTP has a 2k limit on the size of a cookie. This class does not check for this limit, so be careful!!! 
""" def __init__(self, input=None): warnings.warn("SerialCookie class is insecure; do not use it", DeprecationWarning) BaseCookie.__init__(self, input) # end __init__ def value_decode(self, val): # This could raise an exception! return loads( _unquote(val) ), val def value_encode(self, val): return val, _quote( dumps(val) ) # end SerialCookie class SmartCookie(BaseCookie): """SmartCookie SmartCookie supports arbitrary objects as cookie values. If the object is a string, then it is quoted. If the object is not a string, however, then SmartCookie will use cPickle to serialize the object into a string representation. Note: Large cookie values add overhead because they must be retransmitted on every HTTP transaction. Note: HTTP has a 2k limit on the size of a cookie. This class does not check for this limit, so be careful!!! """ def __init__(self, input=None): warnings.warn("Cookie/SmartCookie class is insecure; do not use it", DeprecationWarning) BaseCookie.__init__(self, input) # end __init__ def value_decode(self, val): strval = _unquote(val) try: return loads(strval), val except: return strval, val def value_encode(self, val): if type(val) == type(""): return val, _quote(val) else: return val, _quote( dumps(val) ) # end SmartCookie ########################################################### # Backwards Compatibility: Don't break any existing code! # We provide Cookie() as an alias for SmartCookie() Cookie = SmartCookie # ########################################################### def _test(): import doctest, Cookie return doctest.testmod(Cookie) if __name__ == "__main__": _test() #Local Variables: #tab-width: 4 #end:
apache-2.0
mcltn/ansible
lib/ansible/playbook/handler.py
237
1957
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleError #from ansible.inventory.host import Host from ansible.playbook.task import Task class Handler(Task): def __init__(self, block=None, role=None, task_include=None): self._flagged_hosts = [] super(Handler, self).__init__(block=block, role=role, task_include=task_include) def __repr__(self): ''' returns a human readable representation of the handler ''' return "HANDLER: %s" % self.get_name() @staticmethod def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None): t = Handler(block=block, role=role, task_include=task_include) return t.load_data(data, variable_manager=variable_manager, loader=loader) def flag_for_host(self, host): #assert instanceof(host, Host) if host not in self._flagged_hosts: self._flagged_hosts.append(host) def has_triggered(self, host): return host in self._flagged_hosts def serialize(self): result = super(Handler, self).serialize() result['is_handler'] = True return result
gpl-3.0
Zord13appdesa/python-for-android
python-build/python-libs/gdata/src/gdata/tlslite/HandshakeSettings.py
359
6364
"""Class for setting handshake parameters.""" from constants import CertificateType from utils import cryptomath from utils import cipherfactory class HandshakeSettings: """This class encapsulates various parameters that can be used with a TLS handshake. @sort: minKeySize, maxKeySize, cipherNames, certificateTypes, minVersion, maxVersion @type minKeySize: int @ivar minKeySize: The minimum bit length for asymmetric keys. If the other party tries to use SRP, RSA, or Diffie-Hellman parameters smaller than this length, an alert will be signalled. The default is 1023. @type maxKeySize: int @ivar maxKeySize: The maximum bit length for asymmetric keys. If the other party tries to use SRP, RSA, or Diffie-Hellman parameters larger than this length, an alert will be signalled. The default is 8193. @type cipherNames: list @ivar cipherNames: The allowed ciphers, in order of preference. The allowed values in this list are 'aes256', 'aes128', '3des', and 'rc4'. If these settings are used with a client handshake, they determine the order of the ciphersuites offered in the ClientHello message. If these settings are used with a server handshake, the server will choose whichever ciphersuite matches the earliest entry in this list. NOTE: If '3des' is used in this list, but TLS Lite can't find an add-on library that supports 3DES, then '3des' will be silently removed. The default value is ['aes256', 'aes128', '3des', 'rc4']. @type certificateTypes: list @ivar certificateTypes: The allowed certificate types, in order of preference. The allowed values in this list are 'x509' and 'cryptoID'. This list is only used with a client handshake. The client will advertise to the server which certificate types are supported, and will check that the server uses one of the appropriate types. NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not installed, then 'cryptoID' will be silently removed. @type minVersion: tuple @ivar minVersion: The minimum allowed SSL/TLS version. 
This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to use a lower version, a protocol_version alert will be signalled. The default is (3,0). @type maxVersion: tuple @ivar maxVersion: The maximum allowed SSL/TLS version. This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to use a higher version, a protocol_version alert will be signalled. The default is (3,2). (WARNING: Some servers may (improperly) reject clients which offer support for TLS 1.1. In this case, try lowering maxVersion to (3,1)). """ def __init__(self): self.minKeySize = 1023 self.maxKeySize = 8193 self.cipherNames = ["aes256", "aes128", "3des", "rc4"] self.cipherImplementations = ["cryptlib", "openssl", "pycrypto", "python"] self.certificateTypes = ["x509", "cryptoID"] self.minVersion = (3,0) self.maxVersion = (3,2) #Filters out options that are not supported def _filter(self): other = HandshakeSettings() other.minKeySize = self.minKeySize other.maxKeySize = self.maxKeySize other.cipherNames = self.cipherNames other.cipherImplementations = self.cipherImplementations other.certificateTypes = self.certificateTypes other.minVersion = self.minVersion other.maxVersion = self.maxVersion if not cipherfactory.tripleDESPresent: other.cipherNames = [e for e in self.cipherNames if e != "3des"] if len(other.cipherNames)==0: raise ValueError("No supported ciphers") try: import cryptoIDlib except ImportError: other.certificateTypes = [e for e in self.certificateTypes \ if e != "cryptoID"] if len(other.certificateTypes)==0: raise ValueError("No supported certificate types") if not cryptomath.cryptlibpyLoaded: other.cipherImplementations = [e for e in \ self.cipherImplementations if e != "cryptlib"] if not cryptomath.m2cryptoLoaded: other.cipherImplementations = [e for e in \ other.cipherImplementations if e != "openssl"] if not cryptomath.pycryptoLoaded: other.cipherImplementations = [e for 
e in \ other.cipherImplementations if e != "pycrypto"] if len(other.cipherImplementations)==0: raise ValueError("No supported cipher implementations") if other.minKeySize<512: raise ValueError("minKeySize too small") if other.minKeySize>16384: raise ValueError("minKeySize too large") if other.maxKeySize<512: raise ValueError("maxKeySize too small") if other.maxKeySize>16384: raise ValueError("maxKeySize too large") for s in other.cipherNames: if s not in ("aes256", "aes128", "rc4", "3des"): raise ValueError("Unknown cipher name: '%s'" % s) for s in other.cipherImplementations: if s not in ("cryptlib", "openssl", "python", "pycrypto"): raise ValueError("Unknown cipher implementation: '%s'" % s) for s in other.certificateTypes: if s not in ("x509", "cryptoID"): raise ValueError("Unknown certificate type: '%s'" % s) if other.minVersion > other.maxVersion: raise ValueError("Versions set incorrectly") if not other.minVersion in ((3,0), (3,1), (3,2)): raise ValueError("minVersion set incorrectly") if not other.maxVersion in ((3,0), (3,1), (3,2)): raise ValueError("maxVersion set incorrectly") return other def _getCertificateTypes(self): l = [] for ct in self.certificateTypes: if ct == "x509": l.append(CertificateType.x509) elif ct == "cryptoID": l.append(CertificateType.cryptoID) else: raise AssertionError() return l
apache-2.0
toastedcornflakes/scikit-learn
examples/svm/plot_svm_kernels.py
329
1971
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= SVM-Kernels ========================================================= Three different types of SVM-Kernels are displayed below. The polynomial and RBF are especially useful when the data-points are not linearly separable. """ print(__doc__) # Code source: Gaël Varoquaux # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import svm # Our dataset and targets X = np.c_[(.4, -.7), (-1.5, -1), (-1.4, -.9), (-1.3, -1.2), (-1.1, -.2), (-1.2, -.4), (-.5, 1.2), (-1.5, 2.1), (1, 1), # -- (1.3, .8), (1.2, .5), (.2, -2), (.5, -2.4), (.2, -2.3), (0, -2.7), (1.3, 2.1)].T Y = [0] * 8 + [1] * 8 # figure number fignum = 1 # fit the model for kernel in ('linear', 'poly', 'rbf'): clf = svm.SVC(kernel=kernel, gamma=2) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10) plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired) plt.axis('tight') x_min = -3 x_max = 3 y_min = -3 y_max = 3 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
bsd-3-clause
amandalund/openmc
tests/regression_tests/void/test.py
8
1069
import numpy as np import openmc import pytest from tests.testing_harness import PyAPITestHarness @pytest.fixture def model(): model = openmc.model.Model() zn = openmc.Material() zn.set_density('g/cm3', 7.14) zn.add_nuclide('Zn64', 1.0) model.materials.append(zn) radii = np.linspace(1.0, 100.0) surfs = [openmc.Sphere(r=r) for r in radii] surfs[-1].boundary_type = 'vacuum' cells = [openmc.Cell(fill=(zn if i % 2 == 0 else None), region=region) for i, region in enumerate(openmc.model.subdivide(surfs))] model.geometry = openmc.Geometry(cells) model.settings.run_mode = 'fixed source' model.settings.batches = 3 model.settings.particles = 1000 model.settings.source = openmc.Source(space=openmc.stats.Point()) cell_filter = openmc.CellFilter(cells) tally = openmc.Tally() tally.filters = [cell_filter] tally.scores = ['total'] model.tallies.append(tally) return model def test_void(model): harness = PyAPITestHarness('statepoint.3.h5', model) harness.main()
mit
tsl143/zamboni
mkt/site/tests/test_helpers.py
11
6074
# -*- coding: utf-8 -*- from django.conf import settings import fudge import mock from datetime import datetime, timedelta from jingo import env from nose.tools import eq_ from urlparse import urljoin import mkt.site.tests from mkt.site.helpers import absolutify, css, js, product_as_dict, timesince from mkt.site.fixtures import fixture from mkt.webapps.models import Webapp class TestCSS(mkt.site.tests.TestCase): @mock.patch.object(settings, 'TEMPLATE_DEBUG', True) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_dev_unminified(self, fake_css): request = mock.Mock() request.GET = {} context = {'request': request} # Should be called with `debug=True`. fake_css.expects('css').with_args('mkt/devreg', False, True) css(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', False) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_prod_minified(self, fake_css): request = mock.Mock() request.GET = {} context = {'request': request} # Should be called with `debug=False`. fake_css.expects('css').with_args('mkt/devreg', False, False) css(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', True) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_dev_unminified_overridden(self, fake_css): request = mock.Mock() request.GET = {'debug': 'true'} context = {'request': request} # Should be called with `debug=True`. fake_css.expects('css').with_args('mkt/devreg', False, True) css(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', False) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_prod_unminified_overridden(self, fake_css): request = mock.Mock() request.GET = {'debug': 'true'} context = {'request': request} # Should be called with `debug=True`. 
fake_css.expects('css').with_args('mkt/devreg', False, True) css(context, 'mkt/devreg') class TestJS(mkt.site.tests.TestCase): @mock.patch.object(settings, 'TEMPLATE_DEBUG', True) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_dev_unminified(self, fake_js): request = mock.Mock() request.GET = {} context = {'request': request} # Should be called with `debug=True`. fake_js.expects('js').with_args('mkt/devreg', True, False, False) js(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', False) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_prod_minified(self, fake_js): request = mock.Mock() request.GET = {} context = {'request': request} # Should be called with `debug=False`. fake_js.expects('js').with_args('mkt/devreg', False, False, False) js(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', True) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_dev_unminified_overridden(self, fake_js): request = mock.Mock() request.GET = {'debug': 'true'} context = {'request': request} # Should be called with `debug=True`. fake_js.expects('js').with_args('mkt/devreg', True, False, False) js(context, 'mkt/devreg') @mock.patch.object(settings, 'TEMPLATE_DEBUG', False) @fudge.patch('mkt.site.helpers.jingo_minify_helpers') def test_prod_unminified_overridden(self, fake_js): request = mock.Mock() request.GET = {'debug': 'true'} context = {'request': request} # Should be called with `debug=True`. 
fake_js.expects('js').with_args('mkt/devreg', True, False, False) js(context, 'mkt/devreg') class TestProductAsDict(mkt.site.tests.TestCase): fixtures = fixture('webapp_337141') def test_correct(self): request = mock.Mock(GET={'src': 'poop'}) app = Webapp.objects.get(id=337141) data = product_as_dict(request, app) eq_(data['src'], 'poop') eq_(data['is_packaged'], False) eq_(data['categories'], []) eq_(data['name'], 'Something Something Steamcube!') eq_(data['id'], '337141') eq_(data['manifest_url'], 'http://micropipes.com/temp/steamcube.webapp') tokenUrl = '/reviewers/app/something-something/token' recordUrl = '/app/something-something/purchase/record?src=poop' assert tokenUrl in data['tokenUrl'], ( 'Invalid Token URL. Expected %s; Got %s' % (tokenUrl, data['tokenUrl'])) assert recordUrl in data['recordUrl'], ( 'Invalid Record URL. Expected %s; Got %s' % (recordUrl, data['recordUrl'])) def test_absolutify(): eq_(absolutify('/woo'), urljoin(settings.SITE_URL, '/woo')) eq_(absolutify('https://marketplace.firefox.com'), 'https://marketplace.firefox.com') def test_timesince(): month_ago = datetime.now() - timedelta(days=30) eq_(timesince(month_ago), u'1 month ago') eq_(timesince(None), u'') def render(s, context={}): return env.from_string(s).render(context) @mock.patch('mkt.site.helpers.reverse') def test_url(mock_reverse): render('{{ url("viewname", 1, z=2) }}') mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2}) render('{{ url("viewname", 1, z=2, host="myhost") }}') mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2}) def test_url_src(): s = render('{{ url("mkt.developers.apps.edit", "a3615", src="xxx") }}') assert s.endswith('?src=xxx') def test_f(): # This makes sure there's no UnicodeEncodeError when doing the string # interpolation. 
eq_(render(u'{{ "foo {0}"|f("baré") }}'), u'foo baré') def test_isotime(): time = datetime(2009, 12, 25, 10, 11, 12) s = render('{{ d|isotime }}', {'d': time}) eq_(s, '2009-12-25T18:11:12Z') s = render('{{ d|isotime }}', {'d': None}) eq_(s, '')
bsd-3-clause
lordmos/blink
Tools/Scripts/webkitpy/common/system/workspace_unittest.py
3
3638
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import webkitpy.thirdparty.unittest2 as unittest from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.common.system.workspace import Workspace from webkitpy.common.system.executive_mock import MockExecutive class WorkspaceTest(unittest.TestCase): def test_find_unused_filename(self): filesystem = MockFileSystem({ "dir/foo.jpg": "", "dir/foo-1.jpg": "", "dir/foo-2.jpg": "", }) workspace = Workspace(filesystem, None) self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar") self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None) self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None) self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg") def test_create_zip(self): workspace = Workspace(None, MockExecutive(should_log=True)) expected_logs = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n" class MockZipFile(object): def __init__(self, path): self.filename = path archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs) self.assertEqual(archive.filename, "/zip/path") def test_create_zip_exception(self): workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True)) expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path Workspace.create_zip failed in /source/path: MOCK ScriptError output: MOCK output of child process """ class MockZipFile(object): def __init__(self, path): self.filename = path archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs) self.assertIsNone(archive)
mit
j0gurt/ggrc-core
test/unit/ggrc/models/test_mixins_assignable.py
6
1270
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Tests for assignable mixin.""" import unittest from ggrc.models.mixins.assignable import Assignable class DummyAssignable(Assignable): assignees = None class TestAssignableMixin(unittest.TestCase): """ Tests inclusion of correct mixins and their attributes """ def test_get_assignees(self): """Test get_assignable function.""" dummy = DummyAssignable() dummy.assignees = [ ("Person 1", ("type1", "type2")), ("Person 2", ("type3",)), ("Person 3", ("type1",)), ("Person 4", ("type4",)), ("Person 5", ("type1", "type3", "type2")), ] no_filter = set(dict(dummy.get_assignees()).keys()) assignees = set(dict(dummy.assignees).keys()) self.assertEqual(no_filter, assignees) type1 = set(dict(dummy.get_assignees("type1")).keys()) self.assertEqual(set(["Person 1", "Person 3", "Person 5"]), type1) type3 = set(dict(dummy.get_assignees("type3")).keys()) self.assertEqual(set(["Person 2", "Person 5"]), type3) filter_list = set(dict(dummy.get_assignees(["type4", "type3"])).keys()) self.assertEqual(set(["Person 2", "Person 4", "Person 5"]), filter_list)
apache-2.0
Lujeni/ansible
lib/ansible/plugins/become/dzdo.py
25
3205
# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ become: dzdo short_description: Centrify's Direct Authorize description: - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. author: ansible (@core) version_added: "2.8" options: become_user: description: User you 'become' to execute the task ini: - section: privilege_escalation key: become_user - section: dzdo_become_plugin key: user vars: - name: ansible_become_user - name: ansible_dzdo_user env: - name: ANSIBLE_BECOME_USER - name: ANSIBLE_DZDO_USER become_exe: description: Dzdo executable default: dzdo ini: - section: privilege_escalation key: become_exe - section: dzdo_become_plugin key: executable vars: - name: ansible_become_exe - name: ansible_dzdo_exe env: - name: ANSIBLE_BECOME_EXE - name: ANSIBLE_DZDO_EXE become_flags: description: Options to pass to dzdo default: -H -S -n ini: - section: privilege_escalation key: become_flags - section: dzdo_become_plugin key: flags vars: - name: ansible_become_flags - name: ansible_dzdo_flags env: - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_DZDO_FLAGS become_pass: description: Options to pass to dzdo required: False vars: - name: ansible_become_password - name: ansible_become_pass - name: ansible_dzdo_pass env: - name: ANSIBLE_BECOME_PASS - name: ANSIBLE_DZDO_PASS ini: - section: dzdo_become_plugin key: password """ from ansible.plugins.become import BecomeBase class BecomeModule(BecomeBase): name = 'dzdo' # messages for detecting prompted password issues fail = ('Sorry, try again.',) def build_become_command(self, cmd, shell): super(BecomeModule, self).build_become_command(cmd, shell) if not cmd: return cmd becomecmd = self.get_option('become_exe') or self.name flags = self.get_option('become_flags') 
or '' if self.get_option('become_pass'): self.prompt = '[dzdo via ansible, key=%s] password:' % self._id flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt) user = self.get_option('become_user') or '' if user: user = '-u %s' % (user) return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
gpl-3.0
ktan2020/legacy-automation
win/Lib/site-packages/Crypto/Cipher/DES.py
117
4403
# -*- coding: utf-8 -*- # # Cipher/DES.py : DES # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """DES symmetric cipher DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized by NIST_ . It has a fixed data block size of 8 bytes. Its keys are 64 bits long, even though 8 bits were used for integrity (now they are ignored) and do not contribute to securty. DES is cryptographically secure, but its key length is too short by nowadays standards and it could be brute forced with some effort. DES should not be used for new designs. Use `AES`. As an example, encryption can be done as follows: >>> from Crypto.Cipher import DES3 >>> from Crypto import Random >>> >>> key = b'Sixteen byte key' >>> iv = Random.new().read(DES3.block_size) >>> cipher = DES3.new(key, DES3.MODE_OFB, iv) >>> plaintext = b'sona si latine loqueris ' >>> msg = iv + cipher.encrypt(plaintext) .. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard .. 
_NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf :undocumented: __revision__, __package__ """ __revision__ = "$Id$" from Crypto.Cipher import blockalgo from Crypto.Cipher import _DES class DESCipher(blockalgo.BlockAlgo): """DES cipher object""" def __init__(self, key, *args, **kwargs): """Initialize a DES cipher object See also `new()` at the module level.""" blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs) def new(key, *args, **kwargs): """Create a new DES cipher :Parameters: key : byte string The secret key to use in the symmetric cipher. It must be 8 byte long. The parity bits will be ignored. :Keywords: mode : a *MODE_** constant The chaining mode to use for encryption or decryption. Default is `MODE_ECB`. IV : byte string The initialization vector to use for encryption or decryption. It is ignored for `MODE_ECB` and `MODE_CTR`. For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption and `block_size` +2 bytes for decryption (in the latter case, it is actually the *encrypted* IV which was prefixed to the ciphertext). It is mandatory. For all other modes, it must be `block_size` bytes longs. It is optional and when not present it will be given a default value of all zeroes. counter : callable (*Only* `MODE_CTR`). A stateful function that returns the next *counter block*, which is a byte string of `block_size` bytes. For better performance, use `Crypto.Util.Counter`. segment_size : integer (*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext are segmented in. It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8. :Return: an `DESCipher` object """ return DESCipher(key, *args, **kwargs) #: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`. MODE_ECB = 1 #: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`. MODE_CBC = 2 #: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`. MODE_CFB = 3 #: This mode should not be used. MODE_PGP = 4 #: Output FeedBack (OFB). 
See `blockalgo.MODE_OFB`. MODE_OFB = 5 #: CounTer Mode (CTR). See `blockalgo.MODE_CTR`. MODE_CTR = 6 #: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`. MODE_OPENPGP = 7 #: Size of a data block (in bytes) block_size = 8 #: Size of a key (in bytes) key_size = 8
mit
jimgong92/allezViens
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py
2375
11628
"""Utilities for writing code that runs on Python 2 and 3""" #Copyright (c) 2010-2011 Benjamin Peterson #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software without restriction, including without limitation the rights to #use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of #the Software, and to permit persons to whom the Software is furnished to do so, #subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS #FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER #IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN #CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.2.0" # Revision 41c74fef2ded # True if we are running on Python 3. PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # This is a bit ugly, but it avoids running this again. delattr(tp, self.name) return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _MovedItems(types.ModuleType): """Lazy loading of moved objects""" _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", 
"builtins", "izip", "zip"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) del attr moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") def add_move(move): """Add 
an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_code = "__code__" _func_defaults = "__defaults__" _iterkeys = "keys" _itervalues = "values" _iteritems = "items" else: _meth_func = "im_func" _meth_self = "im_self" _func_code = "func_code" _func_defaults = "func_defaults" _iterkeys = "iterkeys" _itervalues = "itervalues" _iteritems = "iteritems" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator if PY3: def get_unbound_function(unbound): return unbound Iterator = object def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) else: def get_unbound_function(unbound): return unbound.im_func class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) def iterkeys(d): """Return an iterator over the keys of a dictionary.""" return iter(getattr(d, _iterkeys)()) def itervalues(d): """Return an iterator over the values of a dictionary.""" return iter(getattr(d, _itervalues)()) def iteritems(d): """Return an iterator over the (key, value) pairs of a dictionary.""" return iter(getattr(d, _iteritems)()) if PY3: def b(s): return s.encode("latin-1") def u(s): return s if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") import io 
StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s def u(s): return unicode(s, "unicode_escape") int2byte = chr import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") if PY3: import builtins exec_ = getattr(builtins, "exec") def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value print_ = getattr(builtins, "print") del builtins else: def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) def print_(*args, **kwargs): """The new-style print function.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) _add_doc(reraise, """Reraise an exception.""") def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("NewBase", (base,), {})
mit
google-code/android-scripting
python/src/Lib/multiprocessing/__init__.py
52
7627
# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Try calling `multiprocessing.doc.main()` to read the html # documentation in in a webbrowser. # # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # __version__ = '0.70a1' __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger', 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', ] __author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)' # # Imports # import os import sys from multiprocessing.process import Process, current_process, active_children from multiprocessing.util import SUBDEBUG, SUBWARNING # # Exceptions # class ProcessError(Exception): pass class BufferTooShort(ProcessError): pass class TimeoutError(ProcessError): pass class AuthenticationError(ProcessError): pass # This is down here because _multiprocessing uses BufferTooShort import _multiprocessing # # Definitions not depending on native semaphores # def Manager(): ''' Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. 
''' from multiprocessing.managers import SyncManager m = SyncManager() m.start() return m def Pipe(duplex=True): ''' Returns two connection object connected by a pipe ''' from multiprocessing.connection import Pipe return Pipe(duplex) def cpu_count(): ''' Returns the number of CPUs in the system ''' if sys.platform == 'win32': try: num = int(os.environ['NUMBER_OF_PROCESSORS']) except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': try: num = int(os.popen('sysctl -n hw.ncpu').read()) except ValueError: num = 0 else: try: num = os.sysconf('SC_NPROCESSORS_ONLN') except (ValueError, OSError, AttributeError): num = 0 if num >= 1: return num else: raise NotImplementedError('cannot determine number of cpus') def freeze_support(): ''' Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from multiprocessing.forking import freeze_support freeze_support() def get_logger(): ''' Return package logger -- if it does not already exist then it is created ''' from multiprocessing.util import get_logger return get_logger() def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' from multiprocessing.util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(): ''' Install support for sending connections and sockets between processes ''' from multiprocessing import reduction # # Definitions depending on native semaphores # def Lock(): ''' Returns a non-recursive lock object ''' from multiprocessing.synchronize import Lock return Lock() def RLock(): ''' Returns a recursive lock object ''' from multiprocessing.synchronize import RLock return RLock() def Condition(lock=None): ''' Returns a condition object ''' from multiprocessing.synchronize import Condition return Condition(lock) def Semaphore(value=1): ''' Returns a semaphore object ''' from 
multiprocessing.synchronize import Semaphore return Semaphore(value) def BoundedSemaphore(value=1): ''' Returns a bounded semaphore object ''' from multiprocessing.synchronize import BoundedSemaphore return BoundedSemaphore(value) def Event(): ''' Returns an event object ''' from multiprocessing.synchronize import Event return Event() def Queue(maxsize=0): ''' Returns a queue object ''' from multiprocessing.queues import Queue return Queue(maxsize) def JoinableQueue(maxsize=0): ''' Returns a queue object ''' from multiprocessing.queues import JoinableQueue return JoinableQueue(maxsize) def Pool(processes=None, initializer=None, initargs=()): ''' Returns a process pool object ''' from multiprocessing.pool import Pool return Pool(processes, initializer, initargs) def RawValue(typecode_or_type, *args): ''' Returns a shared object ''' from multiprocessing.sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(typecode_or_type, size_or_initializer): ''' Returns a shared array ''' from multiprocessing.sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(typecode_or_type, *args, **kwds): ''' Returns a synchronized shared object ''' from multiprocessing.sharedctypes import Value return Value(typecode_or_type, *args, **kwds) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Returns a synchronized shared array ''' from multiprocessing.sharedctypes import Array return Array(typecode_or_type, size_or_initializer, **kwds) # # # if sys.platform == 'win32': def set_executable(executable): ''' Sets the path to a python.exe or pythonw.exe binary used to run child processes on Windows instead of sys.executable. Useful for people embedding Python. ''' from multiprocessing.forking import set_executable set_executable(executable) __all__ += ['set_executable']
apache-2.0
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
2360
3778
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" # Note: This file is under the PSF license as the code comes from the python # stdlib. http://docs.python.org/3/license.html import re __version__ = '3.4.0.2' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. 
www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
mit
BubuLK/sfepy
sfepy/discrete/fem/fields_base.py
1
50180
""" Notes ----- Important attributes of continuous (order > 0) :class:`Field` and :class:`SurfaceField` instances: - `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]` - `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]` where `conn` is the mesh vertex connectivity, `econn` is the region-local field connectivity. """ from __future__ import absolute_import import numpy as nm from sfepy.base.base import output, get_default, assert_ from sfepy.base.base import Struct from sfepy.base.timing import Timer from sfepy.discrete.common.fields import parse_shape, Field from sfepy.discrete import PolySpace from sfepy.discrete.fem.mesh import Mesh from sfepy.discrete.fem.meshio import convert_complex_output from sfepy.discrete.fem.utils import (extend_cell_data, prepare_remap, invert_remap, get_min_value) from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping from sfepy.discrete.fem.fe_surface import FESurface from sfepy.discrete.integrals import Integral from sfepy.discrete.fem.linearizer import (get_eval_dofs, get_eval_coors, create_output) import six def set_mesh_coors(domain, fields, coors, update_fields=False, actual=False, clear_all=True, extra_dofs=False): if actual: if not hasattr(domain.mesh, 'coors_act'): domain.mesh.coors_act = nm.zeros_like(domain.mesh.coors) domain.mesh.coors_act[:] = coors[:domain.mesh.n_nod] else: domain.cmesh.coors[:] = coors[:domain.mesh.n_nod] if update_fields: for field in six.itervalues(fields): field.set_coors(coors, extra_dofs=extra_dofs) field.clear_mappings(clear_all=clear_all) def eval_nodal_coors(coors, mesh_coors, region, poly_space, geom_poly_space, econn, only_extra=True): """ Compute coordinates of nodes corresponding to `poly_space`, given mesh coordinates and `geom_poly_space`. 
""" if only_extra: iex = (poly_space.nts[:,0] > 0).nonzero()[0] if iex.shape[0] == 0: return qp_coors = poly_space.node_coors[iex, :] econn = econn[:, iex].copy() else: qp_coors = poly_space.node_coors ## # Evaluate geometry interpolation base functions in (extra) nodes. bf = geom_poly_space.eval_base(qp_coors) bf = bf[:,0,:].copy() ## # Evaluate extra coordinates with 'bf'. cmesh = region.domain.cmesh conn = cmesh.get_incident(0, region.cells, region.tdim) conn.shape = (econn.shape[0], -1) ecoors = nm.dot(bf, mesh_coors[conn]) coors[econn] = nm.swapaxes(ecoors, 0, 1) def _interp_to_faces(vertex_vals, bfs, faces): dim = vertex_vals.shape[1] n_face = faces.shape[0] n_qp = bfs.shape[0] faces_vals = nm.zeros((n_face, n_qp, dim), nm.float64) for ii, face in enumerate(faces): vals = vertex_vals[face,:dim] faces_vals[ii,:,:] = nm.dot(bfs[:,0,:], vals) return(faces_vals) def get_eval_expression(expression, fields, materials, variables, functions=None, mode='eval', term_mode=None, extra_args=None, verbose=True, kwargs=None): """ Get the function for evaluating an expression given a list of elements, and reference element coordinates. """ from sfepy.discrete.evaluate import eval_in_els_and_qp def _eval(iels, coors): val = eval_in_els_and_qp(expression, iels, coors, fields, materials, variables, functions=functions, mode=mode, term_mode=term_mode, extra_args=extra_args, verbose=verbose, kwargs=kwargs) return val[..., 0] return _eval def create_expression_output(expression, name, primary_field_name, fields, materials, variables, functions=None, mode='eval', term_mode=None, extra_args=None, verbose=True, kwargs=None, min_level=0, max_level=1, eps=1e-4): """ Create output mesh and data for the expression using the adaptive linearizer. Parameters ---------- expression : str The expression to evaluate. name : str The name of the data. primary_field_name : str The name of field that defines the element groups and polynomial spaces. 
fields : dict The dictionary of fields used in `variables`. materials : Materials instance The materials used in the expression. variables : Variables instance The variables used in the expression. functions : Functions instance, optional The user functions for materials etc. mode : one of 'eval', 'el_avg', 'qp' The evaluation mode - 'qp' requests the values in quadrature points, 'el_avg' element averages and 'eval' means integration over each term region. term_mode : str The term call mode - some terms support different call modes and depending on the call mode different values are returned. extra_args : dict, optional Extra arguments to be passed to terms in the expression. verbose : bool If False, reduce verbosity. kwargs : dict, optional The variables (dictionary of (variable name) : (Variable instance)) to be used in the expression. min_level : int The minimum required level of mesh refinement. max_level : int The maximum level of mesh refinement. eps : float The relative tolerance parameter of mesh adaptivity. Returns ------- out : dict The output dictionary. """ field = fields[primary_field_name] vertex_coors = field.coors[:field.n_vertex_dof, :] ps = field.poly_space gps = field.gel.poly_space vertex_conn = field.econn[:, :field.gel.n_vertex] eval_dofs = get_eval_expression(expression, fields, materials, variables, functions=functions, mode=mode, extra_args=extra_args, verbose=verbose, kwargs=kwargs) eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps) (level, coors, conn, vdofs, mat_ids) = create_output(eval_dofs, eval_coors, vertex_conn.shape[0], ps, min_level=min_level, max_level=max_level, eps=eps) mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids], field.domain.mesh.descs) out = {} out[name] = Struct(name='output_data', mode='vertex', data=vdofs, var_name=name, dofs=None, mesh=mesh, level=level) out = convert_complex_output(out) return out class FEField(Field): """ Base class for finite element fields. 
Notes ----- - interps and hence node_descs are per region (must have single geometry!) Field shape information: - ``shape`` - the shape of the base functions in a point - ``n_components`` - the number of DOFs per FE node - ``val_shape`` - the shape of field value (the product of DOFs and base functions) in a point """ def __init__(self, name, dtype, shape, region, approx_order=1): """ Create a finite element field. Parameters ---------- name : str The field name. dtype : numpy.dtype The field data type: float64 or complex128. shape : int/tuple/str The field shape: 1 or (1,) or 'scalar', space dimension (2, or (2,) or 3 or (3,)) or 'vector', or a tuple. The field shape determines the shape of the FE base functions and is related to the number of components of variables and to the DOF per node count, depending on the field kind. region : Region The region where the field is defined. approx_order : int or tuple The FE approximation order. The tuple form is (order, has_bubble), e.g. (1, True) means order 1 with a bubble function. Notes ----- Assumes one cell type for the whole region! """ shape = parse_shape(shape, region.domain.shape.dim) if not self._check_region(region): raise ValueError('unsuitable region for field %s! (%s)' % (name, region.name)) Struct.__init__(self, name=name, dtype=dtype, shape=shape, region=region) self.domain = self.region.domain self._set_approx_order(approx_order) self._setup_geometry() self._setup_kind() self._setup_shape() self.surface_data = {} self.point_data = {} self.ori = None self._create_interpolant() self._setup_global_base() self.setup_coors() self.clear_mappings(clear_all=True) self.clear_qp_base() self.basis_transform = None self.econn0 = None self.unused_dofs = None self.stored_subs = None def _set_approx_order(self, approx_order): """ Set a uniform approximation order. 
""" if isinstance(approx_order, tuple): self.approx_order = approx_order[0] self.force_bubble = approx_order[1] else: self.approx_order = approx_order self.force_bubble = False def get_true_order(self): """ Get the true approximation order depending on the reference element geometry. For example, for P1 (linear) approximation the true order is 1, while for Q1 (bilinear) approximation in 2D the true order is 2. """ gel = self.gel if (gel.dim + 1) == gel.n_vertex: order = self.approx_order else: order = gel.dim * self.approx_order if self.force_bubble: bubble_order = gel.dim + 1 order = max(order, bubble_order) return order def is_higher_order(self): """ Return True, if the field's approximation order is greater than one. """ return self.force_bubble or (self.approx_order > 1) def _setup_global_base(self): """ Setup global DOF/base functions, their indices and connectivity of the field. Called methods implemented in subclasses. """ self._setup_facet_orientations() self._init_econn() self.n_vertex_dof, self.vertex_remap = self._setup_vertex_dofs() self.vertex_remap_i = invert_remap(self.vertex_remap) aux = self._setup_edge_dofs() self.n_edge_dof, self.edge_dofs, self.edge_remap = aux aux = self._setup_face_dofs() self.n_face_dof, self.face_dofs, self.face_remap = aux aux = self._setup_bubble_dofs() self.n_bubble_dof, self.bubble_dofs, self.bubble_remap = aux self.n_nod = self.n_vertex_dof + self.n_edge_dof \ + self.n_face_dof + self.n_bubble_dof self._setup_esurface() def _setup_esurface(self): """ Setup extended surface entities (edges in 2D, faces in 3D), i.e. indices of surface entities into the extended connectivity. 
""" node_desc = self.node_desc gel = self.gel self.efaces = gel.get_surface_entities().copy() nd = node_desc.edge if nd is not None: efs = [] for eof in gel.get_edges_per_face(): efs.append(nm.concatenate([nd[ie] for ie in eof])) efs = nm.array(efs).squeeze() if efs.ndim < 2: efs = efs[:,nm.newaxis] self.efaces = nm.hstack((self.efaces, efs)) efs = node_desc.face if efs is not None: efs = nm.array(efs).squeeze() if efs.ndim < 2: efs = efs[:,nm.newaxis] self.efaces = nm.hstack((self.efaces, efs)) if gel.dim == 3: self.eedges = gel.edges.copy() efs = node_desc.edge if efs is not None: efs = nm.array(efs).squeeze() if efs.ndim < 2: efs = efs[:,nm.newaxis] self.eedges = nm.hstack((self.eedges, efs)) def set_coors(self, coors, extra_dofs=False): """ Set coordinates of field nodes. """ # Mesh vertex nodes. if self.n_vertex_dof: indx = self.vertex_remap_i self.coors[:self.n_vertex_dof] = nm.take(coors, indx.astype(nm.int32), axis=0) n_ex_dof = self.n_bubble_dof + self.n_edge_dof + self.n_face_dof # extra nodes if n_ex_dof: if extra_dofs: if self.n_nod != coors.shape[0]: raise NotImplementedError self.coors[:] = coors else: gps = self.gel.poly_space ps = self.poly_space eval_nodal_coors(self.coors, coors, self.region, ps, gps, self.econn) def setup_coors(self): """ Setup coordinates of field nodes. """ mesh = self.domain.mesh self.coors = nm.empty((self.n_nod, mesh.dim), nm.float64) self.set_coors(mesh.coors) def get_vertices(self): """ Return indices of vertices belonging to the field region. """ return self.vertex_remap_i def _get_facet_dofs(self, rfacets, remap, dofs): facets = remap[rfacets] return dofs[facets[facets >= 0]].ravel() def get_data_shape(self, integral, integration='volume', region_name=None): """ Get element data dimensions. Parameters ---------- integral : Integral instance The integral describing used numerical quadrature. integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom' The term integration type. 
region_name : str The name of the region of the integral. Returns ------- data_shape : 4 ints The `(n_el, n_qp, dim, n_en)` for volume shape kind, `(n_fa, n_qp, dim, n_fn)` for surface shape kind and `(n_nod, 0, 0, 1)` for point shape kind. Notes ----- - `n_el`, `n_fa` = number of elements/facets - `n_qp` = number of quadrature points per element/facet - `dim` = spatial dimension - `n_en`, `n_fn` = number of element/facet nodes - `n_nod` = number of element nodes """ region = self.domain.regions[region_name] shape = region.shape dim = region.dim if integration in ('surface', 'surface_extra'): sd = self.surface_data[region_name] # This works also for surface fields. key = sd.face_type weights = self.get_qp(key, integral).weights n_qp = weights.shape[0] if integration == 'surface': data_shape = (sd.n_fa, n_qp, dim, sd.n_fp) else: data_shape = (sd.n_fa, n_qp, dim, self.econn.shape[1]) elif integration in ('volume', 'custom'): _, weights = integral.get_qp(self.gel.name) n_qp = weights.shape[0] data_shape = (shape.n_cell, n_qp, dim, self.econn.shape[1]) elif integration == 'point': dofs = self.get_dofs_in_region(region, merge=True) data_shape = (dofs.shape[0], 0, 0, 1) else: raise NotImplementedError('unsupported integration! (%s)' % integration) return data_shape def get_dofs_in_region(self, region, merge=True): """ Return indices of DOFs that belong to the given region and group. 
""" node_desc = self.node_desc dofs = [] vdofs = nm.empty((0,), dtype=nm.int32) if node_desc.vertex is not None: vdofs = self.vertex_remap[region.vertices] vdofs = vdofs[vdofs >= 0] dofs.append(vdofs) edofs = nm.empty((0,), dtype=nm.int32) if node_desc.edge is not None: edofs = self._get_facet_dofs(region.edges, self.edge_remap, self.edge_dofs) dofs.append(edofs) fdofs = nm.empty((0,), dtype=nm.int32) if node_desc.face is not None: fdofs = self._get_facet_dofs(region.faces, self.face_remap, self.face_dofs) dofs.append(fdofs) bdofs = nm.empty((0,), dtype=nm.int32) if (node_desc.bubble is not None) and region.has_cells(): els = self.bubble_remap[region.cells] bdofs = self.bubble_dofs[els[els >= 0]].ravel() dofs.append(bdofs) if merge: dofs = nm.concatenate(dofs) return dofs def clear_qp_base(self): """ Remove cached quadrature points and base functions. """ self.qp_coors = {} self.bf = {} def get_qp(self, key, integral): """ Get quadrature points and weights corresponding to the given key and integral. The key is 'v' or 's#', where # is the number of face vertices. """ qpkey = (integral.order, key) if qpkey not in self.qp_coors: if (key[0] == 's') and not self.is_surface: dim = self.gel.dim - 1 n_fp = self.gel.surface_facet.n_vertex geometry = '%d_%d' % (dim, n_fp) else: geometry = self.gel.name vals, weights = integral.get_qp(geometry) self.qp_coors[qpkey] = Struct(vals=vals, weights=weights) return self.qp_coors[qpkey] def substitute_dofs(self, subs, restore=False): """ Perform facet DOF substitutions according to `subs`. Modifies `self.econn` in-place and sets `self.econn0`, `self.unused_dofs` and `self.basis_transform`. 
""" if restore and (self.stored_subs is not None): self.econn0 = self.econn self.econn, self.unused_dofs, basis_transform = self.stored_subs else: if subs is None: self.econn0 = self.econn return else: self.econn0 = self.econn.copy() self._substitute_dofs(subs) self.unused_dofs = nm.setdiff1d(self.econn0, self.econn) basis_transform = self._eval_basis_transform(subs) self.set_basis_transform(basis_transform) def restore_dofs(self, store=False): """ Undoes the effect of :func:`FEField.substitute_dofs()`. """ if self.econn0 is None: raise ValueError('no original DOFs to restore!') if store: self.stored_subs = (self.econn, self.unused_dofs, self.basis_transform) else: self.stored_subs = None self.econn = self.econn0 self.econn0 = None self.unused_dofs = None self.basis_transform = None def set_basis_transform(self, transform): """ Set local element basis transformation. The basis transformation is applied in :func:`FEField.get_base()` and :func:`FEField.create_mapping()`. Parameters ---------- transform : array, shape `(n_cell, n_ep, n_ep)` The array with `(n_ep, n_ep)` transformation matrices for each cell in the field's region, where `n_ep` is the number of element DOFs. """ self.basis_transform = transform def restore_substituted(self, vec): """ Restore values of the unused DOFs using the transpose of the applied basis transformation. 
""" if (self.econn0 is None) or (self.basis_transform is None): raise ValueError('no original DOF values to restore!!') vec = vec.reshape((self.n_nod, self.n_components)).copy() evec = vec[self.econn] vec[self.econn0] = nm.einsum('cji,cjk->cik', self.basis_transform, evec) return vec.ravel() def get_base(self, key, derivative, integral, iels=None, from_geometry=False, base_only=True): qp = self.get_qp(key, integral) if from_geometry: ps = self.gel.poly_space else: ps = self.poly_space _key = key if not from_geometry else 'g' + key bf_key = (integral.order, _key, derivative) if bf_key not in self.bf: ori = self.ori self.bf[bf_key] = ps.eval_base(qp.vals, diff=derivative, ori=ori, transform=self.basis_transform) bf = self.bf[bf_key] if iels is not None and bf.ndim == 4: bf = bf[iels] if base_only: return bf else: return bf, qp.weights def create_bqp(self, region_name, integral): gel = self.gel sd = self.surface_data[region_name] bqpkey = (integral.order, sd.bkey) if not bqpkey in self.qp_coors: qp = self.get_qp(sd.face_type, integral) ps_s = self.gel.surface_facet.poly_space bf_s = ps_s.eval_base(qp.vals) coors, faces = gel.coors, gel.get_surface_entities() vals = _interp_to_faces(coors, bf_s, faces) self.qp_coors[bqpkey] = Struct(name='BQP_%s' % sd.bkey, vals=vals, weights=qp.weights) def extend_dofs(self, dofs, fill_value=None): """ Extend DOFs to the whole domain using the `fill_value`, or the smallest value in `dofs` if `fill_value` is None. """ if fill_value is None: if nm.isrealobj(dofs): fill_value = get_min_value(dofs) else: # Complex values - treat real and imaginary parts separately. 
fill_value = get_min_value(dofs.real) fill_value += 1j * get_min_value(dofs.imag) if self.approx_order != 0: indx = self.get_vertices() n_nod = self.domain.shape.n_nod new_dofs = nm.empty((n_nod, dofs.shape[1]), dtype=self.dtype) new_dofs.fill(fill_value) new_dofs[indx] = dofs[:indx.size] else: new_dofs = extend_cell_data(dofs, self.domain, self.region, val=fill_value) return new_dofs def remove_extra_dofs(self, dofs): """ Remove DOFs defined in higher order nodes (order > 1). """ if self.approx_order != 0: new_dofs = dofs[:self.n_vertex_dof] else: new_dofs = dofs return new_dofs def linearize(self, dofs, min_level=0, max_level=1, eps=1e-4): """ Linearize the solution for post-processing. Parameters ---------- dofs : array, shape (n_nod, n_component) The array of DOFs reshaped so that each column corresponds to one component. min_level : int The minimum required level of mesh refinement. max_level : int The maximum level of mesh refinement. eps : float The relative tolerance parameter of mesh adaptivity. Returns ------- mesh : Mesh instance The adapted, nonconforming, mesh. vdofs : array The DOFs defined in vertices of `mesh`. levels : array of ints The refinement level used for each element group. """ assert_(dofs.ndim == 2) n_nod, dpn = dofs.shape assert_(n_nod == self.n_nod) assert_(dpn == self.shape[0]) vertex_coors = self.coors[:self.n_vertex_dof, :] ps = self.poly_space gps = self.gel.poly_space vertex_conn = self.econn[:, :self.gel.n_vertex] eval_dofs = get_eval_dofs(dofs, self.econn, ps, ori=self.ori) eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps) (level, coors, conn, vdofs, mat_ids) = create_output(eval_dofs, eval_coors, vertex_conn.shape[0], ps, min_level=min_level, max_level=max_level, eps=eps) mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids], self.domain.mesh.descs) return mesh, vdofs, level def get_output_approx_order(self): """ Get the approximation order used in the output file. 
""" return min(self.approx_order, 1) def create_output(self, dofs, var_name, dof_names=None, key=None, extend=True, fill_value=None, linearization=None): """ Convert the DOFs corresponding to the field to a dictionary of output data usable by Mesh.write(). Parameters ---------- dofs : array, shape (n_nod, n_component) The array of DOFs reshaped so that each column corresponds to one component. var_name : str The variable name corresponding to `dofs`. dof_names : tuple of str The names of DOF components. key : str, optional The key to be used in the output dictionary instead of the variable name. extend : bool Extend the DOF values to cover the whole domain. fill_value : float or complex The value used to fill the missing DOF values if `extend` is True. linearization : Struct or None The linearization configuration for higher order approximations. Returns ------- out : dict The output dictionary. """ linearization = get_default(linearization, Struct(kind='strip')) out = {} if linearization.kind is None: out[key] = Struct(name='output_data', mode='full', data=dofs, var_name=var_name, dofs=dof_names, field_name=self.name) elif linearization.kind == 'strip': if extend: ext = self.extend_dofs(dofs, fill_value) else: ext = self.remove_extra_dofs(dofs) if ext is not None: approx_order = self.get_output_approx_order() if approx_order != 0: # Has vertex data. 
out[key] = Struct(name='output_data', mode='vertex', data=ext, var_name=var_name, dofs=dof_names) else: ext.shape = (ext.shape[0], 1, ext.shape[1], 1) out[key] = Struct(name='output_data', mode='cell', data=ext, var_name=var_name, dofs=dof_names) else: mesh, vdofs, levels = self.linearize(dofs, linearization.min_level, linearization.max_level, linearization.eps) out[key] = Struct(name='output_data', mode='vertex', data=vdofs, var_name=var_name, dofs=dof_names, mesh=mesh, levels=levels) out = convert_complex_output(out) return out def create_mesh(self, extra_nodes=True): """ Create a mesh from the field region, optionally including the field extra nodes. """ mesh = self.domain.mesh if self.approx_order != 0: if extra_nodes: conn = self.econn else: conn = self.econn[:, :self.gel.n_vertex] conns = [conn] mat_ids = [mesh.cmesh.cell_groups] descs = mesh.descs[:1] if extra_nodes: coors = self.coors else: coors = self.coors[:self.n_vertex_dof] mesh = Mesh.from_data(self.name, coors, None, conns, mat_ids, descs) return mesh def get_evaluate_cache(self, cache=None, share_geometry=False, verbose=False): """ Get the evaluate cache for :func:`Variable.evaluate_at() <sfepy.discrete.variables.Variable.evaluate_at()>`. Parameters ---------- cache : Struct instance, optional Optionally, use the provided instance to store the cache data. share_geometry : bool Set to True to indicate that all the evaluations will work on the same region. Certain data are then computed only for the first probe and cached. verbose : bool If False, reduce verbosity. Returns ------- cache : Struct instance The evaluate cache. 
""" try: from scipy.spatial import cKDTree as KDTree except ImportError: from scipy.spatial import KDTree from sfepy.discrete.fem.geometry_element import create_geometry_elements if cache is None: cache = Struct(name='evaluate_cache') timer = Timer(start=True) if (cache.get('cmesh', None) is None) or not share_geometry: mesh = self.create_mesh(extra_nodes=False) cache.cmesh = cmesh = mesh.cmesh gels = create_geometry_elements() cmesh.set_local_entities(gels) cmesh.setup_entities() cache.centroids = cmesh.get_centroids(cmesh.tdim) if self.gel.name != '3_8': cache.normals0 = cmesh.get_facet_normals() cache.normals1 = None else: cache.normals0 = cmesh.get_facet_normals(0) cache.normals1 = cmesh.get_facet_normals(1) output('cmesh setup: %f s' % timer.stop(), verbose=verbose) timer.start() if (cache.get('kdtree', None) is None) or not share_geometry: cache.kdtree = KDTree(cmesh.coors) output('kdtree: %f s' % timer.stop(), verbose=verbose) return cache def interp_to_qp(self, dofs): """ Interpolate DOFs into quadrature points. The quadrature order is given by the field approximation order. Parameters ---------- dofs : array The array of DOF values of shape `(n_nod, n_component)`. Returns ------- data_qp : array The values interpolated into the quadrature points. integral : Integral The corresponding integral defining the quadrature points. """ integral = Integral('i', order=self.approx_order) bf = self.get_base('v', False, integral) bf = bf[:,0,:].copy() data_qp = nm.dot(bf, dofs[self.econn]) data_qp = nm.swapaxes(data_qp, 0, 1) data_qp.shape = data_qp.shape + (1,) return data_qp, integral def get_coor(self, nods=None): """ Get coordinates of the field nodes. Parameters ---------- nods : array, optional The indices of the required nodes. If not given, the coordinates of all the nodes are returned. 
""" if nods is None: return self.coors else: return self.coors[nods] def get_connectivity(self, region, integration, is_trace=False): """ Convenience alias to `Field.get_econn()`, that is used in some terms. """ return self.get_econn(integration, region, is_trace=is_trace) def create_mapping(self, region, integral, integration, return_mapping=True): """ Create a new reference mapping. Compute jacobians, element volumes and base function derivatives for Volume-type geometries (volume mappings), and jacobians, normals and base function derivatives for Surface-type geometries (surface mappings). Notes ----- - surface mappings are defined on the surface region - surface mappings require field order to be > 0 """ domain = self.domain coors = domain.get_mesh_coors(actual=True) dconn = domain.get_conn() tco = integration in ('volume', 'custom') iels = region.get_cells(true_cells_only=tco) transform = (self.basis_transform[iels] if self.basis_transform is not None else None) if integration == 'volume': qp = self.get_qp('v', integral) geo_ps = self.gel.poly_space ps = self.poly_space bf = self.get_base('v', 0, integral, iels=iels) conn = nm.take(dconn, iels.astype(nm.int32), axis=0) mapping = VolumeMapping(coors, conn, poly_space=geo_ps) vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps, ori=self.ori, transform=transform) out = vg elif (integration == 'surface') or (integration == 'surface_extra'): assert_(self.approx_order > 0) if self.ori is not None: msg = 'surface integrals do not work yet with the' \ ' hierarchical basis!' raise ValueError(msg) if self.basis_transform is not None: msg = 'surface integrals do not work with the' \ ' basis transform!' 
raise ValueError(msg) sd = domain.surface_groups[region.name] esd = self.surface_data[region.name] geo_ps = self.gel.poly_space ps = self.poly_space conn = sd.get_connectivity() mapping = SurfaceMapping(coors, conn, poly_space=geo_ps) if not self.is_surface: self.create_bqp(region.name, integral) qp = self.qp_coors[(integral.order, esd.bkey)] abf = ps.eval_base(qp.vals[0], transform=transform) bf = abf[..., self.efaces[0]] indx = self.gel.get_surface_entities()[0] # Fix geometry element's 1st facet orientation for gradients. indx = nm.roll(indx, -1)[::-1] mapping.set_basis_indices(indx) sg = mapping.get_mapping(qp.vals[0], qp.weights, poly_space=Struct(n_nod=bf.shape[-1]), mode=integration) if integration == 'surface_extra': sg.alloc_extra_data(self.econn.shape[1]) bf_bg = geo_ps.eval_base(qp.vals, diff=True) ebf_bg = self.get_base(esd.bkey, 1, integral) sg.evaluate_bfbgm(bf_bg, ebf_bg, coors, sd.fis, dconn) else: if self.basis_transform is not None: msg = 'surface fields do not work with the' \ ' basis transform!' raise ValueError(msg) # Do not use BQP for surface fields. qp = self.get_qp(sd.face_type, integral) bf = ps.eval_base(qp.vals, transform=transform) sg = mapping.get_mapping(qp.vals, qp.weights, poly_space=Struct(n_nod=bf.shape[-1]), mode=integration) out = sg elif integration == 'point': out = mapping = None elif integration == 'custom': raise ValueError('cannot create custom mapping!') else: raise ValueError('unknown integration geometry type: %s' % integration) if out is not None: # Store the integral used. out.integral = integral out.qp = qp out.ps = ps # Update base. out.bf[:] = bf if return_mapping: out = (out, mapping) return out class VolumeField(FEField): """ Finite element field base class over volume elements (element dimension equals space dimension). """ def _check_region(self, region): """ Check whether the `region` can be used for the field. Returns ------- ok : bool True if the region is usable for the field. 
""" ok = True domain = region.domain if region.kind != 'cell': output("bad region kind! (is: %r, should be: 'cell')" % region.kind) ok = False elif (region.kind_tdim != domain.shape.tdim): output('cells with a bad topological dimension! (%d == %d)' % (region.kind_tdim, domain.shape.tdim)) ok = False return ok def _setup_geometry(self): """ Setup the field region geometry. """ cmesh = self.domain.cmesh for key, gel in six.iteritems(self.domain.geom_els): ct = cmesh.cell_types if (ct[self.region.cells] == cmesh.key_to_index[gel.name]).all(): self.gel = gel break else: raise ValueError('region %s of field %s contains multiple' ' reference geometries!' % (self.region.name, self.name)) self.is_surface = False def _create_interpolant(self): name = '%s_%s_%s_%d%s' % (self.gel.name, self.space, self.poly_space_base, self.approx_order, 'B' * self.force_bubble) ps = PolySpace.any_from_args(name, self.gel, self.approx_order, base=self.poly_space_base, force_bubble=self.force_bubble) self.poly_space = ps def _init_econn(self): """ Initialize the extended DOF connectivity. """ n_ep = self.poly_space.n_nod n_cell = self.region.get_n_cells() self.econn = nm.zeros((n_cell, n_ep), nm.int32) def _setup_vertex_dofs(self): """ Setup vertex DOF connectivity. """ if self.node_desc.vertex is None: return 0, None region = self.region cmesh = self.domain.cmesh conn, offsets = cmesh.get_incident(0, region.cells, region.tdim, ret_offsets=True) vertices = nm.unique(conn) remap = prepare_remap(vertices, region.n_v_max) n_dof = vertices.shape[0] aux = nm.unique(nm.diff(offsets)) assert_(len(aux) == 1, 'region with multiple reference geometries!') offset = aux[0] # Remap vertex node connectivity to field-local numbering. 
aux = conn.reshape((-1, offset)).astype(nm.int32) self.econn[:, :offset] = nm.take(remap, aux) return n_dof, remap def setup_extra_data(self, geometry, info, is_trace): dct = info.dc_type.type if geometry != None: geometry_flag = 'surface' in geometry else: geometry_flag = False if (dct == 'surface') or (geometry_flag): reg = info.get_region() mreg_name = info.get_region_name(can_trace=False) self.domain.create_surface_group(reg) self.setup_surface_data(reg, is_trace, mreg_name) elif dct == 'edge': raise NotImplementedError('dof connectivity type %s' % dct) elif dct == 'point': self.setup_point_data(self, info.region) elif dct not in ('volume', 'scalar', 'custom'): raise ValueError('unknown dof connectivity type! (%s)' % dct) def setup_point_data(self, field, region): if region.name not in self.point_data: conn = field.get_dofs_in_region(region, merge=True) conn.shape += (1,) self.point_data[region.name] = conn def setup_surface_data(self, region, is_trace=False, trace_region=None): """nodes[leconn] == econn""" """nodes are sorted by node number -> same order as region.vertices""" if region.name not in self.surface_data: sd = FESurface('surface_data_%s' % region.name, region, self.efaces, self.econn, self.region) self.surface_data[region.name] = sd if region.name in self.surface_data and is_trace: sd = self.surface_data[region.name] sd.setup_mirror_connectivity(region, trace_region) return self.surface_data[region.name] def get_econn(self, conn_type, region, is_trace=False, integration=None, local=False): """ Get extended connectivity of the given type in the given region. 
""" ct = conn_type.type if isinstance(conn_type, Struct) else conn_type if ct in ('volume', 'custom'): if region.name == self.region.name: conn = self.econn else: tco = integration in ('volume', 'custom') cells = region.get_cells(true_cells_only=tco) ii = self.region.get_cell_indices(cells, true_cells_only=tco) conn = nm.take(self.econn, ii, axis=0) elif ct == 'surface': if region.name not in self.surface_data: self.domain.create_surface_group(region) self.setup_surface_data(region) sd = self.surface_data[region.name] conn = sd.get_connectivity(local=local, is_trace=is_trace) elif ct == 'edge': raise NotImplementedError('connectivity type %s' % ct) elif ct == 'point': conn = self.point_data[region.name] else: raise ValueError('unknown connectivity type! (%s)' % ct) return conn def average_qp_to_vertices(self, data_qp, integral): """ Average data given in quadrature points in region elements into region vertices. .. math:: u_n = \sum_e (u_{e,avg} * volume_e) / \sum_e volume_e = \sum_e \int_{volume_e} u / \sum volume_e """ region = self.region n_cells = region.get_n_cells() if n_cells != data_qp.shape[0]: msg = 'incomatible shape! (%d == %d)' % (n_cells, data_qp.shape[0]) raise ValueError(msg) n_vertex = self.n_vertex_dof nc = data_qp.shape[2] nod_vol = nm.zeros((n_vertex,), dtype=nm.float64) data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64) vg = self.get_mapping(self.region, integral, 'volume')[0] volume = nm.squeeze(vg.volume) iels = self.region.get_cells() data_e = nm.zeros((volume.shape[0], 1, nc, 1), dtype=nm.float64) vg.integrate(data_e, data_qp[iels]) ir = nm.arange(nc, dtype=nm.int32) conn = self.econn[:, :self.gel.n_vertex] for ii, cc in enumerate(conn): # Assumes unique nodes in cc! 
ind2, ind1 = nm.meshgrid(ir, cc) data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0] nod_vol[cc] += volume[ii] data_vertex /= nod_vol[:,nm.newaxis] return data_vertex class SurfaceField(FEField): """ Finite element field base class over surface (element dimension is one less than space dimension). """ def _check_region(self, region): """ Check whether the `region` can be used for the field. Returns ------- ok : bool True if the region is usable for the field. """ ok1 = ((region.kind_tdim == (region.tdim - 1)) and (region.get_n_cells(True) > 0)) if not ok1: output('bad region topological dimension and kind! (%d, %s)' % (region.tdim, region.kind)) n_ns = region.get_facet_indices().shape[0] - region.get_n_cells(True) ok2 = n_ns == 0 if not ok2: output('%d region facets are not on the domain surface!' % n_ns) return ok1 and ok2 def _setup_geometry(self): """ Setup the field region geometry. """ for key, vgel in six.iteritems(self.domain.geom_els): self.gel = vgel.surface_facet break if self.gel is None: raise ValueError('cells with no surface!') self.is_surface = True def _create_interpolant(self): name = '%s_%s_%s_%d%s' % (self.gel.name, self.space, self.poly_space_base, self.approx_order, 'B' * self.force_bubble) ps = PolySpace.any_from_args(name, self.gel, self.approx_order, base=self.poly_space_base, force_bubble=self.force_bubble) self.poly_space = ps def setup_extra_data(self, geometry, info, is_trace): dct = info.dc_type.type if dct != 'surface': msg = "dof connectivity type must be 'surface'! (%s)" % dct raise ValueError(msg) reg = info.get_region() if reg.name not in self.surface_data: # Defined in setup_vertex_dofs() msg = 'no surface data of surface field! (%s)' % reg.name raise ValueError(msg) if reg.name in self.surface_data and is_trace: sd = self.surface_data[reg.name] mreg_name = info.get_region_name(can_trace=False) sd.setup_mirror_connectivity(reg, mreg_name) def _init_econn(self): """ Initialize the extended DOF connectivity. 
""" n_ep = self.poly_space.n_nod n_cell = self.region.get_n_cells(is_surface=self.is_surface) self.econn = nm.zeros((n_cell, n_ep), nm.int32) def _setup_vertex_dofs(self): """ Setup vertex DOF connectivity. """ if self.node_desc.vertex is None: return 0, None region = self.region remap = prepare_remap(region.vertices, region.n_v_max) n_dof = region.vertices.shape[0] # Remap vertex node connectivity to field-local numbering. conn, gel = self.domain.get_conn(ret_gel=True) faces = gel.get_surface_entities() aux = FESurface('aux', region, faces, conn) self.econn[:, :aux.n_fp] = aux.leconn self.surface_data[region.name] = aux return n_dof, remap def _setup_bubble_dofs(self): """ Setup bubble DOF connectivity. """ return 0, None, None def get_econn(self, conn_type, region, is_trace=False, integration=None): """ Get extended connectivity of the given type in the given region. """ ct = conn_type.type if isinstance(conn_type, Struct) else conn_type if ct != 'surface': msg = 'connectivity type must be "surface"! (%s)' % ct raise ValueError(msg) sd = self.surface_data[region.name] conn = sd.get_connectivity(local=True, is_trace=is_trace) return conn def average_qp_to_vertices(self, data_qp, integral): """ Average data given in quadrature points in region elements into region vertices. .. math:: u_n = \sum_e (u_{e,avg} * area_e) / \sum_e area_e = \sum_e \int_{area_e} u / \sum area_e """ region = self.region n_cells = region.get_n_cells(True) if n_cells != data_qp.shape[0]: msg = 'incomatible shape! 
(%d == %d)' % (n_cells, data_qp.shape[0]) raise ValueError(msg) n_vertex = len(region.vertices) nc = data_qp.shape[2] nod_vol = nm.zeros((n_vertex,), dtype=nm.float64) data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64) sg = self.get_mapping(self.region, integral, 'surface')[0] area = nm.squeeze(sg.volume) n_cells = region.get_n_cells(True) iels = nm.arange(n_cells, dtype=nm.int32) data_e = nm.zeros((area.shape[0], 1, nc, 1), dtype=nm.float64) sg.integrate(data_e, data_qp[iels]) ir = nm.arange(nc, dtype=nm.int32) sd = self.domain.surface_groups[region.name] # Should be vertex connectivity! conn = sd.get_connectivity(local=True) for ii, cc in enumerate(conn): # Assumes unique nodes in cc! ind2, ind1 = nm.meshgrid(ir, cc) data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0] nod_vol[cc] += area[ii] data_vertex /= nod_vol[:,nm.newaxis] return data_vertex class H1Mixin(Struct): """ Methods of fields specific to H1 space. """ def _setup_shape(self): """ Setup the field's shape-related attributes, see :class:`Field`. """ self.n_components = nm.prod(self.shape) self.val_shape = self.shape
bsd-3-clause
agualis/test-django-nonrel
django/contrib/flatpages/tests/templatetags.py
228
5965
import os from django.conf import settings from django.contrib.auth.models import AnonymousUser, User from django.template import Template, Context, TemplateSyntaxError from django.test import TestCase class FlatpageTemplateTagTests(TestCase): fixtures = ['sample_flatpages'] urls = 'django.contrib.flatpages.tests.urls' def setUp(self): self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware' if flatpage_middleware_class not in settings.MIDDLEWARE_CLASSES: settings.MIDDLEWARE_CLASSES += (flatpage_middleware_class,) self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = ( os.path.join( os.path.dirname(__file__), 'templates' ), ) self.me = User.objects.create_user('testuser', 'test@example.com', 's3krit') def tearDown(self): settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS def test_get_flatpages_tag(self): "The flatpage template tag retrives unregistered prefixed flatpages by default" out = Template( "{% load flatpages %}" "{% get_flatpages as flatpages %}" "{% for page in flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context()) self.assertEqual(out, "A Flatpage,A Nested Flatpage,") def test_get_flatpages_tag_for_anon_user(self): "The flatpage template tag retrives unregistered flatpages for an anonymous user" out = Template( "{% load flatpages %}" "{% get_flatpages for anonuser as flatpages %}" "{% for page in flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context({ 'anonuser': AnonymousUser() })) self.assertEqual(out, "A Flatpage,A Nested Flatpage,") def test_get_flatpages_tag_for_user(self): "The flatpage template tag retrives all flatpages for an authenticated user" out = Template( "{% load flatpages %}" "{% get_flatpages for me as flatpages %}" "{% for page in flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context({ 'me': self.me })) self.assertEqual(out, "A 
Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,") def test_get_flatpages_with_prefix(self): "The flatpage template tag retrives unregistered prefixed flatpages by default" out = Template( "{% load flatpages %}" "{% get_flatpages '/location/' as location_flatpages %}" "{% for page in location_flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context()) self.assertEqual(out, "A Nested Flatpage,") def test_get_flatpages_with_prefix_for_anon_user(self): "The flatpage template tag retrives unregistered prefixed flatpages for an anonymous user" out = Template( "{% load flatpages %}" "{% get_flatpages '/location/' for anonuser as location_flatpages %}" "{% for page in location_flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context({ 'anonuser': AnonymousUser() })) self.assertEqual(out, "A Nested Flatpage,") def test_get_flatpages_with_prefix_for_user(self): "The flatpage template tag retrive prefixed flatpages for an authenticated user" out = Template( "{% load flatpages %}" "{% get_flatpages '/location/' for me as location_flatpages %}" "{% for page in location_flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context({ 'me': self.me })) self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,") def test_get_flatpages_with_variable_prefix(self): "The prefix for the flatpage template tag can be a template variable" out = Template( "{% load flatpages %}" "{% get_flatpages location_prefix as location_flatpages %}" "{% for page in location_flatpages %}" "{{ page.title }}," "{% endfor %}" ).render(Context({ 'location_prefix': '/location/' })) self.assertEqual(out, "A Nested Flatpage,") def test_parsing_errors(self): "There are various ways that the flatpages template tag won't parse" render = lambda t: Template(t).render(Context()) self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages %}") self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages as %}") 
self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages cheesecake flatpages %}") self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages as flatpages asdf%}") self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}") self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages for user as flatpages asdf%}") self.assertRaises(TemplateSyntaxError, render, "{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf%}")
bsd-3-clause
thebjorn/dkcoverage
dkcoverage/rtestcover.py
1
3325
# -*- coding: utf-8 -*- """Called from datakortet\dkcoverage.bat to record regression test coverage data in dashboard. """ import re import os # import sys # import time import glob # from datakortet.dkdash.status import send_status # from datakortet.utils import root from coverage import coverage, misc from coverage.files import find_python_files from coverage.parser import CodeParser from coverage.config import CoverageConfig from . import dkenv def linecount(fname, excludes): """Return the number of lines in ``fname``, counting the same way that coverage does. """ cp = CodeParser(filename=fname, exclude=re.compile(misc.join_regex(excludes))) lines, excluded = cp.parse_source() return len(lines), len(excluded) def skiplist(): cov = coverage(config_file=os.path.join(dkenv.DKROOT, '.coveragerc')) cwd = os.getcwd() skippatterns = [os.path.normpath(p.replace(cwd, dkenv.DKROOT)) for p in cov.omit] _skiplist = [] for pat in skippatterns: _skiplist += glob.glob(pat) return set(_skiplist) def abspath(fname): # cwd = os.getcwd() res = os.path.normcase( os.path.normpath( os.path.abspath(fname))) #.replace(cwd, root())))) return res def valid_file(fname, _skiplist=None): _skiplist = _skiplist or skiplist() if fname.endswith('.py'): absfname = abspath(fname) if absfname not in _skiplist: fpath, name = os.path.split(fname) if name != '__init__.py' or os.stat(absfname).st_size > 0: return absfname return False def python_files(folder): _skiplist = skiplist() for fname in find_python_files(folder): f = valid_file(fname, _skiplist) if f: yield f def pylinecount(rt=None, verbose=False): """Count Python lines the same way that coverage does. 
""" res = 0 cov = coverage(config_file=os.path.join(dkenv.DKROOT, '.coveragerc')) rt = rt or dkenv.DKROOT _skiplist = skiplist() exclude_lines = cov.get_exclude_list() for fname in python_files(rt): if os.path.normpath(fname) not in _skiplist: lcount, excount = linecount(fname, exclude_lines) if verbose: print '%5d %5d %s' % (lcount, excount, fname) res += lcount else: if verbose: print '-----', fname return res # def report_test_coverage(reportline, dashboard=True): # start = time.time() # parts = reportline.split() # # stmts = int(parts[1]) # skipped = int(parts[2]) # covered = stmts - skipped # print >> sys.stderr, "COVERED:", covered # # linecount = pylinecount() # print >> sys.stderr, "TOTAL: ", linecount # # coverage = 100.0 * covered / linecount # severity = 'green' # if coverage < 85: # severity = 'yellow' # if coverage < 60: # severity = 'red' # # sys.stdout.write("Coverage: " + str(coverage) + '\n') # # if dashboard: # send_status(tag='code.testcov', # value=coverage, # duration=time.time() - start, # server='appsrv') # if __name__ == "__main__": # intxt = sys.stdin.read() # report_test_coverage(intxt) # sys.exit(0)
gpl-2.0
ptisserand/ansible
lib/ansible/modules/network/cloudengine/ce_vxlan_arp.py
43
23695
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
    - Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
    evn_bgp:
        description:
            - Enables EVN BGP.
        choices: ['enable', 'disable']
    evn_source_ip:
        description:
            - Specifies the source address of an EVN BGP peer.
              The value is in dotted decimal notation.
    evn_peer_ip:
        description:
            - Specifies the IP address of an EVN BGP peer.
              The value is in dotted decimal notation.
    evn_server:
        description:
            - Configures the local device as the router reflector (RR) on the EVN network.
        choices: ['enable', 'disable']
    evn_reflect_client:
        description:
            - Configures the local device as the route reflector (RR) and its peer as the client.
        choices: ['enable', 'disable']
    vbdif_name:
        description:
            - Full name of VBDIF interface, i.e. Vbdif100.
    arp_collect_host:
        description:
            - Enables EVN BGP or BGP EVPN to collect host information.
        choices: ['enable', 'disable']
    host_collect_protocol:
        description:
            - Enables EVN BGP or BGP EVPN to advertise host information.
        choices: ['bgp', 'none']
    bridge_domain_id:
        description:
            - Specifies a BD(bridge domain) ID.
              The value is an integer ranging from 1 to 16777215.
    arp_suppress:
        description:
            - Enables ARP broadcast suppression in a BD.
        choices: ['enable', 'disable']
    state:
        description:
            - Determines whether the config should be present or not
              on the device.
        default: present
        choices: ['present', 'absent']
"""

EXAMPLES = '''
- name: vxlan arp module test
  hosts: ce128
  connection: local
  gather_facts: no
  vars:
    cli:
      host: "{{ inventory_hostname }}"
      port: "{{ ansible_ssh_port }}"
      username: "{{ username }}"
      password: "{{ password }}"
      transport: cli

  tasks:

  - name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
    ce_vxlan_arp:
      evn_bgp: enable
      evn_source_ip: 6.6.6.6
      evn_peer_ip: 7.7.7.7
      provider: "{{ cli }}"
  - name: Configure a Layer 3 VXLAN gateway as a BGP RR.
    ce_vxlan_arp:
      evn_bgp: enable
      evn_server: enable
      provider: "{{ cli }}"
  - name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
    ce_vxlan_arp:
      vbdif_name: Vbdif100
      arp_collect_host: enable
      provider: "{{ cli }}"
  - name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
    ce_vxlan_arp:
      host_collect_protocol: bgp
      provider: "{{ cli }}"
  - name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
    ce_vxlan_arp:
      bridge_domain_id: 100
      arp_suppress: enable
      provider: "{{ cli }}"
'''

RETURN = '''
proposed:
    description: k/v pairs of parameters passed into module
    returned: verbose mode
    type: dict
    sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
    description: k/v pairs of existing configuration
    returned: verbose mode
    type: dict
    sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
    description: k/v pairs of configuration after module execution
    returned: verbose mode
    type: dict
    sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
    description: commands sent to the device
    returned: always
    type: list
    sample: ["evn bgp", "source-address 6.6.6.6", "peer 7.7.7.7"]
changed:
    description: check to see if a change was made on the device
    returned: always
    type: boolean
    sample: true
'''

import re

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec

# Dotted-decimal IPv4 capture group.  NOTE: the dots are escaped; the
# previous patterns used bare '.' which matches any character.
IPV4_GROUP = r"([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"


def is_config_exist(cmp_cfg, test_cfg):
    """Return True if test_cfg occurs as a substring of cmp_cfg."""
    if not cmp_cfg or not test_cfg:
        return False

    return bool(test_cfg in cmp_cfg)


def is_valid_v4addr(addr):
    """Loosely validate a dotted-decimal IPv4 address (four octets, each 0-255)."""
    if addr.count('.') == 3:
        addr_list = addr.split('.')
        if len(addr_list) != 4:
            return False
        for each_num in addr_list:
            if not each_num.isdigit():
                return False
            if int(each_num) > 255:
                return False
        return True

    return False


def get_evn_peers(config):
    """Return the deduplicated list of EVN BGP peer addresses, or None if absent."""
    get = re.findall(r"peer " + IPV4_GROUP, config)
    if not get:
        return None
    else:
        return list(set(get))


def get_evn_srouce(config):
    """Return the EVN BGP source address, or None if not configured.

    (Misspelled name kept as-is: it is part of the module's internal API.)
    """
    get = re.findall(r"source-address " + IPV4_GROUP, config)
    if not get:
        return None
    else:
        return get[0]


def get_evn_reflect_client(config):
    """Return the list of EVN BGP peers configured as reflect clients, or None."""
    get = re.findall(r"peer " + IPV4_GROUP + r"\s*reflect-client", config)
    if not get:
        return None
    else:
        return list(get)


class VxlanArp(object):
    """Manages ARP attributes of VXLAN."""

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.evn_bgp = self.module.params['evn_bgp']
        self.evn_source_ip = self.module.params['evn_source_ip']
        self.evn_peer_ip = self.module.params['evn_peer_ip']
        self.evn_server = self.module.params['evn_server']
        self.evn_reflect_client = self.module.params['evn_reflect_client']
        self.vbdif_name = self.module.params['vbdif_name']
        self.arp_collect_host = self.module.params['arp_collect_host']
        self.host_collect_protocol = self.module.params[
            'host_collect_protocol']
        self.bridge_domain_id = self.module.params['bridge_domain_id']
        self.arp_suppress = self.module.params['arp_suppress']
        self.state = self.module.params['state']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.config = ""  # current config
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """Init the AnsibleModule with interdependent-parameter constraints."""

        required_together = [("vbdif_name", "arp_collect_host"),
                             ("bridge_domain_id", "arp_suppress")]
        self.module = AnsibleModule(argument_spec=self.spec,
                                    required_together=required_together,
                                    supports_check_mode=True)

    def cli_load_config(self, commands):
        """Send accumulated commands to the device (no-op in check mode)."""

        if not self.module.check_mode:
            load_config(self.module, commands)

    def get_current_config(self):
        """Fetch the sections of device config this module cares about."""

        flags = list()
        exp = "| ignore-case section include evn bgp|host collect protocol bgp"
        if self.vbdif_name:
            exp += "|^interface %s$" % self.vbdif_name

        if self.bridge_domain_id:
            exp += "|^bridge-domain %s$" % self.bridge_domain_id

        flags.append(exp)
        config = get_config(self.module, flags)

        return config

    def cli_add_command(self, command, undo=False):
        """Queue a command; 'quit'/'return' are sent but not reported in updates."""

        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command

        self.commands.append(cmd)          # set to device
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)   # show updates result

    def config_bridge_domain(self):
        """Manage ARP broadcast suppression inside a bridge domain.

        Device CLI:
            bridge-domain bd-id
              [undo] arp broadcast-suppress enable
        """

        if not self.bridge_domain_id:
            return

        cmd = "bridge-domain %s" % self.bridge_domain_id
        if not is_config_exist(self.config, cmd):
            self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)

        cmd = "arp broadcast-suppress enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_suppress == "enable" and not exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_suppress == "disable" and exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_evn_bgp(self):
        """Enable EVN BGP and configure source / peer / server / reflect-client."""

        evn_bgp_view = False    # have we already entered the "evn bgp" view?
        evn_bgp_enable = False  # is EVN BGP on (either already, or being enabled)?

        cmd = "evn bgp"
        exist = is_config_exist(self.config, cmd)
        if self.evn_bgp == "enable" or exist:
            evn_bgp_enable = True

        # [undo] evn bgp
        if self.evn_bgp:
            if self.evn_bgp == "enable" and not exist:
                self.cli_add_command(cmd)
                evn_bgp_view = True
            elif self.evn_bgp == "disable" and exist:
                self.cli_add_command(cmd, undo=True)
                # disabling the view removes everything under it
                return

        # [undo] source-address ip-address
        if evn_bgp_enable and self.evn_source_ip:
            cmd = "source-address %s" % self.evn_source_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.state == "absent" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        # [undo] peer ip-address
        # [undo] peer ipv4-address reflect-client
        if evn_bgp_enable and self.evn_peer_ip:
            cmd = "peer %s" % self.evn_peer_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present":
                if not exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd)
                    if self.evn_reflect_client == "enable":
                        self.cli_add_command(
                            "peer %s reflect-client" % self.evn_peer_ip)
                else:
                    if self.evn_reflect_client:
                        cmd = "peer %s reflect-client" % self.evn_peer_ip
                        exist = is_config_exist(self.config, cmd)
                        if self.evn_reflect_client == "enable" and not exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd)
                        elif self.evn_reflect_client == "disable" and exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd, undo=True)
            else:
                if exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd, undo=True)

        # [undo] server enable
        if evn_bgp_enable and self.evn_server:
            cmd = "server enable"
            exist = is_config_exist(self.config, cmd)
            if self.evn_server == "enable" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.evn_server == "disable" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        if evn_bgp_view:
            self.cli_add_command("quit")

    def config_vbdif(self):
        """Manage 'arp collect host enable' in the VBDIF interface view."""

        # interface vbdif bd-id
        # [undo] arp collect host enable
        cmd = "interface %s" % self.vbdif_name.lower().capitalize()
        exist = is_config_exist(self.config, cmd)
        if not exist:
            self.module.fail_json(
                msg="Error: Interface %s does not exist." % self.vbdif_name)

        cmd = "arp collect host enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_collect_host == "enable" and not exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_collect_host == "disable" and exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_host_collect_protocal(self):
        """Enable EVN BGP or BGP EVPN to advertise host information."""

        # [undo] host collect protocol bgp
        cmd = "host collect protocol bgp"
        exist = is_config_exist(self.config, cmd)

        if self.state == "present":
            if self.host_collect_protocol == "bgp" and not exist:
                self.cli_add_command(cmd)
            elif self.host_collect_protocol == "none" and exist:
                self.cli_add_command(cmd, undo=True)
        else:
            if self.host_collect_protocol == "bgp" and exist:
                self.cli_add_command(cmd, undo=True)

    def is_valid_vbdif(self, ifname):
        """Return True if ifname is a valid VBDIF interface name (Vbdif1..16777215)."""

        if not ifname.upper().startswith('VBDIF'):
            return False
        bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "")
        if not bdid.isdigit():
            return False
        if int(bdid) < 1 or int(bdid) > 16777215:
            return False

        return True

    def check_params(self):
        """Validate and normalize all input params; fail_json on any error."""

        # bridge domain id check
        if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: Bridge domain id is not digit.")
            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
                self.module.fail_json(
                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")

        # evn_source_ip check
        if self.evn_source_ip:
            if not is_valid_v4addr(self.evn_source_ip):
                self.module.fail_json(msg="Error: evn_source_ip is invalid.")

        # evn_peer_ip check
        if self.evn_peer_ip:
            if not is_valid_v4addr(self.evn_peer_ip):
                self.module.fail_json(msg="Error: evn_peer_ip is invalid.")

        # vbdif_name check: normalize "vbdif 100" -> "Vbdif100"
        if self.vbdif_name:
            self.vbdif_name = self.vbdif_name.replace(
                " ", "").lower().capitalize()
            if not self.is_valid_vbdif(self.vbdif_name):
                self.module.fail_json(msg="Error: vbdif_name is invalid.")

        # evn_reflect_client and evn_peer_ip must set at the same time
        if self.evn_reflect_client and not self.evn_peer_ip:
            self.module.fail_json(
                msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")

        # evn_server and evn_reflect_client can not set at the same time
        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
            self.module.fail_json(
                msg="Error: evn_server and evn_reflect_client can not set at the same time.")

    def get_proposed(self):
        """Populate self.proposed from the module's input parameters."""

        if self.evn_bgp:
            self.proposed["evn_bgp"] = self.evn_bgp
        if self.evn_source_ip:
            self.proposed["evn_source_ip"] = self.evn_source_ip
        if self.evn_peer_ip:
            self.proposed["evn_peer_ip"] = self.evn_peer_ip
        if self.evn_server:
            self.proposed["evn_server"] = self.evn_server
        if self.evn_reflect_client:
            self.proposed["evn_reflect_client"] = self.evn_reflect_client
        if self.arp_collect_host:
            self.proposed["arp_collect_host"] = self.arp_collect_host
        if self.host_collect_protocol:
            self.proposed["host_collect_protocol"] = self.host_collect_protocol
        if self.arp_suppress:
            self.proposed["arp_suppress"] = self.arp_suppress
        if self.vbdif_name:
            # BUGFIX: was "= self.evn_peer_ip" (copy-paste error).
            self.proposed["vbdif_name"] = self.vbdif_name
        if self.bridge_domain_id:
            self.proposed["bridge_domain_id"] = self.bridge_domain_id
        self.proposed["state"] = self.state

    def get_existing(self):
        """Populate self.existing from the pre-change device configuration."""

        evn_bgp_exist = is_config_exist(self.config, "evn bgp")
        if evn_bgp_exist:
            self.existing["evn_bgp"] = "enable"
        else:
            self.existing["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(self.config, "server enable"):
                self.existing["evn_server"] = "enable"
            else:
                self.existing["evn_server"] = "disable"

            self.existing["evn_source_ip"] = get_evn_srouce(self.config)
            self.existing["evn_peer_ip"] = get_evn_peers(self.config)
            self.existing["evn_reflect_client"] = get_evn_reflect_client(
                self.config)

        # BUGFIX: this result was previously stored under
        # "host_collect_protocol" and immediately overwritten below.
        if is_config_exist(self.config, "arp collect host enable"):
            self.existing["arp_collect_host"] = "enable"
        else:
            self.existing["arp_collect_host"] = "disable"

        if is_config_exist(self.config, "host collect protocol bgp"):
            self.existing["host_collect_protocol"] = "bgp"
        else:
            self.existing["host_collect_protocol"] = None

        if is_config_exist(self.config, "arp broadcast-suppress enable"):
            self.existing["arp_suppress"] = "enable"
        else:
            self.existing["arp_suppress"] = "disable"

    def get_end_state(self):
        """Populate self.end_state from the post-change device configuration."""

        config = self.get_current_config()

        evn_bgp_exist = is_config_exist(config, "evn bgp")
        if evn_bgp_exist:
            self.end_state["evn_bgp"] = "enable"
        else:
            self.end_state["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(config, "server enable"):
                self.end_state["evn_server"] = "enable"
            else:
                self.end_state["evn_server"] = "disable"

            self.end_state["evn_source_ip"] = get_evn_srouce(config)
            self.end_state["evn_peer_ip"] = get_evn_peers(config)
            self.end_state[
                "evn_reflect_client"] = get_evn_reflect_client(config)

        # BUGFIX: same wrong-key / immediate-overwrite defect as get_existing().
        if is_config_exist(config, "arp collect host enable"):
            self.end_state["arp_collect_host"] = "enable"
        else:
            self.end_state["arp_collect_host"] = "disable"

        if is_config_exist(config, "host collect protocol bgp"):
            self.end_state["host_collect_protocol"] = "bgp"
        else:
            self.end_state["host_collect_protocol"] = None

        if is_config_exist(config, "arp broadcast-suppress enable"):
            self.end_state["arp_suppress"] = "enable"
        else:
            self.end_state["arp_suppress"] = "disable"

    def work(self):
        """Main worker: validate, diff, apply, and report results."""

        self.check_params()
        self.config = self.get_current_config()
        self.get_existing()
        self.get_proposed()

        # deal present or absent
        if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
            self.config_evn_bgp()

        if self.vbdif_name and self.arp_collect_host:
            self.config_vbdif()

        if self.host_collect_protocol:
            self.config_host_collect_protocal()

        if self.bridge_domain_id and self.arp_suppress:
            self.config_bridge_domain()

        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)


def main():
    """Module entry point: build the argument spec and run the worker."""

    argument_spec = dict(
        evn_bgp=dict(required=False, type='str',
                     choices=['enable', 'disable']),
        evn_source_ip=dict(required=False, type='str'),
        evn_peer_ip=dict(required=False, type='str'),
        evn_server=dict(required=False, type='str',
                        choices=['enable', 'disable']),
        evn_reflect_client=dict(
            required=False, type='str', choices=['enable', 'disable']),
        vbdif_name=dict(required=False, type='str'),
        arp_collect_host=dict(required=False, type='str',
                              choices=['enable', 'disable']),
        host_collect_protocol=dict(
            required=False, type='str', choices=['bgp', 'none']),
        bridge_domain_id=dict(required=False, type='str'),
        arp_suppress=dict(required=False, type='str',
                          choices=['enable', 'disable']),
        state=dict(required=False, default='present',
                   choices=['present', 'absent'])
    )
    argument_spec.update(ce_argument_spec)
    module = VxlanArp(argument_spec)
    module.work()


if __name__ == '__main__':
    main()
gpl-3.0
hughperkins/gpu-experiments
gpuexperiments/old/sharedmemory.py
1
4036
# Note that this will erase your nvidia cache, ~/.nv/ComputeCache This may or may not be an undesirable side-effect for you. For example, cutorch will take 1-2 minutes or so to start after this cache has been emptied. from __future__ import print_function, division import time import string import random import numpy as np import pyopencl as cl import subprocess import os from os.path import join from gpuexperiments.callkernel import call_cl_kernel #import gpuexperiments.cpu_check from gpuexperiments.timecheck import inittime, timecheck gpu_idx = 0 platforms = cl.get_platforms() i = 0 for platform in platforms: gpu_devices = platform.get_devices(device_type=cl.device_type.GPU) if gpu_idx < i + len(gpu_devices): ctx = cl.Context(devices=[gpu_devices[gpu_idx-i]]) break i += len(gpu_devices) print('context', ctx) q = cl.CommandQueue(ctx) mf = cl.mem_flags sources = { 'kernel_store_to_local': r""" kernel void kernel_store_to_local(global int *data) { local int F[32]; F[0] = 123; } """ , 'kernel_init_local': r""" kernel void kernel_init_local(global int *data) { local int F[32]; for(int i = 0; i < 32; i++) { F[i] = 0; }; } """ , 'kernel_init_local_noloop': r""" kernel void kernel_init_local_noloop(global int *data) { local int F[32]; F[get_local_id(0)] = 0; } """ , 'kernel_copy_local_to_global': r""" kernel void kernel_copy_local_to_global(global int *data) { local int F[32]; int tid = get_local_id(0); data[tid] = F[tid]; } """ , 'kernel_copy_local_from_global': r""" kernel void kernel_copy_local_from_global(global int *data) { local int F[32]; int tid = get_local_id(0); F[tid] = data[tid]; } """ , 'kernel_copy_local_to_global_gid': r""" kernel void kernel_copy_local_to_global_gid(global int *data) { local int F[32]; int tid = get_local_id(0); int gid = get_global_id(0); data[gid] = F[tid]; } """ , 'kernel_copy_local_from_global_gid': r""" kernel void kernel_copy_local_from_global_gid(global int *data) { local int F[32]; int tid = get_local_id(0); int gid = 
get_global_id(0); F[tid] = data[gid]; } """ } optimized = set() def clearComputeCache(): cache_dir = join(os.environ['HOME'], '.nv/ComputeCache') for subdir in os.listdir(cache_dir): if subdir == 'index': continue print('clean', subdir) subprocess.call(['rm', '-Rf', join(cache_dir, subdir)]) # subprocess.call(['rm', '-Rf', join(os.environ['HOME'], '.nv/ComputeCache')]) def getPtx(kernelName): with open('/tmp/gpucmd.sh', 'w') as f: f.write(r"""#!/bin/bash cat $(grep -r %s ~/.nv/ComputeCache | awk '{print $3}') """ % kernelName) filepath = subprocess.check_output(['/bin/bash', '/tmp/gpucmd.sh']) filepath_utf8 = '' for byte in filepath: # print(byte) if byte >= 10 and byte < 128: if chr(byte) in string.printable: filepath_utf8 += chr(byte) # print('filepath', filepath) #print(kernelName) print(filepath_utf8.split('--opt-level')[0]) def buildKernel(name, source): options = '-cl-opt-disable' if name in optimized: print('ENABLING OPTIMIZATIONS') options = '' return cl.Program(ctx, source).build(options=options).__getattr__(name) d = np.zeros((1024*1024 * 32 * 2,), dtype=np.float32) d_cl = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=d) def timeKernel(name, kernel): # clearComputeCache() grid = (1024*1024,1,1) block = (32,1,1) q.finish() inittime() call_cl_kernel(kernel, q, grid, block, d_cl) q.finish() return timecheck(name) # print(getPtx('mykernel')) times = {} for name, source in sorted(sources.items()): clearComputeCache() kernel = buildKernel(name, source) print('built kernel') for it in range(3): t = timeKernel(name, kernel) times[name] = t print(getPtx(name)) for name, time in sorted(times.items()): print(name, time)
bsd-2-clause
vlinhd11/vlinhd11-android-scripting
python/src/Tools/i18n/pygettext.py
86
22104
#! /usr/bin/env python # -*- coding: iso-8859-1 -*- # Originally written by Barry Warsaw <barry@zope.com> # # Minimally patched to make it even more xgettext compatible # by Peter Funk <pf@artcom-gmbh.de> # # 2002-11-22 Jürgen Hermann <jh@web.de> # Added checks that _() only contains string literals, and # command line args are resolved to module lists, i.e. you # can now pass a filename, a module or package name, or a # directory (including globbing chars, important for Win32). # Made docstring fit in 80 chars wide displays using pydoc. # # for selftesting try: import fintl _ = fintl.gettext except ImportError: _ = lambda s: s __doc__ = _("""pygettext -- Python equivalent of xgettext(1) Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the internationalization of C programs. Most of these tools are independent of the programming language and can be used from within Python programs. Martin von Loewis' work[1] helps considerably in this regard. There's one problem though; xgettext is the program that scans source code looking for message strings, but it groks only C (or C++). Python introduces a few wrinkles, such as dual quoting characters, triple quoted strings, and raw strings. xgettext understands none of this. Enter pygettext, which uses Python's standard tokenize module to scan Python source code, generating .pot files identical to what GNU xgettext[2] generates for C and C++ code. From there, the standard GNU tools can be used. A word about marking Python strings as candidates for translation. GNU xgettext recognizes the following keywords: gettext, dgettext, dcgettext, and gettext_noop. But those can be a lot of text to include all over your code. C and C++ have a trick: they use the C preprocessor. Most internationalized C source includes a #define for gettext() to _() so that what has to be written in the source is much less. 
Thus these are both translatable strings: gettext("Translatable String") _("Translatable String") Python of course has no preprocessor so this doesn't work so well. Thus, pygettext searches only for _() by default, but see the -k/--keyword flag below for how to augment this. [1] http://www.python.org/workshops/1997-10/proceedings/loewis.html [2] http://www.gnu.org/software/gettext/gettext.html NOTE: pygettext attempts to be option and feature compatible with GNU xgettext where ever possible. However some options are still missing or are not fully implemented. Also, xgettext's use of command line switches with option arguments is broken, and in these cases, pygettext just defines additional switches. Usage: pygettext [options] inputfile ... Options: -a --extract-all Extract all strings. -d name --default-domain=name Rename the default output file from messages.pot to name.pot. -E --escape Replace non-ASCII characters with octal escape sequences. -D --docstrings Extract module, class, method, and function docstrings. These do not need to be wrapped in _() markers, and in fact cannot be for Python to consider them docstrings. (See also the -X option). -h --help Print this help message and exit. -k word --keyword=word Keywords to look for in addition to the default set, which are: %(DEFAULTKEYWORDS)s You can have multiple -k flags on the command line. -K --no-default-keywords Disable the default set of keywords (see above). Any keywords explicitly added with the -k/--keyword option are still recognized. --no-location Do not write filename/lineno location comments. -n --add-location Write filename/lineno location comments indicating where each extracted string is found in the source. These lines appear before each msgid. The style of comments is controlled by the -S/--style option. This is the default. -o filename --output=filename Rename the default output file from messages.pot to filename. If filename is `-' then the output is sent to standard out. 
-p dir --output-dir=dir Output files will be placed in directory dir. -S stylename --style stylename Specify which style to use for location comments. Two styles are supported: Solaris # File: filename, line: line-number GNU #: filename:line The style name is case insensitive. GNU style is the default. -v --verbose Print the names of the files being processed. -V --version Print the version of pygettext and exit. -w columns --width=columns Set width of output to columns. -x filename --exclude-file=filename Specify a file that contains a list of strings that are not be extracted from the input files. Each string to be excluded must appear on a line by itself in the file. -X filename --no-docstrings=filename Specify a file that contains a list of files (one per line) that should not have their docstrings extracted. This is only useful in conjunction with the -D option above. If `inputfile' is -, standard input is read. """) import os import imp import sys import glob import time import getopt import token import tokenize import operator __version__ = '1.5' default_keywords = ['_'] DEFAULTKEYWORDS = ', '.join(default_keywords) EMPTYSTRING = '' # The normal pot-file header. msgmerge and Emacs's po-mode work better if it's # there. pot_header = _('''\ # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR ORGANIZATION # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. 
# msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\\n" "POT-Creation-Date: %(time)s\\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n" "Language-Team: LANGUAGE <LL@li.org>\\n" "MIME-Version: 1.0\\n" "Content-Type: text/plain; charset=CHARSET\\n" "Content-Transfer-Encoding: ENCODING\\n" "Generated-By: pygettext.py %(version)s\\n" ''') def usage(code, msg=''): print >> sys.stderr, __doc__ % globals() if msg: print >> sys.stderr, msg sys.exit(code) escapes = [] def make_escapes(pass_iso8859): global escapes if pass_iso8859: # Allow iso-8859 characters to pass through so that e.g. 'msgid # "Höhe"' would result not result in 'msgid "H\366he"'. Otherwise we # escape any character outside the 32..126 range. mod = 128 else: mod = 256 for i in range(256): if 32 <= (i % mod) <= 126: escapes.append(chr(i)) else: escapes.append("\\%03o" % i) escapes[ord('\\')] = '\\\\' escapes[ord('\t')] = '\\t' escapes[ord('\r')] = '\\r' escapes[ord('\n')] = '\\n' escapes[ord('\"')] = '\\"' def escape(s): global escapes s = list(s) for i in range(len(s)): s[i] = escapes[ord(s[i])] return EMPTYSTRING.join(s) def safe_eval(s): # unwrap quotes, safely return eval(s, {'__builtins__':{}}, {}) def normalize(s): # This converts the various Python string types into a format that is # appropriate for .po files, namely much closer to C style. 
lines = s.split('\n') if len(lines) == 1: s = '"' + escape(s) + '"' else: if not lines[-1]: del lines[-1] lines[-1] = lines[-1] + '\n' for i in range(len(lines)): lines[i] = escape(lines[i]) lineterm = '\\n"\n"' s = '""\n"' + lineterm.join(lines) + '"' return s def containsAny(str, set): """Check whether 'str' contains ANY of the chars in 'set'""" return 1 in [c in str for c in set] def _visit_pyfiles(list, dirname, names): """Helper for getFilesForName().""" # get extension for python source files if not globals().has_key('_py_ext'): global _py_ext _py_ext = [triple[0] for triple in imp.get_suffixes() if triple[2] == imp.PY_SOURCE][0] # don't recurse into CVS directories if 'CVS' in names: names.remove('CVS') # add all *.py files to list list.extend( [os.path.join(dirname, file) for file in names if os.path.splitext(file)[1] == _py_ext] ) def _get_modpkg_path(dotted_name, pathlist=None): """Get the filesystem path for a module or a package. Return the file system path to a file for a module, and to a directory for a package. Return None if the name is not found, or is a builtin or extension module. """ # split off top-most name parts = dotted_name.split('.', 1) if len(parts) > 1: # we have a dotted path, import top-level package try: file, pathname, description = imp.find_module(parts[0], pathlist) if file: file.close() except ImportError: return None # check if it's indeed a package if description[2] == imp.PKG_DIRECTORY: # recursively handle the remaining name parts pathname = _get_modpkg_path(parts[1], [pathname]) else: pathname = None else: # plain name try: file, pathname, description = imp.find_module( dotted_name, pathlist) if file: file.close() if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]: pathname = None except ImportError: pathname = None return pathname def getFilesForName(name): """Get a list of module files for a filename, a module or package name, or a directory. 
""" if not os.path.exists(name): # check for glob chars if containsAny(name, "*?[]"): files = glob.glob(name) list = [] for file in files: list.extend(getFilesForName(file)) return list # try to find module or package name = _get_modpkg_path(name) if not name: return [] if os.path.isdir(name): # find all python files in directory list = [] os.path.walk(name, _visit_pyfiles, list) return list elif os.path.exists(name): # a single file return [name] return [] class TokenEater: def __init__(self, options): self.__options = options self.__messages = {} self.__state = self.__waiting self.__data = [] self.__lineno = -1 self.__freshmodule = 1 self.__curfile = None def __call__(self, ttype, tstring, stup, etup, line): # dispatch ## import token ## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \ ## 'tstring:', tstring self.__state(ttype, tstring, stup[0]) def __waiting(self, ttype, tstring, lineno): opts = self.__options # Do docstring extractions, if enabled if opts.docstrings and not opts.nodocstrings.get(self.__curfile): # module docstring? if self.__freshmodule: if ttype == tokenize.STRING: self.__addentry(safe_eval(tstring), lineno, isdocstring=1) self.__freshmodule = 0 elif ttype not in (tokenize.COMMENT, tokenize.NL): self.__freshmodule = 0 return # class docstring? 
if ttype == tokenize.NAME and tstring in ('class', 'def'): self.__state = self.__suiteseen return if ttype == tokenize.NAME and tstring in opts.keywords: self.__state = self.__keywordseen def __suiteseen(self, ttype, tstring, lineno): # ignore anything until we see the colon if ttype == tokenize.OP and tstring == ':': self.__state = self.__suitedocstring def __suitedocstring(self, ttype, tstring, lineno): # ignore any intervening noise if ttype == tokenize.STRING: self.__addentry(safe_eval(tstring), lineno, isdocstring=1) self.__state = self.__waiting elif ttype not in (tokenize.NEWLINE, tokenize.INDENT, tokenize.COMMENT): # there was no class docstring self.__state = self.__waiting def __keywordseen(self, ttype, tstring, lineno): if ttype == tokenize.OP and tstring == '(': self.__data = [] self.__lineno = lineno self.__state = self.__openseen else: self.__state = self.__waiting def __openseen(self, ttype, tstring, lineno): if ttype == tokenize.OP and tstring == ')': # We've seen the last of the translatable strings. Record the # line number of the first line of the strings and update the list # of messages seen. Reset state for the next batch. If there # were no strings inside _(), then just ignore this entry. 
if self.__data: self.__addentry(EMPTYSTRING.join(self.__data)) self.__state = self.__waiting elif ttype == tokenize.STRING: self.__data.append(safe_eval(tstring)) elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]: # warn if we see anything else than STRING or whitespace print >> sys.stderr, _( '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"' ) % { 'token': tstring, 'file': self.__curfile, 'lineno': self.__lineno } self.__state = self.__waiting def __addentry(self, msg, lineno=None, isdocstring=0): if lineno is None: lineno = self.__lineno if not msg in self.__options.toexclude: entry = (self.__curfile, lineno) self.__messages.setdefault(msg, {})[entry] = isdocstring def set_filename(self, filename): self.__curfile = filename self.__freshmodule = 1 def write(self, fp): options = self.__options timestamp = time.strftime('%Y-%m-%d %H:%M+%Z') # The time stamp in the header doesn't have the same format as that # generated by xgettext... print >> fp, pot_header % {'time': timestamp, 'version': __version__} # Sort the entries. First sort each particular entry's keys, then # sort all the entries by their first item. reverse = {} for k, v in self.__messages.items(): keys = v.keys() keys.sort() reverse.setdefault(tuple(keys), []).append((k, v)) rkeys = reverse.keys() rkeys.sort() for rkey in rkeys: rentries = reverse[rkey] rentries.sort() for k, v in rentries: isdocstring = 0 # If the entry was gleaned out of a docstring, then add a # comment stating so. This is to aid translators who may wish # to skip translating some unimportant docstrings. if reduce(operator.__add__, v.values()): isdocstring = 1 # k is the message string, v is a dictionary-set of (filename, # lineno) tuples. We want to sort the entries in v first by # file name and then by line number. 
v = v.keys() v.sort() if not options.writelocations: pass # location comments are different b/w Solaris and GNU: elif options.locationstyle == options.SOLARIS: for filename, lineno in v: d = {'filename': filename, 'lineno': lineno} print >>fp, _( '# File: %(filename)s, line: %(lineno)d') % d elif options.locationstyle == options.GNU: # fit as many locations on one line, as long as the # resulting line length doesn't exceeds 'options.width' locline = '#:' for filename, lineno in v: d = {'filename': filename, 'lineno': lineno} s = _(' %(filename)s:%(lineno)d') % d if len(locline) + len(s) <= options.width: locline = locline + s else: print >> fp, locline locline = "#:" + s if len(locline) > 2: print >> fp, locline if isdocstring: print >> fp, '#, docstring' print >> fp, 'msgid', normalize(k) print >> fp, 'msgstr ""\n' def main(): global default_keywords try: opts, args = getopt.getopt( sys.argv[1:], 'ad:DEhk:Kno:p:S:Vvw:x:X:', ['extract-all', 'default-domain=', 'escape', 'help', 'keyword=', 'no-default-keywords', 'add-location', 'no-location', 'output=', 'output-dir=', 'style=', 'verbose', 'version', 'width=', 'exclude-file=', 'docstrings', 'no-docstrings', ]) except getopt.error, msg: usage(1, msg) # for holding option values class Options: # constants GNU = 1 SOLARIS = 2 # defaults extractall = 0 # FIXME: currently this option has no effect at all. 
escape = 0 keywords = [] outpath = '' outfile = 'messages.pot' writelocations = 1 locationstyle = GNU verbose = 0 width = 78 excludefilename = '' docstrings = 0 nodocstrings = {} options = Options() locations = {'gnu' : options.GNU, 'solaris' : options.SOLARIS, } # parse options for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-a', '--extract-all'): options.extractall = 1 elif opt in ('-d', '--default-domain'): options.outfile = arg + '.pot' elif opt in ('-E', '--escape'): options.escape = 1 elif opt in ('-D', '--docstrings'): options.docstrings = 1 elif opt in ('-k', '--keyword'): options.keywords.append(arg) elif opt in ('-K', '--no-default-keywords'): default_keywords = [] elif opt in ('-n', '--add-location'): options.writelocations = 1 elif opt in ('--no-location',): options.writelocations = 0 elif opt in ('-S', '--style'): options.locationstyle = locations.get(arg.lower()) if options.locationstyle is None: usage(1, _('Invalid value for --style: %s') % arg) elif opt in ('-o', '--output'): options.outfile = arg elif opt in ('-p', '--output-dir'): options.outpath = arg elif opt in ('-v', '--verbose'): options.verbose = 1 elif opt in ('-V', '--version'): print _('pygettext.py (xgettext for Python) %s') % __version__ sys.exit(0) elif opt in ('-w', '--width'): try: options.width = int(arg) except ValueError: usage(1, _('--width argument must be an integer: %s') % arg) elif opt in ('-x', '--exclude-file'): options.excludefilename = arg elif opt in ('-X', '--no-docstrings'): fp = open(arg) try: while 1: line = fp.readline() if not line: break options.nodocstrings[line[:-1]] = 1 finally: fp.close() # calculate escapes make_escapes(options.escape) # calculate all keywords options.keywords.extend(default_keywords) # initialize list of strings to exclude if options.excludefilename: try: fp = open(options.excludefilename) options.toexclude = fp.readlines() fp.close() except IOError: print >> sys.stderr, _( "Can't read --exclude-file: %s") % 
options.excludefilename sys.exit(1) else: options.toexclude = [] # resolve args to module lists expanded = [] for arg in args: if arg == '-': expanded.append(arg) else: expanded.extend(getFilesForName(arg)) args = expanded # slurp through all the files eater = TokenEater(options) for filename in args: if filename == '-': if options.verbose: print _('Reading standard input') fp = sys.stdin closep = 0 else: if options.verbose: print _('Working on %s') % filename fp = open(filename) closep = 1 try: eater.set_filename(filename) try: tokenize.tokenize(fp.readline, eater) except tokenize.TokenError, e: print >> sys.stderr, '%s: %s, line %d, column %d' % ( e[0], filename, e[1][0], e[1][1]) finally: if closep: fp.close() # write the output if options.outfile == '-': fp = sys.stdout closep = 0 else: if options.outpath: options.outfile = os.path.join(options.outpath, options.outfile) fp = open(options.outfile, 'w') closep = 1 try: eater.write(fp) finally: if closep: fp.close() if __name__ == '__main__': main() # some more test strings _(u'a unicode string') # this one creates a warning _('*** Seen unexpected token "%(token)s"') % {'token': 'test'} _('more' 'than' 'one' 'string')
apache-2.0
DedMemez/ODS-August-2017
suit/DistributedSellbotBoss.py
1
55846
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.suit.DistributedSellbotBoss from panda3d.core import CollideMask, CollisionNode, CollisionPolygon, CollisionTube, GeomNode, Lens, NodePath, Point3, RopeNode, TextNode, Texture, VBase3, VBase4, Vec3, headsUp from direct.directnotify import DirectNotifyGlobal from direct.directutil import Mopath from direct.distributed.ClockDelta import * from direct.fsm import ClassicFSM, State from direct.fsm import FSM from direct.gui.DirectGui import * from direct.interval.IntervalGlobal import * from direct.showbase.PythonUtil import Functor from direct.showutil import Rope from direct.task import Task import math import random import DistributedBossCog import SuitDNA from toontown.battle import BattleBase from toontown.battle import MovieToonVictory from toontown.battle import RewardPanel from toontown.battle import SuitBattleGlobals from toontown.battle.BattleProps import * from toontown.coghq import CogDisguiseGlobals from toontown.distributed import DelayDelete from toontown.suit import SellbotBossGlobals from toontown.toon import NPCToons from toontown.toonbase import TTLocalizer from toontown.toonbase import ToontownBattleGlobals from toontown.toonbase import ToontownGlobals from otp.nametag.NametagConstants import * from otp.nametag import NametagGlobals from DistributedSuitBase import DistributedSuitBase OneBossCog = None class DistributedSellbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSellbotBoss') cageHeights = [100, 81, 63, 44, 25, 18] def __init__(self, cr): DistributedBossCog.DistributedBossCog.__init__(self, cr) FSM.FSM.__init__(self, 'DistributedSellbotBoss') self.cagedToonNpcId = None self.doobers = [] self.dooberDna = None self.bossDamage = 0 self.attackCode = None self.attackAvId = 0 self.recoverRate = 0 self.recoverStartTime = 0 self.bossDamageMovie = None self.cagedToon = None self.cageShadow = None self.cageIndex = 
0 self.everThrownPie = 0 self.battleThreeMusicTime = 0 self.insidesANodePath = None self.insidesBNodePath = None self.rampA = None self.rampB = None self.rampC = None self.strafeInterval = None self.onscreenMessage = None self.toonMopathInterval = [] self.nerfed = base.cr.newsManager.isHolidayRunning(ToontownGlobals.SELLBOT_NERF_HOLIDAY) self.localToonPromoted = True self.resetMaxDamage() return def announceGenerate(self): global OneBossCog DistributedBossCog.DistributedBossCog.announceGenerate(self) self.setName(TTLocalizer.SellbotBossName) nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self.name, 'dept': SuitDNA.getDeptFullname(self.style.dept)} self.setDisplayName(nameInfo) self.cageDoorSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_door.ogg') self.cageLandSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_SOS_cage_land.ogg') self.cageLowerSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_lower.ogg') self.piesRestockSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_SOS_pies_restock.ogg') self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.ogg') self.strafeSfx = [] for i in xrange(10): self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.ogg')) render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog)) insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5)) insidesANode = CollisionNode('BossZap') insidesANode.addSolid(insidesA) insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask) self.insidesANodePath = self.axle.attachNewNode(insidesANode) self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesANodePath.stash() insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5)) insidesBNode = CollisionNode('BossZap') insidesBNode.addSolid(insidesB) insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | 
ToontownGlobals.WallBitmask) self.insidesBNodePath = self.axle.attachNewNode(insidesBNode) self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides)) self.insidesBNodePath.stash() target = CollisionTube(0, -1, 4, 0, -1, 9, 3.5) targetNode = CollisionNode('BossZap') targetNode.addSolid(target) targetNode.setCollideMask(ToontownGlobals.PieBitmask) self.targetNodePath = self.pelvis.attachNewNode(targetNode) self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog)) shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5) shieldNode = CollisionNode('BossZap') shieldNode.addSolid(shield) shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask) shieldNodePath = self.pelvis.attachNewNode(shieldNode) disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide') disk.find('**/+CollisionNode').setName('BossZap') disk.reparentTo(self.pelvis) disk.setZ(0.8) self.loadEnvironment() self.__makeCagedToon() self.__loadMopaths() if OneBossCog is not None: self.notify.warning('Multiple BossCogs visible.') OneBossCog = self return def disable(self): global OneBossCog DistributedBossCog.DistributedBossCog.disable(self) self.request('Off') self.unloadEnvironment() self.__unloadMopaths() self.__cleanupCagedToon() self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) self.__cleanupStrafe() render.clearTag('pieCode') self.targetNodePath.detachNode() self.betweenBattleMusic.stop() self.promotionMusic.stop() self.stingMusic.stop() self.battleTwoMusic.stop() self.battleThreeMusic.stop() self.epilogueMusic.stop() self.destroyDoobers() del self.dooberDna while len(self.toonMopathInterval): toonMopath = self.toonMopathInterval[0] toonMopath.finish() toonMopath.destroy() self.toonMopathInterval.remove(toonMopath) if OneBossCog == self: OneBossCog = None return def handlePracticeStart(self, toon): toon.setPosHpr(-0.09, 77.25, 17.95, 180, 0, 0) def resetMaxDamage(self): if self.nerfed: self.bossMaxDamage = 
ToontownGlobals.SellbotBossMaxDamageNerfed else: self.bossMaxDamage = ToontownGlobals.SellbotBossMaxDamage def d_hitBoss(self, bossDamage): self.sendUpdate('hitBoss', [bossDamage]) def d_hitBossInsides(self): self.sendUpdate('hitBossInsides', []) def d_hitToon(self, toonId): self.sendUpdate('hitToon', [toonId]) def setCagedToonNpcId(self, npcId): self.cagedToonNpcId = npcId def gotToon(self, toon): stateName = self.state if stateName == 'Elevator': self.placeToonInElevator(toon) def setDoobers(self, dooberDna): self.dooberDna = dooberDna def getDoobers(self): return self.dooberDna def setBossDamage(self, bossDamage, recoverRate, timestamp): recoverStartTime = globalClockDelta.networkToLocalTime(timestamp) self.bossDamage = bossDamage self.recoverRate = recoverRate self.recoverStartTime = recoverStartTime taskName = 'RecoverBossDamage' taskMgr.remove(taskName) if self.bossDamageMovie: if self.bossDamage >= self.bossMaxDamage: self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration()) else: self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie) if self.recoverRate: taskMgr.add(self.__recoverBossDamage, taskName) def getBossDamage(self): now = globalClock.getFrameTime() elapsed = now - self.recoverStartTime return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0) def __recoverBossDamage(self, task): self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie) return Task.cont def __makeCagedToon(self): if self.cagedToon: return self.cagedToon = NPCToons.createLocalNPC(self.cagedToonNpcId) self.cagedToon.addActive() self.cagedToon.reparentTo(self.cage) self.cagedToon.setPosHpr(0, -2, 0, 180, 0, 0) self.cagedToon.loop('neutral') self.cagedToon.setActiveShadow(0) touch = CollisionPolygon(Point3(-3.0382, 3.0382, -1), Point3(3.0382, 3.0382, -1), Point3(3.0382, -3.0382, -1), Point3(-3.0382, -3.0382, -1)) touch.setTangible(0) touchNode = CollisionNode('Cage') touchNode.setCollideMask(ToontownGlobals.WallBitmask) 
touchNode.addSolid(touch) self.cage.attachNewNode(touchNode) def __cleanupCagedToon(self): if self.cagedToon: self.cagedToon.removeActive() self.cagedToon.delete() self.cagedToon = None return def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes): toon = base.cr.doId2do.get(toonId) if toon: destPos = toon.getPos() self.placeToonInElevator(toon) toon.wrtReparentTo(render) walkMopath = MopathInterval(mopath, toon) ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(walkMopath, toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral')) self.toonMopathInterval.append(walkMopath) track.append(ival) delayDeletes.append(DelayDelete.DelayDelete(toon, 'SellbotBoss.__walkToonToPromotion')) def __walkDoober(self, suit, delay, turnPos, track): turnPos = Point3(*turnPos) turnPosDown = Point3(*ToontownGlobals.SellbotBossDooberTurnPosDown) turnPosDown2 = Point3(*ToontownGlobals.SellbotBossDooberTurnPosDown2) turnPosDown3 = Point3(*ToontownGlobals.SellbotBossDooberTurnPosDown3) flyPos = Point3(*ToontownGlobals.SellbotBossDooberFlyPos) seq = Sequence(Func(suit.headsUp, turnPos), Wait(delay), Func(suit.loop, 'walk', 0), self.__walkSuitToPoint(suit, suit.getPos(), turnPos), self.__walkSuitToPoint(suit, turnPos, turnPosDown), self.__walkSuitToPoint(suit, turnPosDown, turnPosDown2), self.__walkSuitToPoint(suit, turnPosDown2, turnPosDown3), self.__walkSuitToPoint(suit, turnPosDown3, flyPos), suit.beginSupaFlyMove(flyPos, 0, 'flyAway', skyPosCurrent=False), Func(suit.delete)) track.append(seq) def __walkSuitToPoint(self, node, fromPos, toPos): vector = Vec3(toPos - fromPos) distance = vector.length() time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8) return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos)) def destroyDoobers(self): if not self.doobers: return for doober in self.doobers: 
if doober: doober.delete() self.doobers = [] def makeIntroductionMovie(self, delayDeletes): track = Parallel() camera.reparentTo(render) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) dooberTrack = Parallel() if not self.doobers: for dna in self.dooberDna: dna = SuitDNA.SuitDNA(dna) suit = DistributedSuitBase(self.cr) suit.doId = id(suit) suit.setDNA(dna) suit.setDisplayLevel() suit.reparentTo(render) self.doobers.append(suit) self.__doobersToPromotionPosition(self.doobers[:4], self.battleANode) self.__doobersToPromotionPosition(self.doobers[4:], self.battleBNode) turnPosA = ToontownGlobals.SellbotBossDooberTurnPosA turnPosB = ToontownGlobals.SellbotBossDooberTurnPosB self.__walkDoober(self.doobers[0], 0, turnPosA, dooberTrack) self.__walkDoober(self.doobers[1], 2, turnPosA, dooberTrack) self.__walkDoober(self.doobers[2], 6, turnPosA, dooberTrack) self.__walkDoober(self.doobers[3], 10, turnPosA, dooberTrack) self.__walkDoober(self.doobers[7], 1, turnPosB, dooberTrack) self.__walkDoober(self.doobers[6], 4, turnPosB, dooberTrack) self.__walkDoober(self.doobers[5], 8, turnPosB, dooberTrack) self.__walkDoober(self.doobers[4], 12, turnPosB, dooberTrack) toonTrack = Parallel() self.__toonsToPromotionPosition(self.toonsA, self.battleANode) self.__toonsToPromotionPosition(self.toonsB, self.battleBNode) delay = 0 for toonId in self.toonsA: self.__walkToonToPromotion(toonId, delay, self.toonsEnterA, toonTrack, delayDeletes) delay += 1 for toonId in self.toonsB: self.__walkToonToPromotion(toonId, delay, self.toonsEnterB, toonTrack, delayDeletes) delay += 1 toonTrack.append(Sequence(Wait(delay), self.closeDoors)) self.rampA.request('extended') self.rampB.request('extended') self.rampC.request('retracted') self.clearChat() self.cagedToon.clearChat() promoteDoobers = TTLocalizer.BossCogPromoteDoobers % SuitDNA.getDeptFullnameP(self.style.dept) doobersAway = TTLocalizer.BossCogDoobersAway[self.style.dept] welcomeToons = TTLocalizer.BossCogWelcomeToons promoteToons = 
TTLocalizer.BossCogPromoteToons % SuitDNA.getDeptFullnameP(self.style.dept) discoverToons = TTLocalizer.BossCogDiscoverToons attackToons = TTLocalizer.BossCogAttackToons interruptBoss = TTLocalizer.CagedToonInterruptBoss rescueQuery = TTLocalizer.CagedToonRescueQuery bossAnimTrack = Sequence(ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1), ActorInterval(self, 'ltTurn2Wave', duration=2), ActorInterval(self, 'wave', duration=4, loop=1), ActorInterval(self, 'ltTurn2Wave', startTime=2, endTime=0), ActorInterval(self, 'Ff_speech', duration=7, loop=1)) track.append(bossAnimTrack) dialogTrack = Track((0, Parallel(camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), blendType='easeInOut'), IndirectInterval(toonTrack, 0, 18))), (5.6, Func(self.setChatAbsolute, promoteDoobers, CFSpeech)), (9, dooberTrack), (10, Sequence(Func(self.clearChat), camera.posHprInterval(5, (0, -61.6, 6.66), (0, 25.2, 0), blendType='easeInOut'))), (12, Func(self.setChatAbsolute, doobersAway, CFSpeech)), (16, Parallel(Func(self.clearChat), camera.posHprInterval(4, (-25, -99, 10), (-14, 10, 0), blendType='easeInOut'), IndirectInterval(toonTrack, 30))), (18, Func(self.setChatAbsolute, welcomeToons, CFSpeech)), (22, Func(self.setChatAbsolute, promoteToons, CFSpeech)), (22.2, Sequence(Func(self.cagedToon.nametag3d.setScale, 2), Func(self.cagedToon.setChatAbsolute, interruptBoss, CFSpeech), ActorInterval(self.cagedToon, 'wave'), Func(self.cagedToon.loop, 'neutral'))), (25, Sequence(Func(self.clearChat), Func(self.cagedToon.clearChat), Func(camera.setPosHpr, -12, -15, 27, -151, -15, 0), ActorInterval(self, 'Ff_lookRt'))), (27, Sequence(Func(self.cagedToon.setChatAbsolute, rescueQuery, CFSpeech), Func(camera.setPosHpr, -12, 48, 94, -26, 20, 0), ActorInterval(self.cagedToon, 'wave'), Func(self.cagedToon.loop, 'neutral'))), (31, Sequence(camera.posHprInterval(1, (-20, -35, 10), (-88, 25, 0), blendType='easeOut'), Func(self.setChatAbsolute, discoverToons, CFSpeech), 
Func(self.cagedToon.nametag3d.setScale, 1), Func(self.cagedToon.clearChat), ActorInterval(self, 'turn2Fb'))), (35, Sequence(Func(self.clearChat), self.loseCogSuits(self.toonsA, self.battleANode, (0, 18, 5, -180, 0, 0)), self.loseCogSuits(self.toonsB, self.battleBNode, (0, 18, 5, -180, 0, 0)))), (38, Sequence(self.toonNormalEyes(self.involvedToons), camera.posHprInterval(1, (-23.4, -145.6, 44.0), (-10.0, -12.5, 0), blendType='easeInOut'), Func(self.loop, 'Fb_neutral'), Func(self.rampA.request, 'retract'), Func(self.rampB.request, 'retract'), Parallel(self.backupToonsToBattlePosition(self.toonsA, self.battleANode), self.backupToonsToBattlePosition(self.toonsB, self.battleBNode), Sequence(Wait(3), camera.posHprInterval(1, (-25, -35, 20.5), (-90, 0, 0), blendType='easeOut'), Func(self.setChatAbsolute, attackToons, CFSpeech), Wait(3), Func(self.destroyDoobers)))))) track.append(dialogTrack) return Sequence(Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction')) def __makeRollToBattleTwoMovie(self): startPos = Point3(ToontownGlobals.SellbotBossBattleOnePosHpr[0], ToontownGlobals.SellbotBossBattleOnePosHpr[1], ToontownGlobals.SellbotBossBattleOnePosHpr[2]) if self.arenaSide: topRampPos = Point3(*ToontownGlobals.SellbotBossTopRampPosB) topRampTurnPos = Point3(*ToontownGlobals.SellbotBossTopRampTurnPosB) p3Pos = Point3(*ToontownGlobals.SellbotBossP3PosB) else: topRampPos = Point3(*ToontownGlobals.SellbotBossTopRampPosA) topRampTurnPos = Point3(*ToontownGlobals.SellbotBossTopRampTurnPosA) p3Pos = Point3(*ToontownGlobals.SellbotBossP3PosA) battlePos = Point3(ToontownGlobals.SellbotBossBattleTwoPosHpr[0], ToontownGlobals.SellbotBossBattleTwoPosHpr[1], ToontownGlobals.SellbotBossBattleTwoPosHpr[2]) battleHpr = VBase3(ToontownGlobals.SellbotBossBattleTwoPosHpr[3], ToontownGlobals.SellbotBossBattleTwoPosHpr[4], ToontownGlobals.SellbotBossBattleTwoPosHpr[5]) bossTrack = Sequence() bossTrack.append(Func(self.setChatAbsolute, 
TTLocalizer.VPRampMessage, CFSpeech | CFTimeout)) bossTrack.append(Func(self.getGeomNode().setH, 180)) bossTrack.append(Func(self.loop, 'Fb_neutral')) track, hpr = self.rollBossToPoint(startPos, None, topRampPos, None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(topRampPos, hpr, topRampTurnPos, None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(topRampTurnPos, hpr, p3Pos, None, 0) bossTrack.append(track) track, hpr = self.rollBossToPoint(p3Pos, hpr, battlePos, None, 0) bossTrack.append(track) return Sequence(bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwo')) def cagedToonMovieFunction(self, instruct, cageIndex): self.notify.debug('cagedToonMovieFunction()') if not (hasattr(self, 'cagedToon') and hasattr(self.cagedToon, 'nametag') and hasattr(self.cagedToon, 'nametag3d')): return if instruct == 1: self.cagedToon.nametag3d.setScale(2) elif instruct == 2: self.cagedToon.setChatAbsolute(TTLocalizer.CagedToonDrop[cageIndex], CFSpeech) elif instruct == 3: self.cagedToon.nametag3d.setScale(1) elif instruct == 4: self.cagedToon.clearChat() def makeEndOfBattleMovie(self, hasLocalToon): name = self.uniqueName('CageDrop') seq = Sequence(name=name) seq.append(Func(self.cage.setPos, self.cagePos[self.cageIndex])) if hasLocalToon: seq += [Func(camera.reparentTo, render), Func(camera.setPosHpr, self.cage, 0, -50, 0, 0, 0, 0), Func(localAvatar.setCameraFov, ToontownGlobals.CogHQCameraFov), Func(self.hide)] seq += [Wait(0.5), Parallel(self.cage.posInterval(1, self.cagePos[self.cageIndex + 1], blendType='easeInOut'), SoundInterval(self.cageLowerSfx, duration=1)), Func(self.cagedToonMovieFunction, 1, self.cageIndex), Func(self.cagedToonMovieFunction, 2, self.cageIndex), Wait(3), Func(self.cagedToonMovieFunction, 3, self.cageIndex), Func(self.cagedToonMovieFunction, 4, self.cageIndex)] if hasLocalToon: seq += [Func(self.show), Func(camera.reparentTo, localAvatar), Func(camera.setPos, localAvatar.cameraPositions[0][0]), 
Func(camera.setHpr, 0, 0, 0)] self.cageIndex += 1 return seq def __makeBossDamageMovie(self): startPos = Point3(ToontownGlobals.SellbotBossBattleTwoPosHpr[0], ToontownGlobals.SellbotBossBattleTwoPosHpr[1], ToontownGlobals.SellbotBossBattleTwoPosHpr[2]) startHpr = Point3(*ToontownGlobals.SellbotBossBattleThreeHpr) bottomPos = Point3(*ToontownGlobals.SellbotBossBottomPos) deathPos = Point3(*ToontownGlobals.SellbotBossDeathPos) self.setPosHpr(startPos, startHpr) bossTrack = Sequence() bossTrack.append(Func(self.loop, 'Fb_neutral')) track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1) bossTrack.append(track) track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1) bossTrack.append(track) duration = bossTrack.getDuration() return bossTrack def __talkAboutPromotion(self, speech): if self.getPractice(): return speech if not self.localToonPromoted: pass elif self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.CagedToonPromotion newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)] if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel: speech += TTLocalizer.CagedToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1) if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels: speech += TTLocalizer.CagedToonHPBoost else: speech += TTLocalizer.CagedToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1) if self.keyReward: speech += TTLocalizer.BossRTKeyReward return speech def __makeCageOpenMovie(self): speech = TTLocalizer.CagedToonThankYou speech = self.__talkAboutPromotion(speech) name = self.uniqueName('CageOpen') seq = Sequence(Func(self.cage.setPos, self.cagePos[4]), Func(self.cageDoor.setHpr, VBase3(0, 0, 0)), Func(self.cagedToon.setPos, Point3(0, -2, 0)), Parallel(self.cage.posInterval(0.5, self.cagePos[5], blendType='easeOut'), SoundInterval(self.cageLowerSfx, duration=0.5)), Parallel(self.cageDoor.hprInterval(0.5, VBase3(0, 90, 0), blendType='easeOut'), 
Sequence(SoundInterval(self.cageDoorSfx), duration=0)), Wait(0.2), Func(self.cagedToon.loop, 'walk'), self.cagedToon.posInterval(0.8, Point3(0, -6, 0)), Func(self.cagedToon.setChatAbsolute, TTLocalizer.CagedToonYippee, CFSpeech), ActorInterval(self.cagedToon, 'jump'), Func(self.cagedToon.loop, 'neutral'), Func(self.cagedToon.headsUp, localAvatar), Func(self.cagedToon.setLocalPageChat, speech, 0), Func(camera.reparentTo, localAvatar), Func(camera.setPos, 0, -9, 9), Func(camera.lookAt, self.cagedToon, Point3(0, 0, 2)), name=name) return seq def __showOnscreenMessage(self, text): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1) return def __clearOnscreenMessage(self): if self.onscreenMessage: self.onscreenMessage.destroy() self.onscreenMessage = None return def __showWaitingMessage(self, task): self.__showOnscreenMessage(TTLocalizer.WaitingForOtherToons) def __placeCageShadow(self): if self.cageShadow == None: self.cageShadow = loader.loadModel('phase_3/models/props/drop_shadow') self.cageShadow.setPos(0, 77.9, 18) self.cageShadow.setColorScale(1, 1, 1, 0.6) self.cageShadow.reparentTo(render) return def __removeCageShadow(self): if self.cageShadow != None: self.cageShadow.detachNode() return def setCageIndex(self, cageIndex): self.cageIndex = cageIndex self.cage.setPos(self.cagePos[self.cageIndex]) if self.cageIndex >= 4: self.__placeCageShadow() else: self.__removeCageShadow() def loadEnvironment(self): DistributedBossCog.DistributedBossCog.loadEnvironment(self) self.geom = loader.loadModel('phase_9/models/cogHQ/BossRoomHQ') self.rampA = self.__findRamp('rampA', '**/west_ramp2') self.rampB = self.__findRamp('rampB', '**/west_ramp') self.rampC = self.__findRamp('rampC', '**/west_ramp1') self.cage = self.geom.find('**/cage') elevatorEntrance = self.geom.find('**/elevatorEntrance') 
elevatorEntrance.getChildren().detach() elevatorEntrance.setScale(1) elevatorModel = loader.loadModel('phase_9/models/cogHQ/cogHQ_elevator') elevatorModel.reparentTo(elevatorEntrance) self.setupElevator(elevatorModel) pos = self.cage.getPos() self.cagePos = [] for height in self.cageHeights: self.cagePos.append(Point3(pos[0], pos[1], height)) self.cageDoor = self.geom.find('**/cage_door') self.cage.setScale(1) self.rope = Rope.Rope(name='supportChain') self.rope.reparentTo(self.cage) self.rope.setup(2, ((self.cage, (0.15, 0.13, 16)), (self.geom, (0.23, 78, 120)))) self.rope.ropeNode.setRenderMode(RopeNode.RMBillboard) self.rope.ropeNode.setUvMode(RopeNode.UVDistance) self.rope.ropeNode.setUvDirection(0) self.rope.ropeNode.setUvScale(0.8) self.rope.setTexture(self.cage.findTexture('hq_chain')) self.rope.setTransparency(1) self.promotionMusic = loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.betweenBattleMusic = loader.loadMusic('phase_9/audio/bgm/encntr_toon_winning.ogg') self.battleTwoMusic = loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.battleThreeMusic = loader.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg') self.geom.reparentTo(render) def unloadEnvironment(self): DistributedBossCog.DistributedBossCog.unloadEnvironment(self) self.geom.removeNode() del self.geom del self.cage self.rampA.requestFinalState() self.rampB.requestFinalState() self.rampC.requestFinalState() del self.rampA del self.rampB del self.rampC def __loadMopaths(self): self.toonsEnterA = Mopath.Mopath() self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA') self.toonsEnterA.fFaceForward = 1 self.toonsEnterA.timeScale = 35 self.toonsEnterB = Mopath.Mopath() self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB') self.toonsEnterB.fFaceForward = 1 self.toonsEnterB.timeScale = 35 def __unloadMopaths(self): self.toonsEnterA.reset() self.toonsEnterB.reset() def __findRamp(self, name, path): ramp = 
self.geom.find(path) children = ramp.getChildren() animate = ramp.attachNewNode(name) children.reparentTo(animate) fsm = ClassicFSM.ClassicFSM(name, [State.State('extend', Functor(self.enterRampExtend, animate), Functor(self.exitRampExtend, animate), ['extended', 'retract', 'retracted']), State.State('extended', Functor(self.enterRampExtended, animate), Functor(self.exitRampExtended, animate), ['retract', 'retracted']), State.State('retract', Functor(self.enterRampRetract, animate), Functor(self.exitRampRetract, animate), ['extend', 'extended', 'retracted']), State.State('retracted', Functor(self.enterRampRetracted, animate), Functor(self.exitRampRetracted, animate), ['extend', 'extended']), State.State('off', Functor(self.enterRampOff, animate), Functor(self.exitRampOff, animate))], 'off', 'off', onUndefTransition=ClassicFSM.ClassicFSM.DISALLOW) fsm.enterInitialState() return fsm def enterRampExtend(self, animate): intervalName = self.uniqueName('extend-%s' % animate.getName()) adjustTime = 2.0 * animate.getX() / 18.0 ival = Parallel(SoundInterval(self.rampSlideSfx, node=animate), animate.posInterval(adjustTime, Point3(0, 0, 0), blendType='easeInOut', name=intervalName)) ival.start() self.storeInterval(ival, intervalName) def exitRampExtend(self, animate): intervalName = self.uniqueName('extend-%s' % animate.getName()) self.clearInterval(intervalName) def enterRampExtended(self, animate): animate.setPos(0, 0, 0) def exitRampExtended(self, animate): pass def enterRampRetract(self, animate): intervalName = self.uniqueName('retract-%s' % animate.getName()) adjustTime = 2.0 * (18 - animate.getX()) / 18.0 ival = Parallel(SoundInterval(self.rampSlideSfx, node=animate), animate.posInterval(adjustTime, Point3(18, 0, 0), blendType='easeInOut', name=intervalName)) ival.start() self.storeInterval(ival, intervalName) def exitRampRetract(self, animate): intervalName = self.uniqueName('retract-%s' % animate.getName()) self.clearInterval(intervalName) def 
enterRampRetracted(self, animate): animate.setPos(18, 0, 0) def exitRampRetracted(self, animate): pass def enterRampOff(self, animate): pass def exitRampOff(self, animate): pass def enterOff(self): DistributedBossCog.DistributedBossCog.enterOff(self) if self.cagedToon: self.cagedToon.clearChat() if self.rampA: self.rampA.request('off') if self.rampB: self.rampB.request('off') if self.rampC: self.rampC.request('off') def enterWaitForToons(self): DistributedBossCog.DistributedBossCog.enterWaitForToons(self) self.geom.hide() self.cagedToon.removeActive() def exitWaitForToons(self): DistributedBossCog.DistributedBossCog.exitWaitForToons(self) self.geom.show() self.cagedToon.addActive() def enterElevator(self): DistributedBossCog.DistributedBossCog.enterElevator(self) self.rampA.request('extended') self.rampB.request('extended') self.rampC.request('retracted') self.setCageIndex(0) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr) self.happy = 1 self.raised = 1 self.forward = 1 self.doAnimate() self.cagedToon.removeActive() base.camLens.setMinFov(ToontownGlobals.VPElevatorFov / (4.0 / 3.0)) def exitElevator(self): DistributedBossCog.DistributedBossCog.exitElevator(self) self.cagedToon.addActive() def enterIntroduction(self): self.reparentTo(render) self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr) self.stopAnimate() DistributedBossCog.DistributedBossCog.enterIntroduction(self) self.rampA.request('extended') self.rampB.request('extended') self.rampC.request('retracted') self.setCageIndex(0) base.playMusic(self.promotionMusic, looping=1, volume=0.9) def exitIntroduction(self): DistributedBossCog.DistributedBossCog.exitIntroduction(self) self.promotionMusic.stop() def enterBattleOne(self): DistributedBossCog.DistributedBossCog.enterBattleOne(self) self.reparentTo(render) self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr) self.clearChat() self.cagedToon.clearChat() self.rampA.request('retract') 
self.rampB.request('retract') self.rampC.request('retract') if self.battleA == None or self.battleB == None: cageIndex = 1 else: cageIndex = 0 self.setCageIndex(cageIndex) return def exitBattleOne(self): DistributedBossCog.DistributedBossCog.exitBattleOne(self) def enterRollToBattleTwo(self): self.disableToonCollision() self.releaseToons() if self.arenaSide: self.rampA.request('retract') self.rampB.request('extend') else: self.rampA.request('extend') self.rampB.request('retract') self.rampC.request('retract') self.reparentTo(render) self.setCageIndex(2) self.stickBossToFloor() intervalName = 'RollToBattleTwo' seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName) seq.start() self.storeInterval(seq, intervalName) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) self.__showEasyBarrels() taskMgr.doMethodLater(0.5, self.enableToonCollision, 'enableToonCollision') def __onToPrepareBattleTwo(self): self.disableToonCollision() self.unstickBoss() self.setPosHpr(*ToontownGlobals.SellbotBossBattleTwoPosHpr) self.doneBarrier('RollToBattleTwo') def exitRollToBattleTwo(self): self.unstickBoss() intervalName = 'RollToBattleTwo' self.clearInterval(intervalName) self.betweenBattleMusic.stop() def disableToonCollision(self): base.localAvatar.collisionsOff() def enableToonCollision(self, task): base.localAvatar.collisionsOn() def enterPrepareBattleTwo(self): self.cleanupIntervals() self.__hideEasyBarrels() self.controlToons() self.clearChat() self.cagedToon.clearChat() self.reparentTo(render) if self.arenaSide: self.rampA.request('retract') self.rampB.request('extend') else: self.rampA.request('extend') self.rampB.request('retract') self.rampC.request('retract') self.reparentTo(render) self.setCageIndex(2) camera.reparentTo(render) camera.setPosHpr(self.cage, 0, -17, 3.3, 0, 0, 0) (localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov),) self.hide() self.acceptOnce(self.cagedToon.uniqueName('doneChatPage'), 
self.__onToBattleTwo) self.cagedToon.setLocalPageChat(TTLocalizer.CagedToonPrepareBattleTwo, 1) base.playMusic(self.stingMusic, looping=0, volume=1.0) taskMgr.doMethodLater(0.5, self.enableToonCollision, 'enableToonCollision') def __onToBattleTwo(self, elapsed): self.doneBarrier('PrepareBattleTwo') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleTwo(self): self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore(self.cagedToon.uniqueName('doneChatPage')) self.__clearOnscreenMessage() self.stingMusic.stop() def enterBattleTwo(self): self.cleanupIntervals() self.reparentTo(render) self.setPosHpr(*ToontownGlobals.SellbotBossBattleTwoPosHpr) self.clearChat() self.cagedToon.clearChat() self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('retract') self.releaseToons() self.toonsToBattlePosition(self.toonsA, self.battleANode) self.toonsToBattlePosition(self.toonsB, self.battleBNode) if self.battleA == None or self.battleB == None: cageIndex = 3 else: cageIndex = 2 self.setCageIndex(cageIndex) base.playMusic(self.battleTwoMusic, looping=1, volume=0.9) return def exitBattleTwo(self): intervalName = self.uniqueName('cageDrop') self.clearInterval(intervalName) self.cleanupBattles() self.battleTwoMusic.stop() def enterPrepareBattleThree(self): self.cleanupIntervals() self.controlToons() self.clearChat() self.cagedToon.clearChat() self.reparentTo(render) self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') self.setCageIndex(4) camera.reparentTo(render) camera.setPosHpr(self.cage, 0, -17, 3.3, 0, 0, 0) (localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov),) self.hide() self.acceptOnce(self.cagedToon.uniqueName('doneChatPage'), self.__onToBattleThree) self.cagedToon.setLocalPageChat(TTLocalizer.CagedToonPrepareBattleThree, 1) base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9) def __onToBattleThree(self, elapsed = 0): 
self.doneBarrier('PrepareBattleThree') taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage')) def exitPrepareBattleThree(self): self.show() taskMgr.remove(self.uniqueName('WaitingMessage')) self.ignore(self.cagedToon.uniqueName('doneChatPage')) intervalName = 'PrepareBattleThree' self.clearInterval(intervalName) self.__clearOnscreenMessage() self.betweenBattleMusic.stop() def enterBattleThree(self): DistributedBossCog.DistributedBossCog.enterBattleThree(self) self.clearChat() self.cagedToon.clearChat() self.reparentTo(render) self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') self.setCageIndex(4) self.happy = 0 self.raised = 1 self.forward = 1 self.doAnimate() self.accept('enterCage', self.__touchedCage) self.accept('pieSplat', self.__pieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) self.accept('begin-pie', self.__foundPieButton) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice')) self.stickBossToFloor() self.bossDamageMovie = self.__makeBossDamageMovie() bossDoneEventName = self.uniqueName('DestroyedBoss') self.bossDamageMovie.setDoneEvent(bossDoneEventName) self.acceptOnce(bossDoneEventName, self.__doneBattleThree) self.resetMaxDamage() self.bossDamageToMovie = self.bossDamageMovie.getDuration() / self.bossMaxDamage self.bossDamageMovie.setT(self.bossDamage * self.bossDamageToMovie) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9) def __doneBattleThree(self): self.setState('NearVictory') self.unstickBoss() def exitBattleThree(self): DistributedBossCog.DistributedBossCog.exitBattleThree(self) bossDoneEventName = self.uniqueName('DestroyedBoss') self.ignore(bossDoneEventName) taskMgr.remove(self.uniqueName('StandUp')) self.ignore('enterCage') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.ignore('begin-pie') 
self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.__removeCageShadow() self.bossDamageMovie.finish() self.bossDamageMovie = None self.unstickBoss() taskName = 'RecoverBossDamage' taskMgr.remove(taskName) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() return def enterNearVictory(self): self.cleanupIntervals() self.reparentTo(render) self.setPos(*ToontownGlobals.SellbotBossDeathPos) self.setHpr(*ToontownGlobals.SellbotBossBattleThreeHpr) self.clearChat() self.cagedToon.clearChat() self.setCageIndex(4) self.releaseToons(finalBattle=1) self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') self.accept('enterCage', self.__touchedCage) self.accept('pieSplat', self.__finalPieSplat) self.accept('localPieSplat', self.__localPieSplat) self.accept('outOfPies', self.__outOfPies) localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.happy = 0 self.raised = 0 self.forward = 1 self.doAnimate() self.setDizzy(1) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def exitNearVictory(self): self.ignore('enterCage') self.ignore('pieSplat') self.ignore('localPieSplat') self.ignore('outOfPies') self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.__removeCageShadow() self.setDizzy(0) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterVictory(self): self.cleanupIntervals() localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov) self.reparentTo(render) self.setPos(*ToontownGlobals.SellbotBossDeathPos) self.setHpr(*ToontownGlobals.SellbotBossBattleThreeHpr) self.clearChat() self.cagedToon.clearChat() self.setCageIndex(4) self.releaseToons(finalBattle=1) self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') 
self.happy = 0 self.raised = 0 self.forward = 1 self.setChatAbsolute(TTLocalizer.VPDeathTaunt, CFSpeech | CFTimeout) self.doAnimate('Fb_fall', now=1) self.acceptOnce(self.animDoneEvent, self.__continueVictory) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __continueVictory(self): self.stopAnimate() self.stash() self.doneBarrier('Victory') def exitVictory(self): self.stopAnimate() self.unstash() self.__removeCageShadow() localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov) self.battleThreeMusicTime = self.battleThreeMusic.getTime() self.battleThreeMusic.stop() def enterReward(self): self.cleanupIntervals() self.clearChat() self.cagedToon.clearChat() self.stash() self.stopAnimate() self.setCageIndex(4) self.releaseToons(finalBattle=1) self.toMovieMode() self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') panelName = self.uniqueName('reward') self.rewardPanel = RewardPanel.RewardPanel(panelName) victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True) ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward)) intervalName = 'RewardMovie' delayDeletes = [] for toonId in self.involvedToons: toon = self.cr.doId2do.get(toonId) if toon: delayDeletes.append(DelayDelete.DelayDelete(toon, 'SellbotBoss.enterReward')) ival.delayDeletes = delayDeletes ival.start() self.storeInterval(ival, intervalName) base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime) def __doneReward(self): self.doneBarrier('Reward') self.toWalkMode() def exitReward(self): intervalName = 'RewardMovie' self.clearInterval(intervalName) self.unstash() self.rewardPanel.destroy() del self.rewardPanel self.__removeCageShadow() self.battleThreeMusicTime = 0 self.battleThreeMusic.stop() def enterEpilogue(self): 
self.cleanupIntervals() self.clearChat() self.cagedToon.clearChat() self.stash() self.stopAnimate() self.setCageIndex(4) self.controlToons() self.rampA.request('retract') self.rampB.request('retract') self.rampC.request('extend') self.__arrangeToonsAroundCage() camera.reparentTo(render) camera.setPosHpr(-24, 52, 27.5, -53, -13, 0) intervalName = 'EpilogueMovie' seq = Sequence(self.__makeCageOpenMovie(), name=intervalName) seq.start() self.storeInterval(seq, intervalName) self.accept(self.cagedToon.uniqueName('nextChatPage'), self.__epilogueChatNext) self.accept(self.cagedToon.uniqueName('doneChatPage'), self.__epilogueChatDone) base.playMusic(self.epilogueMusic, looping=1, volume=0.9) def __epilogueChatNext(self, pageNumber, elapsed): if pageNumber == 2: if self.cagedToon.style.torso[1] == 'd': track = ActorInterval(self.cagedToon, 'curtsy') else: track = ActorInterval(self.cagedToon, 'bow') track = Sequence(track, Func(self.cagedToon.loop, 'neutral')) intervalName = 'EpilogueMovieToonAnim' self.storeInterval(track, intervalName) track.start() def __epilogueChatDone(self, elapsed): self.cagedToon.setChatAbsolute(TTLocalizer.CagedToonGoodbye, CFSpeech) self.ignore(self.cagedToon.uniqueName('nextChatPage')) self.ignore(self.cagedToon.uniqueName('doneChatPage')) intervalName = 'EpilogueMovieToonAnim' self.clearInterval(intervalName) track = Parallel(Sequence(ActorInterval(self.cagedToon, 'wave'), Func(self.cagedToon.loop, 'neutral')), Sequence(Wait(0.5), Func(self.localToonToSafeZone))) self.storeInterval(track, intervalName) track.start() def exitEpilogue(self): self.clearInterval('EpilogueMovieToonAnim') self.unstash() self.__removeCageShadow() self.epilogueMusic.stop() def __arrangeToonsAroundCage(self): radius = 15 numToons = len(self.involvedToons) center = (numToons - 1) / 2.0 for i in xrange(numToons): toon = base.cr.doId2do.get(self.involvedToons[i]) if toon: angle = 270 - 15 * (i - center) radians = angle * math.pi / 180.0 x = math.cos(radians) * radius y = 
math.sin(radians) * radius toon.setPos(self.cage, x, y, 0) toon.setZ(18.0) toon.headsUp(self.cage) def enterFrolic(self): DistributedBossCog.DistributedBossCog.enterFrolic(self) self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr) def doorACallback(self, isOpen): if self.insidesANodePath: if isOpen: self.insidesANodePath.unstash() else: self.insidesANodePath.stash() def doorBCallback(self, isOpen): if self.insidesBNodePath: if isOpen: self.insidesBNodePath.unstash() else: self.insidesBNodePath.stash() def __toonsToPromotionPosition(self, toonIds, battleNode): points = BattleBase.BattleBase.toonPoints[len(toonIds) - 1] for i in xrange(len(toonIds)): toon = base.cr.doId2do.get(toonIds[i]) if toon: toon.reparentTo(render) pos, h = points[i] toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0) def __doobersToPromotionPosition(self, doobers, battleNode): points = BattleBase.BattleBase.toonPoints[len(doobers) - 1] for i, suit in enumerate(doobers): suit.loop('neutral') pos, h = points[i] suit.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], h, 0, 0) def __touchedCage(self, entry): self.sendUpdate('touchCage', []) self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) base.playSfx(self.piesRestockSfx) if not self.everThrownPie: taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice')) def __outOfPies(self): self.__showOnscreenMessage(TTLocalizer.BossBattleNeedMorePies) taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice')) def __howToGetPies(self, task): self.__showOnscreenMessage(TTLocalizer.BossBattleHowToGetPies) def __howToThrowPies(self, task): self.__showOnscreenMessage(TTLocalizer.BossBattleHowToThrowPies) def __foundPieButton(self): self.everThrownPie = 1 self.__clearOnscreenMessage() taskMgr.remove(self.uniqueName('PieAdvice')) def __pieSplat(self, toon, pieCode): if config.GetBool('easy-vp', 0): if not self.dizzy: pieCode = ToontownGlobals.PieCodeBossInsides if pieCode == 
ToontownGlobals.PieCodeBossInsides: if toon == localAvatar: self.d_hitBossInsides() self.flashRed() elif pieCode == ToontownGlobals.PieCodeBossCog: if toon == localAvatar: self.d_hitBoss(1) if self.dizzy: self.flashRed() self.doAnimate('hit', now=1) def __localPieSplat(self, pieCode, entry): if pieCode != ToontownGlobals.PieCodeToon: return avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId') if avatarDoId == '': self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath())) return doId = int(avatarDoId) if doId != localAvatar.doId: self.d_hitToon(doId) def __finalPieSplat(self, toon, pieCode): if pieCode != ToontownGlobals.PieCodeBossCog: return self.sendUpdate('finalPieSplat', []) self.ignore('pieSplat') def cagedToonBattleThree(self, index, avId): str = TTLocalizer.CagedToonBattleThree.get(index) if str: toonName = '' if avId: toon = self.cr.doId2do.get(avId) if not toon: self.cagedToon.clearChat() return toonName = toon.getName() text = str % {'toon': toonName} self.cagedToon.setChatAbsolute(text, CFSpeech | CFTimeout) else: self.cagedToon.clearChat() def cleanupAttacks(self): self.__cleanupStrafe() def __cleanupStrafe(self): if self.strafeInterval: self.strafeInterval.finish() self.strafeInterval = None return def doStrafe(self, side, direction): gearRoot = self.rotateNode.attachNewNode('gearRoot') if side == 0: gearRoot.setPos(0, -7, 3) gearRoot.setHpr(180, 0, 0) door = self.doorA else: gearRoot.setPos(0, 7, 3) door = self.doorB gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack)) gearModel = self.getGearFrisbee() gearModel.setScale(0.1) t = self.getBossDamage() / 100.0 gearTrack = Parallel() numGears = int(4 + 6 * t + 0.5) time = 5.0 - 4.0 * t spread = 60 * math.pi / 180.0 if direction == 1: spread = -spread dist = 50 rate = time / numGears for i in xrange(numGears): node = gearRoot.attachNewNode(str(i)) node.hide() node.setPos(0, 0, 0) gear = gearModel.instanceTo(node) angle = (float(i) / (numGears - 1) - 
0.5) * spread x = dist * math.sin(angle) y = dist * math.cos(angle) h = random.uniform(-720, 720) gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode))) seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close')) self.__cleanupStrafe() self.strafeInterval = seq seq.start() def __showEasyBarrels(self): barrelNodes = hidden.findAllMatches('**/Distributed*Barrel-*') if not barrelNodes or barrelNodes.isEmpty(): return if render.find('barrelsRootNode'): self.notify.warning('__showEasyBarrels(): barrelsRootNode already exists') return self.barrelsRootNode = render.attachNewNode('barrelsRootNode') self.barrelsRootNode.setPos(*SellbotBossGlobals.BarrelsStartPos) if self.arenaSide == 0: self.barrelsRootNode.setHpr(180, 0, 0) else: self.barrelsRootNode.setHpr(0, 0, 0) for i, barrelNode in enumerate(barrelNodes): barrel = base.cr.doId2do.get(int(barrelNode.getNetTag('doId'))) SellbotBossGlobals.setBarrelAttr(barrel, barrel.entId) if hasattr(barrel, 'applyLabel'): barrel.applyLabel() barrel.setPosHpr(barrel.pos, barrel.hpr) barrel.reparentTo(self.barrelsRootNode) intervalName = 'MakeBarrelsAppear' seq = Sequence(LerpPosInterval(self.barrelsRootNode, 0.5, Vec3(*SellbotBossGlobals.BarrelsFinalPos), blendType='easeInOut'), name=intervalName) seq.start() self.storeInterval(seq, intervalName) def __hideEasyBarrels(self): if hasattr(self, 'barrelsRootNode'): self.barrelsRootNode.removeNode() intervalName = 'MakeBarrelsAppear' self.clearInterval(intervalName) def toonPromoted(self, promoted): self.localToonPromoted = promoted
apache-2.0
jhseu/tensorflow
tensorflow/python/autograph/core/naming.py
11
4124
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Symbol naming utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum from tensorflow.python.autograph.pyct import qual_names from tensorflow.python.autograph.utils import misc class _NamingStyle(enum.Enum): SNAKE = 1 CAMEL = 2 class Namer(object): """Symbol name generartor.""" def __init__(self, global_namespace): self.global_namespace = global_namespace self.generated_names = set() def _as_symbol_name(self, fqn, style=_NamingStyle.SNAKE): """Returns a symbol name that matches a fully-qualified name. The returned name is safe to use for Python symbols. Any special characters present in fqn are replaced according to the style argument. Examples: self._as_symbol_name('foo.bar', style=_NamingStyle.CAMEL) == 'FooBar' self._as_symbol_name('foo.bar', style=_NamingStyle.SNAKE) == 'foo_bar' See the unit tests for more examples. Args: fqn: Union[Text, Tuple[Text]] a fully-qualified symbol name. The qualifier may include module, class names, attributes, etc. 
style: _NamingStyle Returns: Text """ assert style in _NamingStyle if isinstance(fqn, tuple): cn = '.'.join(fqn) else: cn = fqn # Until we clean up the whole FQN mechanism, `fqn` may not be # canonical, that is, in can appear as ('foo.bar', 'baz') # This replaces any characters that might remain because of that. pieces = cn.split('.') if style == _NamingStyle.CAMEL: pieces = tuple(misc.capitalize_initial(p) for p in pieces) return ''.join(pieces) elif style == _NamingStyle.SNAKE: return '_'.join(pieces) def class_name(self, original_fqn): """Returns the name of a converted class.""" canonical_name = self._as_symbol_name( original_fqn, style=_NamingStyle.CAMEL) new_name_root = 'Tf%s' % canonical_name new_name = new_name_root n = 0 while new_name in self.global_namespace: n += 1 new_name = '%s_%d' % (new_name_root, n) self.generated_names.add(new_name) return new_name def function_name(self, original_fqn): """Returns the name of a converted function.""" canonical_name = self._as_symbol_name( original_fqn, style=_NamingStyle.SNAKE) new_name_root = 'tf__%s' % canonical_name new_name = new_name_root n = 0 while new_name in self.global_namespace: n += 1 new_name = '%s_%d' % (new_name_root, n) self.generated_names.add(new_name) return new_name def new_symbol(self, name_root, reserved_locals): """See control_flow.SymbolNamer.new_symbol.""" # reserved_locals may contain QNs. all_reserved_locals = set() for s in reserved_locals: if isinstance(s, qual_names.QN): all_reserved_locals.update(s.qn) elif isinstance(s, str): all_reserved_locals.add(s) else: raise ValueError('Unexpected symbol type "%s"' % type(s)) pieces = name_root.split('_') if pieces[-1].isdigit(): name_root = '_'.join(pieces[:-1]) n = int(pieces[-1]) else: n = 0 new_name = name_root while (new_name in self.global_namespace or new_name in all_reserved_locals or new_name in self.generated_names): n += 1 new_name = '%s_%d' % (name_root, n) self.generated_names.add(new_name) return new_name
apache-2.0
munfor/laravel-angular-cms
node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
1824
3474
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""gypd output module

This module produces gyp input as its output.  Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from.  Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.

This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd.

It is useful to inspect the results of the various merges, expansions, and
conditional evaluations performed by gyp and to see a representation of
what would be fed to a generator module.

It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments.  This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to run
"pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.

The specific formatting of the output generated by this module is subject
to change.
"""

import gyp.common
import errno
import os
import pprint


# These variables should just be spit back out as variable references.
_generator_identity_variables = [
  'CONFIGURATION_NAME',
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'LIB_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
  'SHARED_LIB_DIR',
  'SHARED_LIB_PREFIX',
  'SHARED_LIB_SUFFIX',
  'STATIC_LIB_PREFIX',
  'STATIC_LIB_SUFFIX',
]

# gypd doesn't define a default value for OS like many other generator
# modules.  Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}

# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True

# TODO(mark): This always uses <, which isn't right.  The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase.  Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v


def GenerateOutput(target_list, target_dicts, data, params):
  """Writes a pretty-printed .gypd dump for each .gyp file in target_list.

  Args:
    target_list: list of fully-qualified target names
        ('path/to/file.gyp:target'); only the file component is used here.
    target_dicts: unused; part of the standard generator interface.
    data: dict mapping each input .gyp file name to its fully processed dict.
    params: generator parameters; params['options'].suffix is inserted into
        the output file name before the '.gypd' extension.
  """
  output_files = {}
  for qualified_target in target_list:
    # Only the input file component of the qualified target is needed; the
    # target name itself was unused.
    input_file = gyp.common.ParseQualifiedTarget(qualified_target)[0]
    if not input_file.endswith('.gyp'):
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'

    # Several targets may come from the same .gyp file; dump each file once.
    if output_file not in output_files:
      output_files[output_file] = input_file

  # .items() (not the Python-2-only .iteritems()) keeps this working on both
  # Python 2 and 3.
  for output_file, input_file in output_files.items():
    # 'with' guarantees the file is closed even if pprint.pprint raises,
    # fixing the handle leak of the previous open()/close() pair.
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
mit
caisq/tensorflow
tensorflow/python/keras/preprocessing/text_test.py
40
5147
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for text data preprocessing utils."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python import keras
from tensorflow.python.platform import test


class TestText(test.TestCase):
  """Unit tests for keras.preprocessing.text helpers (one_hot, hashing_trick,
  Tokenizer, text_to_word_sequence)."""

  def test_one_hot(self):
    # one_hot should yield one integer per word, each within [0, n).
    text = 'The cat sat on the mat.'
    encoded = keras.preprocessing.text.one_hot(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 0)

    # Test on unicode.
    text = u'The cat sat on the mat.'
    encoded = keras.preprocessing.text.one_hot(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 0)

  def test_tokenizer(self):
    # Three documents; num_words=10 caps the vocabulary used for sequences
    # and matrices.
    texts = [
        'The cat sat on the mat.',
        'The dog sat on the log.',
        'Dogs and cats living together.'
    ]
    tokenizer = keras.preprocessing.text.Tokenizer(num_words=10)
    tokenizer.fit_on_texts(texts)

    # Sequences come from the generator API; indices must stay below
    # num_words and start at 1 (0 is reserved).
    sequences = []
    for seq in tokenizer.texts_to_sequences_generator(texts):
      sequences.append(seq)
    self.assertLess(np.max(np.max(sequences)), 10)
    self.assertEqual(np.min(np.min(sequences)), 1)

    tokenizer.fit_on_sequences(sequences)

    # Every vectorization mode must produce a (num_docs, num_words) matrix.
    for mode in ['binary', 'count', 'tfidf', 'freq']:
      matrix = tokenizer.texts_to_matrix(texts, mode)
      self.assertEqual(matrix.shape, (3, 10))

  def test_hashing_trick_hash(self):
    # Default hash function: one index per word, in [1, n) (0 is reserved).
    text = 'The cat sat on the mat.'
    encoded = keras.preprocessing.text.hashing_trick(text, 5)
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 1)

  def test_hashing_trick_md5(self):
    # Same contract when the md5 hash function is selected explicitly.
    text = 'The cat sat on the mat.'
    encoded = keras.preprocessing.text.hashing_trick(
        text, 5, hash_function='md5')
    self.assertEqual(len(encoded), 6)
    self.assertLessEqual(np.max(encoded), 4)
    self.assertGreaterEqual(np.min(encoded), 1)

  def test_tokenizer_oov_flag(self):
    x_train = ['This text has only known words']
    x_test = ['This text has some unknown words']  # 2 OOVs: some, unknown

    # Default, without OOV flag
    tokenizer = keras.preprocessing.text.Tokenizer()
    tokenizer.fit_on_texts(x_train)
    x_test_seq = tokenizer.texts_to_sequences(x_test)
    self.assertEqual(len(x_test_seq[0]), 4)  # discards 2 OOVs

    # With OOV feature
    tokenizer = keras.preprocessing.text.Tokenizer(oov_token='<unk>')
    tokenizer.fit_on_texts(x_train)
    x_test_seq = tokenizer.texts_to_sequences(x_test)
    self.assertEqual(len(x_test_seq[0]), 6)  # OOVs marked in place

  def test_sequential_fit(self):
    # fit_on_texts may be called repeatedly, with raw strings and with
    # pre-split word sequences; document_count accumulates across calls.
    texts = [
        'The cat sat on the mat.',
        'The dog sat on the log.',
        'Dogs and cats living together.'
    ]
    word_sequences = [['The', 'cat', 'is', 'sitting'],
                      ['The', 'dog', 'is', 'standing']]
    tokenizer = keras.preprocessing.text.Tokenizer()
    tokenizer.fit_on_texts(texts)
    tokenizer.fit_on_texts(word_sequences)

    self.assertEqual(tokenizer.document_count, 5)

    # Matrix conversion must accept both input forms without raising.
    tokenizer.texts_to_matrix(texts)
    tokenizer.texts_to_matrix(word_sequences)

  def test_text_to_word_sequence(self):
    # Default filters strip punctuation; empty tokens are dropped.
    text = 'hello! ? world!'
    seq = keras.preprocessing.text.text_to_word_sequence(text)
    self.assertEqual(seq, ['hello', 'world'])

  def test_text_to_word_sequence_multichar_split(self):
    # A multi-character split token is honored.
    text = 'hello!stop?world!'
    seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop')
    self.assertEqual(seq, ['hello', 'world'])

  def test_text_to_word_sequence_unicode(self):
    # Non-ASCII words must survive splitting intact.
    text = u'ali! veli? kırk dokuz elli'
    seq = keras.preprocessing.text.text_to_word_sequence(text)
    self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])

  def test_text_to_word_sequence_unicode_multichar_split(self):
    # Multi-character split combined with non-ASCII content.
    text = u'ali!stopveli?stopkırkstopdokuzstopelli'
    seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop')
    self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])

  def test_tokenizer_unicode(self):
    # word_counts tracks the full (unicode) vocabulary: 5 distinct words.
    texts = [
        u'ali veli kırk dokuz elli',
        u'ali veli kırk dokuz elli veli kırk dokuz'
    ]
    tokenizer = keras.preprocessing.text.Tokenizer(num_words=5)
    tokenizer.fit_on_texts(texts)

    self.assertEqual(len(tokenizer.word_counts), 5)


if __name__ == '__main__':
  test.main()
apache-2.0
dirn/ansible
lib/ansible/plugins/strategies/linear.py
3
12455
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
from ansible.utils.debug import debug


# The "linear" strategy: all hosts execute the same task at the same time,
# and the queue must drain before the play advances to the next task. Hosts
# that are ahead or behind (e.g. inside a rescue/always section) receive a
# noop task so the PlayIterator stays in lock step across the whole batch.
class StrategyModule(StrategyBase):

    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        # Dummy task handed to hosts that must wait this round. It is a
        # 'meta: noop' task so nothing is actually executed for them.
        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        # Peek (without advancing) at each host's next (state, task) pair.
        host_tasks = {}
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)

        # Tally how many hosts sit in each iterator run state so we can pick
        # which state gets to run this round (setup > tasks > rescue > always).
        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        # Track the earliest (lowest-index) block any non-complete host is in;
        # only hosts in that block are advanced this round.
        lowest_cur_block = len(iterator._blocks)

        for (k, v) in host_tasks.iteritems():
            if v is None:
                # host has no further tasks
                continue
            (s, t) = v
            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block
            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            for host in hosts:
                (s, t) = host_tasks[host.name]
                if s.run_state == cur_state and s.cur_block == cur_block:
                    # Re-fetch without peek so this host's iterator state is
                    # actually advanced past the task we hand out.
                    new_t = iterator.get_next_task_for_host(host)
                    #if new_t != t:
                    #    raise AnsibleError("iterator error, wtf?")
                    rvals.append((host, t))
                else:
                    # Host is not in the selected state/block: it idles on a noop.
                    rvals.append((host, noop_task))
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        return [(host, None) for host in hosts]

    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iteratate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                hosts_left = self._inventory.get_hosts(iterator._play.hosts)
                debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
                for (host, task) in host_tasks:
                    if not task:
                        # this host is ITERATING_COMPLETE; nothing to queue
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.
                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, play_context)
                        else:
                            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                    else:
                        # Build the per-host variable context before templating
                        # and queueing the task for a worker process.
                        debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        debug("done getting variables")

                        # Send the v2_playbook_on_task_start callback only once per
                        # task, not once per host; template the name on a copy so
                        # the shared task object is not mutated.
                        if not callback_sent:
                            temp_task = task.copy()
                            temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False)
                            self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    # Drain any results that have already come back while queueing.
                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError, e:
                    # NOTE(review): the error is silently discarded here; the run
                    # just fails without surfacing the include-processing error.
                    return False

                if len(included_files) > 0:
                    # Rebuild a noop task for the include-expansion phase below.
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError, e:
                            # Loading the include failed: mark every host that
                            # requested it as failed and move on to the next file.
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                                # FIXME: callback here?
                            print(e)
                            continue

                        for new_block in new_blocks:
                            # Mirror each included block with a same-shape block of
                            # noop tasks for the hosts that did not include it.
                            noop_block = Block(parent_block=task._block)
                            noop_block.block  = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                debug("results queue empty")
            except (IOError, EOFError), e:
                debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
gpl-3.0
gauribhoite/personfinder
env/google_appengine/lib/django-1.2/django/contrib/gis/gdal/envelope.py
321
7044
"""
 The GDAL/OGR library uses an Envelope structure to hold the bounding
 box information for a geometry.  The envelope (bounding box) contains
 two pairs of coordinates, one for the lower left coordinate and one
 for the upper right coordinate:

                           +----------o Upper right; (max_x, max_y)
                           |          |
                           |          |
                           |          |
 Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import OGRException

# The OGR definition of an Envelope is a C structure containing four doubles.
#  See the 'ogr_core.h' source file for more information:
#   http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
    "Represents the OGREnvelope C Structure."
    _fields_ = [("MinX", c_double),
                ("MaxX", c_double),
                ("MinY", c_double),
                ("MaxY", c_double),
                ]

class Envelope(object):
    """
    The Envelope object is a C structure that contains the minimum and
    maximum X, Y coordinates for a rectangle bounding box.  The naming
    of the variables is compatible with the OGR Envelope structure.
    """

    def __init__(self, *args):
        """
        The initialization function may take an OGREnvelope structure, 4-element
        tuple or list, or 4 individual arguments.

        Raises OGRException for a wrong element/argument count or inverted
        coordinates, and TypeError for an unsupported argument type.
        """
        if len(args) == 1:
            if isinstance(args[0], OGREnvelope):
                # OGREnvelope (a ctypes Structure) was passed in.
                self._envelope = args[0]
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) != 4:
                    raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
                else:
                    self._from_sequence(args[0])
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 4:
            # Individual parameters passed in.
            #  Thanks to ww for the help
            # Materialize the mapped floats into a list so _from_sequence can
            # index it (map() returns a one-shot iterator on Python 3).
            self._from_sequence([float(a) for a in args])
        else:
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))

        # Checking the x,y coordinates
        if self.min_x > self.max_x:
            raise OGRException('Envelope minimum X > maximum X.')
        if self.min_y > self.max_y:
            raise OGRException('Envelope minimum Y > maximum Y.')

    def __eq__(self, other):
        """
        Returns True if the envelopes are equivalent; can compare against
        other Envelopes and 4-tuples.
        """
        if isinstance(other, Envelope):
            return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
                   (self.max_x == other.max_x) and (self.max_y == other.max_y)
        elif isinstance(other, tuple) and len(other) == 4:
            return (self.min_x == other[0]) and (self.min_y == other[1]) and \
                   (self.max_x == other[2]) and (self.max_y == other[3])
        else:
            raise OGRException('Equivalence testing only works with other Envelopes.')

    def __str__(self):
        "Returns a string representation of the tuple."
        return str(self.tuple)

    def _from_sequence(self, seq):
        "Initializes the C OGR Envelope structure from the given sequence."
        self._envelope = OGREnvelope()
        self._envelope.MinX = seq[0]
        self._envelope.MinY = seq[1]
        self._envelope.MaxX = seq[2]
        self._envelope.MaxY = seq[3]

    def expand_to_include(self, *args):
        """
        Modifies the envelope to expand to include the boundaries of
        the passed-in 2-tuple (a point), 4-tuple (an extent) or
        envelope.

        Raises OGRException for a wrong element/argument count and
        TypeError for an unsupported argument type.
        """
        # We provide a number of different signatures for this method,
        # and the logic here is all about converting them into a
        # 4-tuple single parameter which does the actual work of
        # expanding the envelope.
        if len(args) == 1:
            if isinstance(args[0], Envelope):
                return self.expand_to_include(args[0].tuple)
            elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
                # A point-like object: treat it as a degenerate extent.
                return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) == 2:
                    return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
                elif len(args[0]) == 4:
                    (minx, miny, maxx, maxy) = args[0]
                    # Grow each bound only when the new extent exceeds it.
                    if minx < self._envelope.MinX:
                        self._envelope.MinX = minx
                    if miny < self._envelope.MinY:
                        self._envelope.MinY = miny
                    if maxx > self._envelope.MaxX:
                        self._envelope.MaxX = maxx
                    if maxy > self._envelope.MaxY:
                        self._envelope.MaxY = maxy
                else:
                    raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 2:
            # An x and an y parameter were passed in
            return self.expand_to_include((args[0], args[1], args[0], args[1]))
        elif len(args) == 4:
            # Individual parameters passed in.
            return self.expand_to_include(args)
        else:
            # Fixed: previously reported len(args[0]), which raised TypeError
            # for scalar arguments and reported the wrong count for sequences.
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))

    @property
    def min_x(self):
        "Returns the value of the minimum X coordinate."
        return self._envelope.MinX

    @property
    def min_y(self):
        "Returns the value of the minimum Y coordinate."
        return self._envelope.MinY

    @property
    def max_x(self):
        "Returns the value of the maximum X coordinate."
        return self._envelope.MaxX

    @property
    def max_y(self):
        "Returns the value of the maximum Y coordinate."
        return self._envelope.MaxY

    @property
    def ur(self):
        "Returns the upper-right coordinate."
        return (self.max_x, self.max_y)

    @property
    def ll(self):
        "Returns the lower-left coordinate."
        return (self.min_x, self.min_y)

    @property
    def tuple(self):
        "Returns a tuple representing the envelope."
        return (self.min_x, self.min_y, self.max_x, self.max_y)

    @property
    def wkt(self):
        "Returns WKT representing a Polygon for this envelope."
        # TODO: Fix significant figures.
        return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
               (self.min_x, self.min_y, self.min_x, self.max_y,
                self.max_x, self.max_y, self.max_x, self.min_y,
                self.min_x, self.min_y)
apache-2.0
jimcunderwood/MissionPlanner
Lib/site-packages/numpy/oldnumeric/compat.py
81
3182
# Compatibility module containing deprecated names for the old Numeric
# package. Provides aliases for types/constants, Numeric-style (un)pickling
# helpers, and Unpickler/Pickler shims.

__all__ = ['NewAxis',
           'UFuncType', 'UfuncType', 'ArrayType', 'arraytype',
           'LittleEndian', 'arrayrange', 'matrixmultiply',
           'array_constructor', 'pickle_array',
           'DumpArray', 'LoadArray', 'multiarray',
           # from cPickle
           'dump', 'dumps', 'load', 'loads',
           'Unpickler', 'Pickler'
          ]

import numpy.core.multiarray as multiarray
import numpy.core.umath as um
from numpy.core.numeric import array
import functions
import sys

from cPickle import dump, dumps

mu = multiarray

#Use this to add a new axis to an array
#compatibility only
NewAxis = None

#deprecated
UFuncType = type(um.sin)
UfuncType = type(um.sin)
ArrayType = mu.ndarray
arraytype = mu.ndarray

LittleEndian = (sys.byteorder == 'little')

from numpy import deprecate

# backward compatibility
arrayrange = deprecate(functions.arange, 'arrayrange', 'arange')

# deprecated names
matrixmultiply = deprecate(mu.dot, 'matrixmultiply', 'dot')


def DumpArray(m, fp):
    # Dump array `m` to the open file object `fp` using ndarray.dump.
    m.dump(fp)

def LoadArray(fp):
    # Load a pickled array from the open file object `fp`.
    import cPickle
    return cPickle.load(fp)

def array_constructor(shape, typecode, thestr, Endian=LittleEndian):
    # Reconstruct an array from the tuple produced by pickle_array();
    # byte-swaps when the pickling and unpickling endianness differ.
    if typecode == "O":
        x = array(thestr, "O")
    else:
        x = mu.fromstring(thestr, typecode)
    x.shape = shape
    if LittleEndian != Endian:
        return x.byteswap(True)
    else:
        return x

def pickle_array(a):
    # Return a (constructor, args) reduction tuple for array `a`.
    # Object arrays are serialized via tolist(); others via raw bytes.
    if a.dtype.hasobject:
        return (array_constructor,
                a.shape, a.dtype.char, a.tolist(), LittleEndian)
    else:
        return (array_constructor,
                (a.shape, a.dtype.char, a.tostring(), LittleEndian))

def loads(astr):
    # Unpickle a string, transparently remapping old 'Numeric' module
    # references to 'numpy.oldnumeric'.
    import cPickle
    arr = cPickle.loads(astr.replace('Numeric', 'numpy.oldnumeric'))
    return arr

def load(fp):
    # Unpickle the entire contents of an open file object.
    return loads(fp.read())

def _LoadArray(fp):
    # Parse a Numeric-format array record: a header line of
    # "[A]<typecode><endian><itemsize> <dim> <dim> ..." followed by raw data.
    import typeconv
    ln = fp.readline().split()
    if ln[0][0] == 'A': ln[0] = ln[0][1:]
    typecode = ln[0][0]
    endian = ln[0][1]
    itemsize = int(ln[0][2:])
    shape = [int(x) for x in ln[1:]]
    sz = itemsize
    for val in shape:
        sz *= val
    dstr = fp.read(sz)
    m = mu.fromstring(dstr, typeconv.convtypecode(typecode))
    m.shape = shape
    # Swap bytes when the recorded endianness disagrees with the host's.
    if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'):
        return m.byteswap(True)
    else:
        return m

import pickle, copy

if sys.version_info[0] >= 3:
    class Unpickler(pickle.Unpickler):
        # XXX: should we implement this? It's not completely straightforward
        #      to do.
        def __init__(self, *a, **kw):
            raise NotImplementedError(
                    "numpy.oldnumeric.Unpickler is not supported on Python 3")
else:
    class Unpickler(pickle.Unpickler):
        # Extends the stock Unpickler with the legacy 'A' (array) opcode.
        def load_array(self):
            self.stack.append(_LoadArray(self))

        dispatch = copy.copy(pickle.Unpickler.dispatch)
        dispatch['A'] = load_array

class Pickler(pickle.Pickler):
    # Intentionally unusable: new-style arrays must not be pickled with
    # the legacy protocol.
    # Fixed: the former Python-2-only `raise E, "msg"` statement syntax made
    # this whole module a SyntaxError on Python 3, defeating the explicit
    # sys.version_info branch above; the parenthesized form behaves
    # identically on Python 2.
    def __init__(self, *args, **kwds):
        raise NotImplementedError("Don't pickle new arrays with this")

    def save_array(self, object):
        raise NotImplementedError("Don't pickle new arrays with this")
gpl-3.0
fujicoin/electrum-fjc
electrum/gui/qt/history_list.py
2
31462
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import os import datetime from datetime import date from typing import TYPE_CHECKING, Tuple, Dict import threading from enum import IntEnum from decimal import Decimal from PyQt5.QtGui import QMouseEvent, QFont, QBrush, QColor from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, QAbstractItemModel, QSortFilterProxyModel, QVariant, QItemSelectionModel, QDate, QPoint) from PyQt5.QtWidgets import (QMenu, QHeaderView, QLabel, QMessageBox, QPushButton, QComboBox, QVBoxLayout, QCalendarWidget, QGridLayout) from electrum.address_synchronizer import TX_HEIGHT_LOCAL from electrum.i18n import _ from electrum.util import (block_explorer_URL, profiler, TxMinedInfo, OrderedDictWithIndex, timestamp_to_datetime) from electrum.logging import get_logger, Logger from .util import (read_QIcon, MONOSPACE_FONT, Buttons, CancelButton, OkButton, filename_field, MyTreeView, AcceptFileDragDrop, WindowModalDialog, CloseButton, webopen) if TYPE_CHECKING: from electrum.wallet import Abstract_Wallet _logger = get_logger(__name__) try: from electrum.plot import plot_history, NothingToPlotException except: _logger.info("could not import electrum.plot. 
This feature needs matplotlib to be installed.") plot_history = None # note: this list needs to be kept in sync with another in kivy TX_ICONS = [ "unconfirmed.png", "warning.png", "unconfirmed.png", "offline_tx.png", "clock1.png", "clock2.png", "clock3.png", "clock4.png", "clock5.png", "confirmed.png", ] class HistoryColumns(IntEnum): STATUS_ICON = 0 STATUS_TEXT = 1 DESCRIPTION = 2 COIN_VALUE = 3 RUNNING_COIN_BALANCE = 4 FIAT_VALUE = 5 FIAT_ACQ_PRICE = 6 FIAT_CAP_GAINS = 7 TXID = 8 class HistorySortModel(QSortFilterProxyModel): def lessThan(self, source_left: QModelIndex, source_right: QModelIndex): item1 = self.sourceModel().data(source_left, Qt.UserRole) item2 = self.sourceModel().data(source_right, Qt.UserRole) if item1 is None or item2 is None: raise Exception(f'UserRole not set for column {source_left.column()}') v1 = item1.value() v2 = item2.value() if v1 is None or isinstance(v1, Decimal) and v1.is_nan(): v1 = -float("inf") if v2 is None or isinstance(v2, Decimal) and v2.is_nan(): v2 = -float("inf") try: return v1 < v2 except: return False class HistoryModel(QAbstractItemModel, Logger): def __init__(self, parent): QAbstractItemModel.__init__(self, parent) Logger.__init__(self) self.parent = parent self.view = None # type: HistoryList self.transactions = OrderedDictWithIndex() self.tx_status_cache = {} # type: Dict[str, Tuple[int, str]] self.summary = None def set_view(self, history_list: 'HistoryList'): # FIXME HistoryModel and HistoryList mutually depend on each other. # After constructing both, this method needs to be called. self.view = history_list # type: HistoryList self.set_visibility_of_columns() def columnCount(self, parent: QModelIndex): return len(HistoryColumns) def rowCount(self, parent: QModelIndex): return len(self.transactions) def index(self, row: int, column: int, parent: QModelIndex): return self.createIndex(row, column) def data(self, index: QModelIndex, role: Qt.ItemDataRole) -> QVariant: # note: this method is performance-critical. 
# it is called a lot, and so must run extremely fast. assert index.isValid() col = index.column() tx_item = self.transactions.value_from_pos(index.row()) tx_hash = tx_item['txid'] conf = tx_item['confirmations'] txpos = tx_item['txpos_in_block'] or 0 height = tx_item['height'] try: status, status_str = self.tx_status_cache[tx_hash] except KeyError: tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) status, status_str = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info) if role == Qt.UserRole: # for sorting d = { HistoryColumns.STATUS_ICON: # height breaks ties for unverified txns # txpos breaks ties for verified same block txns (conf, -status, -height, -txpos), HistoryColumns.STATUS_TEXT: status_str, HistoryColumns.DESCRIPTION: tx_item['label'], HistoryColumns.COIN_VALUE: tx_item['value'].value, HistoryColumns.RUNNING_COIN_BALANCE: tx_item['balance'].value, HistoryColumns.FIAT_VALUE: tx_item['fiat_value'].value if 'fiat_value' in tx_item else None, HistoryColumns.FIAT_ACQ_PRICE: tx_item['acquisition_price'].value if 'acquisition_price' in tx_item else None, HistoryColumns.FIAT_CAP_GAINS: tx_item['capital_gain'].value if 'capital_gain' in tx_item else None, HistoryColumns.TXID: tx_hash, } return QVariant(d[col]) if role not in (Qt.DisplayRole, Qt.EditRole): if col == HistoryColumns.STATUS_ICON and role == Qt.DecorationRole: return QVariant(read_QIcon(TX_ICONS[status])) elif col == HistoryColumns.STATUS_ICON and role == Qt.ToolTipRole: return QVariant(str(conf) + _(" confirmation" + ("s" if conf != 1 else ""))) elif col > HistoryColumns.DESCRIPTION and role == Qt.TextAlignmentRole: return QVariant(Qt.AlignRight | Qt.AlignVCenter) elif col != HistoryColumns.STATUS_TEXT and role == Qt.FontRole: monospace_font = QFont(MONOSPACE_FONT) return QVariant(monospace_font) elif col == HistoryColumns.DESCRIPTION and role == Qt.DecorationRole \ and self.parent.wallet.invoices.paid.get(tx_hash): return QVariant(read_QIcon("seal")) elif col in 
(HistoryColumns.DESCRIPTION, HistoryColumns.COIN_VALUE) \ and role == Qt.ForegroundRole and tx_item['value'].value < 0: red_brush = QBrush(QColor("#BC1E1E")) return QVariant(red_brush) elif col == HistoryColumns.FIAT_VALUE and role == Qt.ForegroundRole \ and not tx_item.get('fiat_default') and tx_item.get('fiat_value') is not None: blue_brush = QBrush(QColor("#1E1EFF")) return QVariant(blue_brush) return QVariant() if col == HistoryColumns.STATUS_TEXT: return QVariant(status_str) elif col == HistoryColumns.DESCRIPTION: return QVariant(tx_item['label']) elif col == HistoryColumns.COIN_VALUE: value = tx_item['value'].value v_str = self.parent.format_amount(value, is_diff=True, whitespaces=True) return QVariant(v_str) elif col == HistoryColumns.RUNNING_COIN_BALANCE: balance = tx_item['balance'].value balance_str = self.parent.format_amount(balance, whitespaces=True) return QVariant(balance_str) elif col == HistoryColumns.FIAT_VALUE and 'fiat_value' in tx_item: value_str = self.parent.fx.format_fiat(tx_item['fiat_value'].value) return QVariant(value_str) elif col == HistoryColumns.FIAT_ACQ_PRICE and \ tx_item['value'].value < 0 and 'acquisition_price' in tx_item: # fixme: should use is_mine acq = tx_item['acquisition_price'].value return QVariant(self.parent.fx.format_fiat(acq)) elif col == HistoryColumns.FIAT_CAP_GAINS and 'capital_gain' in tx_item: cg = tx_item['capital_gain'].value return QVariant(self.parent.fx.format_fiat(cg)) elif col == HistoryColumns.TXID: return QVariant(tx_hash) return QVariant() def parent(self, index: QModelIndex): return QModelIndex() def hasChildren(self, index: QModelIndex): return not index.isValid() def update_label(self, row): tx_item = self.transactions.value_from_pos(row) tx_item['label'] = self.parent.wallet.get_label(tx_item['txid']) topLeft = bottomRight = self.createIndex(row, 2) self.dataChanged.emit(topLeft, bottomRight, [Qt.DisplayRole]) def get_domain(self): '''Overridden in address_dialog.py''' return 
self.parent.wallet.get_addresses() @profiler def refresh(self, reason: str): self.logger.info(f"refreshing... reason: {reason}") assert self.parent.gui_thread == threading.current_thread(), 'must be called from GUI thread' assert self.view, 'view not set' selected = self.view.selectionModel().currentIndex() selected_row = None if selected: selected_row = selected.row() fx = self.parent.fx if fx: fx.history_used_spot = False r = self.parent.wallet.get_full_history(domain=self.get_domain(), from_timestamp=None, to_timestamp=None, fx=fx) self.set_visibility_of_columns() if r['transactions'] == list(self.transactions.values()): return old_length = len(self.transactions) if old_length != 0: self.beginRemoveRows(QModelIndex(), 0, old_length) self.transactions.clear() self.endRemoveRows() self.beginInsertRows(QModelIndex(), 0, len(r['transactions'])-1) for tx_item in r['transactions']: txid = tx_item['txid'] self.transactions[txid] = tx_item self.endInsertRows() if selected_row: self.view.selectionModel().select(self.createIndex(selected_row, 0), QItemSelectionModel.Rows | QItemSelectionModel.SelectCurrent) self.view.filter() # update summary self.summary = r['summary'] if not self.view.years and self.transactions: start_date = date.today() end_date = date.today() if len(self.transactions) > 0: start_date = self.transactions.value_from_pos(0).get('date') or start_date end_date = self.transactions.value_from_pos(len(self.transactions) - 1).get('date') or end_date self.view.years = [str(i) for i in range(start_date.year, end_date.year + 1)] self.view.period_combo.insertItems(1, self.view.years) # update tx_status_cache self.tx_status_cache.clear() for txid, tx_item in self.transactions.items(): tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) self.tx_status_cache[txid] = self.parent.wallet.get_tx_status(txid, tx_mined_info) def set_visibility_of_columns(self): def set_visible(col: int, b: bool): self.view.showColumn(col) if b else self.view.hideColumn(col) # txid 
set_visible(HistoryColumns.TXID, False) # fiat history = self.parent.fx.show_history() cap_gains = self.parent.fx.get_history_capital_gains_config() set_visible(HistoryColumns.FIAT_VALUE, history) set_visible(HistoryColumns.FIAT_ACQ_PRICE, history and cap_gains) set_visible(HistoryColumns.FIAT_CAP_GAINS, history and cap_gains) def update_fiat(self, row, idx): tx_item = self.transactions.value_from_pos(row) key = tx_item['txid'] fee = tx_item.get('fee') value = tx_item['value'].value fiat_fields = self.parent.wallet.get_tx_item_fiat(key, value, self.parent.fx, fee.value if fee else None) tx_item.update(fiat_fields) self.dataChanged.emit(idx, idx, [Qt.DisplayRole, Qt.ForegroundRole]) def update_tx_mined_status(self, tx_hash: str, tx_mined_info: TxMinedInfo): try: row = self.transactions.pos_from_key(tx_hash) tx_item = self.transactions[tx_hash] except KeyError: return self.tx_status_cache[tx_hash] = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info) tx_item.update({ 'confirmations': tx_mined_info.conf, 'timestamp': tx_mined_info.timestamp, 'txpos_in_block': tx_mined_info.txpos, 'date': timestamp_to_datetime(tx_mined_info.timestamp), }) topLeft = self.createIndex(row, 0) bottomRight = self.createIndex(row, len(HistoryColumns) - 1) self.dataChanged.emit(topLeft, bottomRight) def on_fee_histogram(self): for tx_hash, tx_item in list(self.transactions.items()): tx_mined_info = self.tx_mined_info_from_tx_item(tx_item) if tx_mined_info.conf > 0: # note: we could actually break here if we wanted to rely on the order of txns in self.transactions continue self.update_tx_mined_status(tx_hash, tx_mined_info) def headerData(self, section: int, orientation: Qt.Orientation, role: Qt.ItemDataRole): assert orientation == Qt.Horizontal if role != Qt.DisplayRole: return None fx = self.parent.fx fiat_title = 'n/a fiat value' fiat_acq_title = 'n/a fiat acquisition price' fiat_cg_title = 'n/a fiat capital gains' if fx and fx.show_history(): fiat_title = '%s '%fx.ccy + _('Value') 
fiat_acq_title = '%s '%fx.ccy + _('Acquisition price') fiat_cg_title = '%s '%fx.ccy + _('Capital Gains') return { HistoryColumns.STATUS_ICON: '', HistoryColumns.STATUS_TEXT: _('Date'), HistoryColumns.DESCRIPTION: _('Description'), HistoryColumns.COIN_VALUE: _('Amount'), HistoryColumns.RUNNING_COIN_BALANCE: _('Balance'), HistoryColumns.FIAT_VALUE: fiat_title, HistoryColumns.FIAT_ACQ_PRICE: fiat_acq_title, HistoryColumns.FIAT_CAP_GAINS: fiat_cg_title, HistoryColumns.TXID: 'TXID', }[section] def flags(self, idx): extra_flags = Qt.NoItemFlags # type: Qt.ItemFlag if idx.column() in self.view.editable_columns: extra_flags |= Qt.ItemIsEditable return super().flags(idx) | extra_flags @staticmethod def tx_mined_info_from_tx_item(tx_item): tx_mined_info = TxMinedInfo(height=tx_item['height'], conf=tx_item['confirmations'], timestamp=tx_item['timestamp']) return tx_mined_info class HistoryList(MyTreeView, AcceptFileDragDrop): filter_columns = [HistoryColumns.STATUS_TEXT, HistoryColumns.DESCRIPTION, HistoryColumns.COIN_VALUE, HistoryColumns.TXID] def tx_item_from_proxy_row(self, proxy_row): hm_idx = self.model().mapToSource(self.model().index(proxy_row, 0)) return self.hm.transactions.value_from_pos(hm_idx.row()) def should_hide(self, proxy_row): if self.start_timestamp and self.end_timestamp: tx_item = self.tx_item_from_proxy_row(proxy_row) date = tx_item['date'] if date: in_interval = self.start_timestamp <= date <= self.end_timestamp if not in_interval: return True return False def __init__(self, parent, model: HistoryModel): super().__init__(parent, self.create_menu, stretch_column=HistoryColumns.DESCRIPTION) self.hm = model self.proxy = HistorySortModel(self) self.proxy.setSourceModel(model) self.setModel(self.proxy) self.config = parent.config AcceptFileDragDrop.__init__(self, ".txn") self.setSortingEnabled(True) self.start_timestamp = None self.end_timestamp = None self.years = [] self.create_toolbar_buttons() self.wallet = self.parent.wallet # type: Abstract_Wallet 
self.sortByColumn(HistoryColumns.STATUS_ICON, Qt.AscendingOrder) self.editable_columns |= {HistoryColumns.FIAT_VALUE} self.header().setStretchLastSection(False) for col in HistoryColumns: sm = QHeaderView.Stretch if col == self.stretch_column else QHeaderView.ResizeToContents self.header().setSectionResizeMode(col, sm) def format_date(self, d): return str(datetime.date(d.year, d.month, d.day)) if d else _('None') def on_combo(self, x): s = self.period_combo.itemText(x) x = s == _('Custom') self.start_button.setEnabled(x) self.end_button.setEnabled(x) if s == _('All'): self.start_timestamp = None self.end_timestamp = None self.start_button.setText("-") self.end_button.setText("-") else: try: year = int(s) except: return self.start_timestamp = start_date = datetime.datetime(year, 1, 1) self.end_timestamp = end_date = datetime.datetime(year+1, 1, 1) self.start_button.setText(_('From') + ' ' + self.format_date(start_date)) self.end_button.setText(_('To') + ' ' + self.format_date(end_date)) self.hide_rows() def create_toolbar_buttons(self): self.period_combo = QComboBox() self.start_button = QPushButton('-') self.start_button.pressed.connect(self.select_start_date) self.start_button.setEnabled(False) self.end_button = QPushButton('-') self.end_button.pressed.connect(self.select_end_date) self.end_button.setEnabled(False) self.period_combo.addItems([_('All'), _('Custom')]) self.period_combo.activated.connect(self.on_combo) def get_toolbar_buttons(self): return self.period_combo, self.start_button, self.end_button def on_hide_toolbar(self): self.start_timestamp = None self.end_timestamp = None self.hide_rows() def save_toolbar_state(self, state, config): config.set_key('show_toolbar_history', state) def select_start_date(self): self.start_timestamp = self.select_date(self.start_button) self.hide_rows() def select_end_date(self): self.end_timestamp = self.select_date(self.end_button) self.hide_rows() def select_date(self, button): d = WindowModalDialog(self, _("Select 
date")) d.setMinimumSize(600, 150) d.date = None vbox = QVBoxLayout() def on_date(date): d.date = date cal = QCalendarWidget() cal.setGridVisible(True) cal.clicked[QDate].connect(on_date) vbox.addWidget(cal) vbox.addLayout(Buttons(OkButton(d), CancelButton(d))) d.setLayout(vbox) if d.exec_(): if d.date is None: return None date = d.date.toPyDate() button.setText(self.format_date(date)) return datetime.datetime(date.year, date.month, date.day) def show_summary(self): h = self.model().sourceModel().summary if not h: self.parent.show_message(_("Nothing to summarize.")) return start_date = h.get('start_date') end_date = h.get('end_date') format_amount = lambda x: self.parent.format_amount(x.value) + ' ' + self.parent.base_unit() d = WindowModalDialog(self, _("Summary")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() grid = QGridLayout() grid.addWidget(QLabel(_("Start")), 0, 0) grid.addWidget(QLabel(self.format_date(start_date)), 0, 1) grid.addWidget(QLabel(str(h.get('fiat_start_value')) + '/BTC'), 0, 2) grid.addWidget(QLabel(_("Initial balance")), 1, 0) grid.addWidget(QLabel(format_amount(h['start_balance'])), 1, 1) grid.addWidget(QLabel(str(h.get('fiat_start_balance'))), 1, 2) grid.addWidget(QLabel(_("End")), 2, 0) grid.addWidget(QLabel(self.format_date(end_date)), 2, 1) grid.addWidget(QLabel(str(h.get('fiat_end_value')) + '/BTC'), 2, 2) grid.addWidget(QLabel(_("Final balance")), 4, 0) grid.addWidget(QLabel(format_amount(h['end_balance'])), 4, 1) grid.addWidget(QLabel(str(h.get('fiat_end_balance'))), 4, 2) grid.addWidget(QLabel(_("Income")), 5, 0) grid.addWidget(QLabel(format_amount(h.get('incoming'))), 5, 1) grid.addWidget(QLabel(str(h.get('fiat_incoming'))), 5, 2) grid.addWidget(QLabel(_("Expenditures")), 6, 0) grid.addWidget(QLabel(format_amount(h.get('outgoing'))), 6, 1) grid.addWidget(QLabel(str(h.get('fiat_outgoing'))), 6, 2) grid.addWidget(QLabel(_("Capital gains")), 7, 0) grid.addWidget(QLabel(str(h.get('fiat_capital_gains'))), 7, 2) 
grid.addWidget(QLabel(_("Unrealized gains")), 8, 0) grid.addWidget(QLabel(str(h.get('fiat_unrealized_gains', ''))), 8, 2) vbox.addLayout(grid) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() def plot_history_dialog(self): if plot_history is None: self.parent.show_message( _("Can't plot history.") + '\n' + _("Perhaps some dependencies are missing...") + " (matplotlib?)") return try: plt = plot_history(list(self.hm.transactions.values())) plt.show() except NothingToPlotException as e: self.parent.show_message(str(e)) def on_edited(self, index, user_role, text): index = self.model().mapToSource(index) row, column = index.row(), index.column() tx_item = self.hm.transactions.value_from_pos(row) key = tx_item['txid'] if column == HistoryColumns.DESCRIPTION: if self.wallet.set_label(key, text): #changed self.hm.update_label(row) self.parent.update_completions() elif column == HistoryColumns.FIAT_VALUE: self.wallet.set_fiat_value(key, self.parent.fx.ccy, text, self.parent.fx, tx_item['value'].value) value = tx_item['value'].value if value is not None: self.hm.update_fiat(row, index) else: assert False def mouseDoubleClickEvent(self, event: QMouseEvent): idx = self.indexAt(event.pos()) if not idx.isValid(): return tx_item = self.tx_item_from_proxy_row(idx.row()) if self.hm.flags(self.model().mapToSource(idx)) & Qt.ItemIsEditable: super().mouseDoubleClickEvent(event) else: self.show_transaction(tx_item['txid']) def show_transaction(self, tx_hash): tx = self.wallet.db.get_transaction(tx_hash) if not tx: return label = self.wallet.get_label(tx_hash) or None # prefer 'None' if not defined (force tx dialog to hide Description field if missing) self.parent.show_transaction(tx, label) def create_menu(self, position: QPoint): org_idx: QModelIndex = self.indexAt(position) idx = self.proxy.mapToSource(org_idx) if not idx.isValid(): # can happen e.g. 
before list is populated for the first time return tx_item = self.hm.transactions.value_from_pos(idx.row()) column = idx.column() if column == HistoryColumns.STATUS_ICON: column_title = _('Transaction ID') column_data = tx_item['txid'] else: column_title = self.hm.headerData(column, Qt.Horizontal, Qt.DisplayRole) column_data = self.hm.data(idx, Qt.DisplayRole).value() tx_hash = tx_item['txid'] tx = self.wallet.db.get_transaction(tx_hash) if not tx: return tx_URL = block_explorer_URL(self.config, 'tx', tx_hash) height = self.wallet.get_tx_height(tx_hash).height is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) is_unconfirmed = height <= 0 pr_key = self.wallet.invoices.paid.get(tx_hash) menu = QMenu() if height == TX_HEIGHT_LOCAL: menu.addAction(_("Remove"), lambda: self.remove_local_tx(tx_hash)) amount_columns = [HistoryColumns.COIN_VALUE, HistoryColumns.RUNNING_COIN_BALANCE, HistoryColumns.FIAT_VALUE, HistoryColumns.FIAT_ACQ_PRICE, HistoryColumns.FIAT_CAP_GAINS] if column in amount_columns: column_data = column_data.strip() menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data)) for c in self.editable_columns: if self.isColumnHidden(c): continue label = self.hm.headerData(c, Qt.Horizontal, Qt.DisplayRole) # TODO use siblingAtColumn when min Qt version is >=5.11 persistent = QPersistentModelIndex(org_idx.sibling(org_idx.row(), c)) menu.addAction(_("Edit {}").format(label), lambda p=persistent: self.edit(QModelIndex(p))) menu.addAction(_("Details"), lambda: self.show_transaction(tx_hash)) if is_unconfirmed and tx: # note: the current implementation of RBF *needs* the old tx fee rbf = is_mine and not tx.is_final() and fee is not None if rbf: menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx)) else: child_tx = self.wallet.cpfp(tx, 0) if child_tx: menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx)) if pr_key: menu.addAction(read_QIcon("seal"), _("View 
invoice"), lambda: self.parent.show_invoice(pr_key)) if tx_URL: menu.addAction(_("View on block explorer"), lambda: webopen(tx_URL)) menu.exec_(self.viewport().mapToGlobal(position)) def remove_local_tx(self, delete_tx): to_delete = {delete_tx} to_delete |= self.wallet.get_depending_transactions(delete_tx) question = _("Are you sure you want to remove this transaction?") if len(to_delete) > 1: question = (_("Are you sure you want to remove this transaction and {} child transactions?") .format(len(to_delete) - 1)) if not self.parent.question(msg=question, title=_("Please confirm")): return for tx in to_delete: self.wallet.remove_transaction(tx) self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.parent.need_update.set() def onFileAdded(self, fn): try: with open(fn) as f: tx = self.parent.tx_from_text(f.read()) self.parent.save_transaction_into_wallet(tx) except IOError as e: self.parent.show_error(e) def export_history_dialog(self): d = WindowModalDialog(self, _('Export History')) d.setMinimumSize(400, 200) vbox = QVBoxLayout(d) defaultname = os.path.expanduser('~/electrum-history.csv') select_msg = _('Select file to export your wallet transactions to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) vbox.addStretch(1) hbox = Buttons(CancelButton(d), OkButton(d, _('Export'))) vbox.addLayout(hbox) #run_hook('export_history_dialog', self, hbox) self.update() if not d.exec_(): return filename = filename_e.text() if not filename: return try: self.do_export_history(filename, csv_button.isChecked()) except (IOError, os.error) as reason: export_error_label = _("Electrum was unable to produce a transaction export.") self.parent.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history")) return self.parent.show_message(_("Your wallet history has been successfully exported.")) def do_export_history(self, file_name, is_csv): hist = 
self.wallet.get_full_history(domain=self.hm.get_domain(), from_timestamp=None, to_timestamp=None, fx=self.parent.fx, show_fees=True) txns = hist['transactions'] lines = [] if is_csv: for item in txns: lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item.get('fiat_value', ''), item.get('fee', ''), item.get('fiat_fee', ''), item['date']]) with open(file_name, "w+", encoding='utf-8') as f: if is_csv: import csv transaction = csv.writer(f, lineterminator='\n') transaction.writerow(["transaction_hash", "label", "confirmations", "value", "fiat_value", "fee", "fiat_fee", "timestamp"]) for line in lines: transaction.writerow(line) else: from electrum.util import json_encode f.write(json_encode(txns)) def text_txid_from_coordinate(self, row, col): idx = self.model().mapToSource(self.model().index(row, col)) tx_item = self.hm.transactions.value_from_pos(idx.row()) return self.hm.data(idx, Qt.DisplayRole).value(), tx_item['txid']
mit
twiest/openshift-tools
openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_utils/src/generate.py
104
3264
#!/usr/bin/env python
'''
  Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
'''
import argparse
import os
import yaml
import six

OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml')  # noqa: E501
LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')


class GenerateAnsibleException(Exception):
    '''General Exception for generate function'''
    pass


def parse_args():
    '''Parse the command line arguments.

    :returns: argparse.Namespace with a boolean ``verify`` attribute
    '''
    parser = argparse.ArgumentParser(description="Generate ansible modules.")
    parser.add_argument('--verify', action='store_true', default=False,
                        help='Verify library code matches the generated code.')

    return parser.parse_args()


def fragment_banner(fragment_path, side, data):
    """Generate a banner to wrap around file fragments

    :param string fragment_path: A path to a module fragment
    :param string side: ONE OF: "header", "footer"
    :param StringIO data: A StringIO object to write the banner to
    :returns: the banner text (also written to ``data`` unless skipped)
    """
    side_msg = {
        "header": "Begin included fragment: {}",
        "footer": "End included fragment: {}"
    }
    annotation = side_msg[side].format(fragment_path)

    banner = """
# -*- -*- -*- {} -*- -*- -*-
""".format(annotation)

    # Why skip?
    #
    # * 'generated' - This is the head of the script, we don't want to
    #   put comments before the #!shebang
    #
    # * 'license' - Wrapping this just seemed like gratuitous extra
    if ("generated" not in fragment_path) and ("license" not in fragment_path):
        data.write(banner)

    # Make it self-contained testable
    return banner


def generate(parts):
    '''generate the source code for the ansible modules

    :param Array parts: An array of paths (strings) to module fragments
    :returns: StringIO holding the concatenated, banner-wrapped fragments
    '''
    data = six.StringIO()
    for fpart in parts:
        # first line is pylint disable so skip it
        with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
            fragment_banner(fpart, "header", data)
            for idx, line in enumerate(pfd):
                # NOTE(review): the original condition relied on and/or
                # precedence, which dropped a 'pylint: skip-file' line found
                # anywhere in a fragment.  Parenthesized so both pragmas are
                # only skipped on the first two lines, matching the comment
                # above.
                if idx in (0, 1) and ('flake8: noqa' in line or 'pylint: skip-file' in line):  # noqa: E501
                    continue

                data.write(line)

            fragment_banner(fpart, "footer", data)

    return data


def get_sources():
    '''return the path to the generate sources

    The sources file is plain data, so the safe loader is sufficient
    (and avoids the arbitrary-object construction of full ``yaml.load``).
    '''
    with open(OPENSHIFT_ANSIBLE_SOURCES_PATH) as sfd:
        return yaml.safe_load(sfd.read())


def verify():
    '''verify if the generated code matches the library code

    :raises GenerateAnsibleException: when any library file is out of date
    '''
    for fname, parts in get_sources().items():
        data = generate(parts)
        fname = os.path.join(LIBRARY, fname)
        with open(fname) as lfd:
            if lfd.read() != data.getvalue():
                raise GenerateAnsibleException('Generated content does not match for %s' % fname)


def main():
    ''' combine the necessary files to create the ansible module '''
    args = parse_args()
    if args.verify:
        verify()

    for fname, parts in get_sources().items():
        data = generate(parts)
        fname = os.path.join(LIBRARY, fname)
        # mode 'w' truncates and positions at offset 0, so no seek is needed
        with open(fname, 'w') as afd:
            afd.write(data.getvalue())


if __name__ == '__main__':
    main()
apache-2.0
factorlibre/OCB
addons/hr_gamification/models/gamification.py
388
4836
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp.osv import fields, osv


class hr_gamification_badge_user(osv.Model):
    """User having received a badge"""
    _name = 'gamification.badge.user'
    _inherit = ['gamification.badge.user']
    _columns = {
        'employee_id': fields.many2one("hr.employee", string='Employee'),
    }

    def _check_employee_related_user(self, cr, uid, ids, context=None):
        """Constraint: the optional employee must belong to the badge user."""
        for badge_user in self.browse(cr, uid, ids, context=context):
            if badge_user.user_id and badge_user.employee_id:
                if badge_user.employee_id not in badge_user.user_id.employee_ids:
                    return False
        return True

    _constraints = [
        (_check_employee_related_user, "The selected employee does not correspond to the selected user.", ['employee_id']),
    ]


class gamification_badge(osv.Model):
    _name = 'gamification.badge'
    _inherit = ['gamification.badge']

    def get_granted_employees(self, cr, uid, badge_ids, context=None):
        """Return an act_window action listing the employees that were
        granted at least one of ``badge_ids``."""
        if context is None:
            context = {}
        # hoist the registry lookup out of the loop
        badge_user_obj = self.pool.get('gamification.badge.user')
        badge_user_ids = badge_user_obj.search(
            cr, uid,
            [('badge_id', 'in', badge_ids), ('employee_id', '!=', False)],
            context=context)
        # a set removes employees rewarded several times
        employee_ids = list({badge_user.employee_id.id
                             for badge_user in badge_user_obj.browse(cr, uid, badge_user_ids, context)})
        return {
            'type': 'ir.actions.act_window',
            'name': 'Granted Employees',
            'view_mode': 'kanban,tree,form',
            'view_type': 'form',
            'res_model': 'hr.employee',
            'domain': [('id', 'in', employee_ids)]
        }


class hr_employee(osv.osv):
    _name = "hr.employee"
    _inherit = "hr.employee"

    def _employee_badge_domain(self, employee):
        """Domain matching the badge.user records of ``employee``: linked
        explicitly, or linked to the employee's user with no employee set."""
        return ['|',
                ('employee_id', '=', employee.id),
                '&',
                ('employee_id', '=', False),
                ('user_id', '=', employee.user_id.id)]

    def _get_employee_goals(self, cr, uid, ids, field_name, arg, context=None):
        """Return the list of HR goals assigned to each employee"""
        res = {}
        goal_obj = self.pool.get('gamification.goal')
        for employee in self.browse(cr, uid, ids, context=context):
            res[employee.id] = goal_obj.search(
                cr, uid,
                [('user_id', '=', employee.user_id.id),
                 ('challenge_id.category', '=', 'hr')],
                context=context)
        return res

    def _get_employee_badges(self, cr, uid, ids, field_name, arg, context=None):
        """Return the list of badge_users assigned to each employee"""
        res = {}
        badge_user_obj = self.pool.get('gamification.badge.user')
        for employee in self.browse(cr, uid, ids, context=context):
            res[employee.id] = badge_user_obj.search(
                cr, uid, self._employee_badge_domain(employee), context=context)
        return res

    def _has_badges(self, cr, uid, ids, field_name, arg, context=None):
        """Return whether each employee received at least one badge"""
        res = {}
        badge_user_obj = self.pool.get('gamification.badge.user')
        for employee in self.browse(cr, uid, ids, context=context):
            # count=True avoids fetching the ids just to test emptiness
            res[employee.id] = badge_user_obj.search(
                cr, uid, self._employee_badge_domain(employee),
                count=True, context=context) > 0
        return res

    _columns = {
        'goal_ids': fields.function(_get_employee_goals, type="one2many", obj='gamification.goal', string="Employee HR Goals"),
        'badge_ids': fields.function(_get_employee_badges, type="one2many", obj='gamification.badge.user', string="Employee Badges"),
        'has_badges': fields.function(_has_badges, type="boolean", string="Has Badges"),
    }
agpl-3.0
marinho/geraldo
site/newsite/django_1_0/django/contrib/localflavor/ch/forms.py
35
3917
""" Swiss-specific Form helpers """ from django.forms import ValidationError from django.forms.fields import Field, RegexField, Select, EMPTY_VALUES from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ import re id_re = re.compile(r"^(?P<idnumber>\w{8})(?P<pos9>(\d{1}|<))(?P<checksum>\d{1})$") phone_digits_re = re.compile(r'^0([1-9]{1})\d{8}$') class CHZipCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a zip code in the format XXXX.'), } def __init__(self, *args, **kwargs): super(CHZipCodeField, self).__init__(r'^\d{4}$', max_length=None, min_length=None, *args, **kwargs) class CHPhoneNumberField(Field): """ Validate local Swiss phone number (not international ones) The correct format is '0XX XXX XX XX'. '0XX.XXX.XX.XX' and '0XXXXXXXXX' validate but are corrected to '0XX XXX XX XX'. """ default_error_messages = { 'invalid': 'Phone numbers must be in 0XX XXX XX XX format.', } def clean(self, value): super(CHPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' value = re.sub('(\.|\s|/|-)', '', smart_unicode(value)) m = phone_digits_re.search(value) if m: return u'%s %s %s %s' % (value[0:3], value[3:6], value[6:8], value[8:10]) raise ValidationError(self.error_messages['invalid']) class CHStateSelect(Select): """ A Select widget that uses a list of CH states as its choices. """ def __init__(self, attrs=None): from ch_states import STATE_CHOICES # relative import super(CHStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class CHIdentityCardNumberField(Field): """ A Swiss identity card number. Checks the following rules to determine whether the number is valid: * Conforms to the X1234567<0 or 1234567890 format. 
* Included checksums match calculated checksums Algorithm is documented at http://adi.kousz.ch/artikel/IDCHE.htm """ default_error_messages = { 'invalid': _('Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.'), } def has_valid_checksum(self, number): given_number, given_checksum = number[:-1], number[-1] new_number = given_number calculated_checksum = 0 fragment = "" parameter = 7 first = str(number[:1]) if first.isalpha(): num = ord(first.upper()) - 65 if num < 0 or num > 8: return False new_number = str(num) + new_number[1:] new_number = new_number[:8] + '0' if not new_number.isdigit(): return False for i in range(len(new_number)): fragment = int(new_number[i])*parameter calculated_checksum += fragment if parameter == 1: parameter = 7 elif parameter == 3: parameter = 1 elif parameter ==7: parameter = 3 return str(calculated_checksum)[-1] == given_checksum def clean(self, value): super(CHIdentityCardNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' match = re.match(id_re, value) if not match: raise ValidationError(self.error_messages['invalid']) idnumber, pos9, checksum = match.groupdict()['idnumber'], match.groupdict()['pos9'], match.groupdict()['checksum'] if idnumber == '00000000' or \ idnumber == 'A0000000': raise ValidationError(self.error_messages['invalid']) all_digits = "%s%s%s" % (idnumber, pos9, checksum) if not self.has_valid_checksum(all_digits): raise ValidationError(self.error_messages['invalid']) return u'%s%s%s' % (idnumber, pos9, checksum)
lgpl-3.0
DarKnight24/owtf
dictionaries/dict_merger_svndigger_raft.py
2
2196
#!/usr/bin/env python
"""
2013/05/08 - Bharadwaj Machiraju (@tunnelshade) - Initial merge script creation
"""
import os
import urllib
import codecs

# Order of the files in the list is important
raft_lowercase = [
    'raft-small-directories-lowercase.txt',
    'raft-small-files-lowercase.txt',
    'raft-medium-directories-lowercase.txt',
    'raft-medium-files-lowercase.txt',
    'raft-large-directories-lowercase.txt',
    'raft-large-files-lowercase.txt'
]
raft_mixedcase = [
    'raft-small-directories.txt',
    'raft-small-files.txt',
    'raft-medium-directories.txt',
    'raft-medium-files.txt',
    'raft-large-directories.txt',
    'raft-large-files.txt'
]

case_dict = {'lowercase': raft_lowercase, 'mixedcase': raft_mixedcase}

abs_path = os.path.dirname(os.path.abspath(__file__))
raft_path = os.path.join(abs_path, 'restricted', 'raft')
svndigger_path = os.path.join(abs_path, 'restricted', 'svndigger')
output_path = os.path.join(abs_path, 'restricted', 'combined')

# Two files will be formed
for case in ['lowercase', 'mixedcase']:
    combined_name = os.path.join(output_path, 'combined_%s.txt' % case)
    # a set gives O(1) duplicate detection (the original used a dict of 1s
    # probed via try/except KeyError) and 'with' closes every file handle
    seen = set()
    with codecs.open(combined_name, 'w', 'UTF-8') as out:
        # The svndigger list is added at the beginning
        with codecs.open(os.path.join(svndigger_path, 'all.txt'), 'r', 'UTF-8') as svn:
            for line in svn:
                line = line.rstrip()
                out.write('%s\n' % line)
                seen.add(line)
        # Non repeated entries from raft dicts are added
        for file_path in case_dict[case]:
            with codecs.open(os.path.join(raft_path, file_path), 'r', 'ISO-8859-1') as raft:
                for line in raft:
                    line = line.rstrip()
                    if line not in seen:
                        seen.add(line)
                        out.write('%s\n' % line)

    # Prepare filtered version for using with dirbuster
    filtered_name = os.path.join(output_path, 'filtered_combined_%s.txt' % case)
    with codecs.open(filtered_name, 'w', 'UTF-8') as out:
        with codecs.open(combined_name, 'r', 'UTF-8') as combined:
            for line in combined:
                out.write(urllib.quote_plus(line.encode('utf-8'), './\r\n'))
bsd-3-clause
pomegranited/edx-platform
lms/djangoapps/course_api/blocks/transformers/block_counts.py
24
1699
""" Block Counts Transformer """ from openedx.core.lib.block_cache.transformer import BlockStructureTransformer class BlockCountsTransformer(BlockStructureTransformer): """ Keep a count of descendant blocks of the requested types """ VERSION = 1 BLOCK_COUNTS = 'block_counts' def __init__(self, block_types_to_count): self.block_types_to_count = block_types_to_count @classmethod def name(cls): return "blocks_api:block_counts" @classmethod def collect(cls, block_structure): """ Collects any information that's necessary to execute this transformer's transform method. """ # collect basic xblock fields block_structure.request_xblock_fields('category') def transform(self, usage_info, block_structure): """ Mutates block_structure based on the given usage_info. """ if not self.block_types_to_count: return for block_key in block_structure.post_order_traversal(): for block_type in self.block_types_to_count: descendants_type_count = sum([ block_structure.get_transformer_block_field(child_key, self, block_type, 0) for child_key in block_structure.get_children(block_key) ]) block_structure.set_transformer_block_field( block_key, self, block_type, ( descendants_type_count + (1 if (block_structure.get_xblock_field(block_key, 'category') == block_type) else 0) ) )
agpl-3.0
kamcpp/tensorflow
tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
5
44406
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SdcaModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from threading import Thread import tensorflow as tf from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _ShardedMutableHashTable from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SparseFeatureColumn from tensorflow.python.framework.test_util import TensorFlowTestCase from tensorflow.python.platform import googletest _MAX_ITERATIONS = 100 _SHARD_NUMBERS = [None, 1, 3, 10] _NUM_LOSS_PARTITIONS = [2, 4] def make_example_proto(feature_dict, target, value=1.0): e = tf.train.Example() features = e.features features.feature['target'].float_list.value.append(target) for key, values in feature_dict.items(): features.feature[key + '_indices'].int64_list.value.extend(values) features.feature[key + '_values'].float_list.value.extend([value] * len(values)) return e def make_example_dict(example_protos, example_weights): def parse_examples(example_protos): features = { 'target': tf.FixedLenFeature(shape=[1], dtype=tf.float32, default_value=0), 'age_indices': tf.VarLenFeature(dtype=tf.int64), 'age_values': tf.VarLenFeature(dtype=tf.float32), 'gender_indices': 
tf.VarLenFeature(dtype=tf.int64), 'gender_values': tf.VarLenFeature(dtype=tf.float32) } return tf.parse_example( [e.SerializeToString() for e in example_protos], features) parsed = parse_examples(example_protos) sparse_features = [ SparseFeatureColumn( tf.reshape( tf.split(1, 2, parsed['age_indices'].indices)[0], [-1]), tf.reshape(parsed['age_indices'].values, [-1]), tf.reshape(parsed['age_values'].values, [-1])), SparseFeatureColumn( tf.reshape( tf.split(1, 2, parsed['gender_indices'].indices)[0], [-1]), tf.reshape(parsed['gender_indices'].values, [-1]), tf.reshape(parsed['gender_values'].values, [-1])) ] return dict(sparse_features=sparse_features, dense_features=[], example_weights=example_weights, example_labels=tf.reshape(parsed['target'], [-1]), example_ids=['%d' % i for i in range(0, len(example_protos))]) def make_variable_dict(max_age, max_gender): # TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from # examples_dict. age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32)) gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32)) return dict(sparse_features_weights=[age_weights, gender_weights], dense_features_weights=[]) def make_dense_examples_and_variables_dicts(dense_features_values, weights, labels): """Creates examples and variables dictionaries for dense features. Variables shapes are inferred from the list of dense feature values passed as argument. Args: dense_features_values: The values of the dense features weights: The example weights. labels: The example labels. Returns: One dictionary for the examples and one for the variables. """ dense_tensors = [] dense_weights = [] for dense_feature in dense_features_values: dense_tensor = tf.convert_to_tensor(dense_feature, dtype=tf.float32) check_shape_op = tf.Assert( tf.less_equal(tf.rank(dense_tensor), 2), ['dense_tensor shape must be [batch_size, dimension] or [batch_size]']) # Reshape to [batch_size, dense_column_dimension]. 
with tf.control_dependencies([check_shape_op]): dense_tensor = tf.reshape(dense_tensor, [dense_tensor.get_shape().as_list()[0], -1]) dense_tensors.append(dense_tensor) # Add variables of shape [feature_column_dimension]. dense_weights.append( tf.Variable( tf.zeros( [dense_tensor.get_shape().as_list()[1]], dtype=tf.float32))) examples_dict = dict( sparse_features=[], dense_features=dense_tensors, example_weights=weights, example_labels=labels, example_ids=['%d' % i for i in range(0, len(labels))]) variables_dict = dict( sparse_features_weights=[], dense_features_weights=dense_weights) return examples_dict, variables_dict def get_binary_predictions_for_logistic(predictions, cutoff=0.5): return tf.cast( tf.greater_equal(predictions, tf.ones_like(predictions) * cutoff), dtype=tf.int32) def get_binary_predictions_for_hinge(predictions): return tf.cast( tf.greater_equal(predictions, tf.zeros_like(predictions)), dtype=tf.int32) # TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking. # TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code. 
class SdcaModelTest(TensorFlowTestCase): """Base SDCA optimizer test class for any loss type.""" def _single_threaded_test_session(self): config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) return self.test_session(use_gpu=False, config=config) class SdcaWithLogisticLossTest(SdcaModelTest): """SDCA optimizer test class for logistic loss.""" def testSimple(self): # Setup test data example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) self.assertAllClose(0.693147, unregularized_loss.eval()) self.assertAllClose(0.693147, loss.eval()) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # The high tolerance in unregularized_loss comparisons is due to the # fact that it's possible to trade off unregularized_loss vs. # regularization and still have a sum that is quite close to the # optimal regularized_loss value. SDCA's duality gap only ensures that # the regularized_loss is within 0.01 of optimal. # 0.525457 is the optimal regularized_loss. # 0.411608 is the unregularized_loss at that optimum. 
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05) self.assertAllClose(0.525457, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 1], predicted_labels.eval()) self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2) def testDistributedSimple(self): # Setup test data example_protos = [ make_example_proto({'age': [0], 'gender': [0]}, 0), make_example_proto({'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] for num_shards in _SHARD_NUMBERS: for num_loss_partitions in _NUM_LOSS_PARTITIONS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict( symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type='logistic_loss', num_table_shards=num_shards, num_loss_partitions=num_loss_partitions) lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) self.assertAllClose(0.693147, unregularized_loss.eval()) self.assertAllClose(0.693147, loss.eval()) train_op = lr.minimize() def Minimize(): with self._single_threaded_test_session(): for _ in range(_MAX_ITERATIONS): train_op.run() threads = [] for _ in range(num_loss_partitions): threads.append(Thread(target=Minimize)) threads[-1].start() for t in threads: t.join() lr.update_weights(train_op).run() # The high tolerance in unregularized_loss comparisons is due to the # fact that it's possible to trade off unregularized_loss vs. # regularization and still have a sum that is quite close to the # optimal regularized_loss value. SDCA's duality gap only ensures # that the regularized_loss is within 0.01 of optimal. # 0.525457 is the optimal regularized_loss. # 0.411608 is the unregularized_loss at that optimum. 
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05) self.assertAllClose(0.525457, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 1], predicted_labels.eval()) self.assertTrue(lr.approximate_duality_gap().eval() < 0.02) def testSimpleNoL2(self): # Same as test above (so comments from above apply) but without an L2. # The algorithm should behave as if we have an L2 of 1 in optimization but # 0 in regularized_loss. example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=0, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) self.assertAllClose(0.693147, unregularized_loss.eval()) self.assertAllClose(0.693147, loss.eval()) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # There is neither L1 nor L2 loss, so regularized and unregularized # losses should be exactly the same. self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01) self.assertAllClose(0.40244, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 1], predicted_labels.eval()) self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2) def testSomeUnweightedExamples(self): # Setup test data with 4 examples, but should produce the same # results as testSimple. example_protos = [ # Will be used. 
make_example_proto( {'age': [0], 'gender': [0]}, 0), # Will be ignored. make_example_proto( {'age': [1], 'gender': [0]}, 0), # Will be used. make_example_proto( {'age': [1], 'gender': [1]}, 1), # Will be ignored. make_example_proto( {'age': [1], 'gender': [0]}, 1), ] example_weights = [1.0, 0.0, 1.0, 0.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): # Only use examples 0 and 2 examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05) self.assertAllClose(0.525457, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllClose([0, 1, 1, 1], predicted_labels.eval()) self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2) def testFractionalExampleLabel(self): # Setup test data with 1 positive, and 1 mostly-negative example. 
example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0.1), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() with self.assertRaisesOpError( 'Only labels of 0.0 or 1.0 are supported right now.'): lr.minimize().run() def testImbalanced(self): # Setup test data with 1 positive, and 3 negative examples. example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [2], 'gender': [0]}, 0), make_example_proto( {'age': [3], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0, 1.0, 1.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(3, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() self.assertAllClose(0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08) self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval()) self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2) def 
testImbalancedWithExampleWeights(self): # Setup test data with 1 positive, and 1 negative example. example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [3.0, 1.0] for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08) self.assertAllClose(0.408044, loss.eval(), atol=0.012) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 1], predicted_labels.eval()) self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2) def testInstancesOfOneClassOnly(self): # Setup test data with 1 positive (ignored), and 1 negative example. example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [0]}, 1), # Shares gender with the instance above. ] example_weights = [1.0, 0.0] # Second example "omitted" from training. 
for num_shards in _SHARD_NUMBERS: with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, num_table_shards=num_shards, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() unregularized_loss = lr.unregularized_loss(examples) loss = lr.regularized_loss(examples) predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05) self.assertAllClose(0.525457, loss.eval(), atol=0.01) predicted_labels = get_binary_predictions_for_logistic(predictions) self.assertAllEqual([0, 0], predicted_labels.eval()) self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2) def testOutOfRangeSparseFeatures(self): # Setup test data example_protos = [ make_example_proto({'age': [0], 'gender': [0]}, 0), make_example_proto({'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(0, 0) options = dict( symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() train_op = lr.minimize() with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'indices.*'): train_op.run() def testOutOfRangeDenseFeatures(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]], weights=[20.0, 10.0], labels=[1.0, 0.0]) # Replace with a variable of size 1 instead of 2. 
variables['dense_features_weights'] = [ tf.Variable(tf.zeros( [1], dtype=tf.float32)) ] options = dict( symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='logistic_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() train_op = lr.minimize() with self.assertRaisesRegexp( tf.errors.InvalidArgumentError, 'More dense features than we have parameters for.*'): train_op.run() # TODO(katsiaspis): add a test for the case when examples at the end of an # epoch are repeated, since example id may be duplicated. class SdcaWithLinearLossTest(SdcaModelTest): """SDCA optimizer test class for linear (squared) loss.""" def testSimple(self): # Setup test data example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, -10.0), make_example_proto( {'age': [1], 'gender': [1]}, 14.0), ] example_weights = [1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # Predictions should be 2/3 of label due to minimizing regularized loss: # (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005) # Approximate gap should be very close to 0.0. (In fact, because the gap # is only approximate, it is likely that upon convergence the duality gap # can have a tiny negative value). 
self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2) def testL2Regularization(self): # Setup test data example_protos = [ # 2 identical examples make_example_proto( {'age': [0], 'gender': [0]}, -10.0), make_example_proto( {'age': [0], 'gender': [0]}, -10.0), # 2 more identical examples make_example_proto( {'age': [1], 'gender': [1]}, 14.0), make_example_proto( {'age': [1], 'gender': [1]}, 14.0), ] example_weights = [1.0, 1.0, 1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=16, symmetric_l1_regularization=0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # Predictions should be 1/5 of label due to minimizing regularized loss: # (label - 2 * weight)^2 + L2 * 16 * weight^2 optimal1 = -10.0 / 5.0 optimal2 = 14.0 / 5.0 self.assertAllClose( [optimal1, optimal1, optimal2, optimal2], predictions.eval(), rtol=0.01) def testL1Regularization(self): # Setup test data example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, -10.0), make_example_proto( {'age': [1], 'gender': [1]}, 14.0), ] example_weights = [1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=4.0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() prediction = lr.predictions(examples) loss = lr.regularized_loss(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # Predictions should be -4.0, 48/5 due to minimizing regularized loss: # (label - 2 
* weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08) # Loss should be the sum of the regularized loss value from above per # example after plugging in the optimal weights. self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01) def testFeatureValues(self): # Setup test data example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, -10.0, -2.0), make_example_proto( {'age': [1], 'gender': [1]}, 14.0, 2.0), ] example_weights = [5.0, 3.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # There are 4 (sparse) variable weights to be learned. 2 for age and 2 for # gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1, # y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the # corresponding *example* weights. With the given feature values, the loss # function is given by: # s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2 # + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it # can be verified that: # w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and # w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to # regularization and example weights, the predictions are within: # 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels. 
self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01) def testDenseFeaturesWithDefaultWeights(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]], weights=[1.0, 1.0], labels=[10.0, -5.0]) options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # The loss function for these particular features is given by: # 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So, # differentiating wrt to w_1, w_2 yields the following optimal values: # w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2. # In this case the (unnormalized regularized) loss will be: # 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual # loss should be further normalized by the sum of example weights. 
self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01) loss = lr.regularized_loss(examples) self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01) def testDenseFeaturesWithArbitraryWeights(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]], weights=[20.0, 10.0], labels=[10.0, -5.0]) options = dict(symmetric_l2_regularization=5.0, symmetric_l1_regularization=0, loss_type='squared_loss') lr = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = lr.predictions(examples) train_op = lr.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() lr.update_weights(train_op).run() # The loss function for these particular features is given by: # 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 + # \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It # turns out that the optimal (variable) weights are given by: # w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and # w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3. # In this case the (unnormalized regularized) loss will be: # s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The # actual loss should be further normalized by the sum of example weights. 
self.assertAllClose([8.0, -10.0/3], predictions.eval(), rtol=0.01) loss = lr.regularized_loss(examples) self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01) class SdcaWithHingeLossTest(SdcaModelTest): """SDCA optimizer test class for hinge loss.""" def testSimple(self): # Setup test data example_protos = [ make_example_proto( {'age': [0], 'gender': [0]}, 0), make_example_proto( {'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='hinge_loss') model = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() # Before minimization, the weights default to zero. There is no loss due # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0. predictions = model.predictions(examples) self.assertAllClose([0.0, 0.0], predictions.eval()) unregularized_loss = model.unregularized_loss(examples) regularized_loss = model.regularized_loss(examples) self.assertAllClose(1.0, unregularized_loss.eval()) self.assertAllClose(1.0, regularized_loss.eval()) # After minimization, the model separates perfectly the data points. There # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3 # and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing # wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0 # unregularized loss and 0.25 L2 loss. 
train_op = model.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() model.update_weights(train_op).run() binary_predictions = get_binary_predictions_for_hinge(predictions) self.assertAllEqual([-1.0, 1.0], predictions.eval()) self.assertAllEqual([0, 1], binary_predictions.eval()) self.assertAllClose(0.0, unregularized_loss.eval()) self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05) def testDenseFeaturesPerfectlySeparable(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[1.0, 1.0], [1.0, -1.0]], weights=[1.0, 1.0], labels=[1.0, 0.0]) options = dict( symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='hinge_loss') model = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = model.predictions(examples) binary_predictions = get_binary_predictions_for_hinge(predictions) train_op = model.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() model.update_weights(train_op).run() self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05) self.assertAllEqual([1, 0], binary_predictions.eval()) # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is, # the SVM's functional margin >=1), so the unregularized loss is ~0.0. # There is only loss due to l2-regularization. For these datapoints, it # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25. 
unregularized_loss = model.unregularized_loss(examples) regularized_loss = model.regularized_loss(examples) self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02) self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02) def testDenseFeaturesSeparableWithinMargins(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]], weights=[1.0, 1.0], labels=[1.0, 0.0]) options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='hinge_loss') model = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = model.predictions(examples) binary_predictions = get_binary_predictions_for_hinge(predictions) train_op = model.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() model.update_weights(train_op).run() # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints # are within the margins so there is unregularized loss (1/2 per example). # For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which # gives an L2 loss of ~0.25. 
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05) self.assertAllEqual([1, 0], binary_predictions.eval()) unregularized_loss = model.unregularized_loss(examples) regularized_loss = model.regularized_loss(examples) self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02) self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02) def testDenseFeaturesWeightedExamples(self): with self._single_threaded_test_session(): examples, variables = make_dense_examples_and_variables_dicts( dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]], weights=[3.0, 1.0], labels=[1.0, 0.0]) options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='hinge_loss') model = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() predictions = model.predictions(examples) binary_predictions = get_binary_predictions_for_hinge(predictions) train_op = model.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() model.update_weights(train_op).run() # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will # try to increase the margin from (1.0, 0.5). Due to regularization, # (1.0, -0.5) will be within the margin. For these points and example # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2 # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be # correct, but the boundary will be much closer to the 2nd point than the # first one. 
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05) self.assertAllEqual([1, 0], binary_predictions.eval()) unregularized_loss = model.unregularized_loss(examples) regularized_loss = model.regularized_loss(examples) self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02) self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02) class SdcaWithSmoothHingeLossTest(SdcaModelTest): """SDCA optimizer test class for smooth hinge loss.""" def testSimple(self): # Setup test data example_protos = [ make_example_proto({'age': [0], 'gender': [0]}, 0), make_example_proto({'age': [1], 'gender': [1]}, 1), ] example_weights = [1.0, 1.0] with self._single_threaded_test_session(): examples = make_example_dict(example_protos, example_weights) variables = make_variable_dict(1, 1) options = dict( symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type='smooth_hinge_loss') model = SdcaModel(examples, variables, options) tf.initialize_all_variables().run() # Before minimization, the weights default to zero. There is no loss due # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0. predictions = model.predictions(examples) self.assertAllClose([0.0, 0.0], predictions.eval()) unregularized_loss = model.unregularized_loss(examples) regularized_loss = model.regularized_loss(examples) self.assertAllClose(1.0, unregularized_loss.eval()) self.assertAllClose(1.0, regularized_loss.eval()) # After minimization, the model separates perfectly the data points. There # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3 # and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. 
This gives # an unregularized hinge loss of 0.33 and a 0.11 L2 loss train_op = model.minimize() for _ in range(_MAX_ITERATIONS): train_op.run() model.update_weights(train_op).run() binary_predictions = get_binary_predictions_for_hinge(predictions) self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05) self.assertAllEqual([0, 1], binary_predictions.eval()) self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02) self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02) class SparseFeatureColumnTest(SdcaModelTest): """Tests for SparseFeatureColumn. """ def testBasic(self): expected_example_indices = [1, 1, 1, 2] expected_feature_indices = [0, 1, 2, 0] sfc = SparseFeatureColumn(expected_example_indices, expected_feature_indices, None) self.assertTrue(isinstance(sfc.example_indices, tf.Tensor)) self.assertTrue(isinstance(sfc.feature_indices, tf.Tensor)) self.assertEqual(sfc.feature_values, None) with self._single_threaded_test_session(): self.assertAllEqual(expected_example_indices, sfc.example_indices.eval()) self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval()) expected_feature_values = [1.0, 2.0, 3.0, 4.0] sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0], expected_feature_values) with self._single_threaded_test_session(): self.assertAllEqual(expected_feature_values, sfc.feature_values.eval()) class SdcaFprintTest(SdcaModelTest): """Tests for the SdcaFprint op. This is one way of enforcing the platform-agnostic nature of SdcaFprint. Basically we are checking against exact values and this test could be running across different platforms. Note that it is fine for expected values to change in the future, if the implementation of SdcaFprint changes (ie this is *not* a frozen test). 
""" def testFprint(self): with self._single_threaded_test_session(): in_data = tf.constant(['abc', 'very looooooong string', 'def']) out_data = tf.sdca.sdca_fprint(in_data) self.assertAllEqual( [b'\x04l\x12\xd2\xaf\xb2\x809E\x9e\x02\x13', b'\x9f\x0f\x91P\x9aG.Ql\xf2Y\xf9', b'"0\xe00"\x18_\x08\x12?\xa0\x17'], out_data.eval()) class ShardedMutableHashTableTest(SdcaModelTest): """Tests for the _ShardedMutableHashTable class.""" def testShardedMutableHashTable(self): for num_shards in [1, 3, 10]: with self._single_threaded_test_session(): default_val = -1 keys = tf.constant(['brain', 'salad', 'surgery']) values = tf.constant([0, 1, 2], tf.int64) table = _ShardedMutableHashTable(tf.string, tf.int64, default_val, num_shards=num_shards) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = tf.constant(['brain', 'salad', 'tank']) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testExportSharded(self): with self._single_threaded_test_session(): default_val = -1 num_shards = 2 keys = tf.constant(['a1', 'b1', 'c2']) values = tf.constant([0, 1, 2], tf.int64) table = _ShardedMutableHashTable( tf.string, tf.int64, default_val, num_shards=num_shards) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) keys_list, values_list = table.export_sharded() self.assertAllEqual(num_shards, len(keys_list)) self.assertAllEqual(num_shards, len(values_list)) self.assertAllEqual(set([b'b1', b'c2']), set(keys_list[0].eval())) self.assertAllEqual([b'a1'], keys_list[1].eval()) self.assertAllEqual(set([1, 2]), set(values_list[0].eval())) self.assertAllEqual([0], values_list[1].eval()) if __name__ == '__main__': googletest.main()
apache-2.0
nikkitan/bitcoin
test/functional/rpc_deprecated.py
19
1182
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test deprecation of RPC calls.""" from test_framework.test_framework import BitcoinTestFramework # from test_framework.util import assert_raises_rpc_error class DeprecatedRpcTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True self.extra_args = [[], []] def run_test(self): # This test should be used to verify correct behaviour of deprecated # RPC methods with and without the -deprecatedrpc flags. For example: # # In set_test_params: # self.extra_args = [[], ["-deprecatedrpc=generate"]] # # In run_test: # self.log.info("Test generate RPC") # assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1) # self.nodes[1].generate(1) self.log.info("No tested deprecated RPC methods") if __name__ == '__main__': DeprecatedRpcTest().main()
mit
m039/Void
third-party/void-boost/tools/build/src/build/targets.py
8
61347
# Status: ported. # Base revision: 64488 # Copyright Vladimir Prus 2002-2007. # Copyright Rene Rivera 2006. # # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Supports 'abstract' targets, which are targets explicitly defined in Jamfile. # # Abstract targets are represented by classes derived from 'AbstractTarget' class. # The first abstract target is 'project_target', which is created for each # Jamfile, and can be obtained by the 'target' rule in the Jamfile's module. # (see project.jam). # # Project targets keep a list of 'MainTarget' instances. # A main target is what the user explicitly defines in a Jamfile. It is # possible to have several definitions for a main target, for example to have # different lists of sources for different platforms. So, main targets # keep a list of alternatives. # # Each alternative is an instance of 'AbstractTarget'. When a main target # subvariant is defined by some rule, that rule will decide what class to # use, create an instance of that class and add it to the list of alternatives # for the main target. # # Rules supplied by the build system will use only targets derived # from 'BasicTarget' class, which will provide some default behaviour. # There will be two classes derived from it, 'make-target', created by the # 'make' rule, and 'TypedTarget', created by rules such as 'exe' and 'dll'. 
# # +------------------------+ # |AbstractTarget | # +========================+ # |name | # |project | # | | # |generate(properties) = 0| # +-----------+------------+ # | # ^ # / \ # +-+-+ # | # | # +------------------------+------+------------------------------+ # | | | # | | | # +----------+-----------+ +------+------+ +------+-------+ # | project_target | | MainTarget | | BasicTarget | # +======================+ 1 * +=============+ alternatives +==============+ # | generate(properties) |o-----------+ generate |<>------------->| generate | # | main-target | +-------------+ | construct = 0| # +----------------------+ +--------------+ # | # ^ # / \ # +-+-+ # | # | # ...--+----------------+------------------+----------------+---+ # | | | | # | | | | # ... ---+-----+ +------+-------+ +------+------+ +--------+-----+ # | | TypedTarget | | make-target | | stage-target | # . +==============+ +=============+ +==============+ # . | construct | | construct | | construct | # +--------------+ +-------------+ +--------------+ import re import os.path import sys from b2.manager import get_manager from b2.util.utility import * import property, project, virtual_target, property_set, feature, generators, toolset from virtual_target import Subvariant from b2.exceptions import * from b2.util.sequence import unique from b2.util import path, bjam_signature, safe_isinstance, is_iterable_typed from b2.build import errors from b2.build.errors import user_error_checkpoint import b2.build.build_request as build_request import b2.util.set _re_separate_target_from_properties = re.compile (r'^([^<]*)(/(<.*))?$') class TargetRegistry: def __init__ (self): # All targets that are currently being built. # Only the key is id (target), the value is the actual object. 
self.targets_being_built_ = {} # Current indent for debugging messages self.indent_ = "" self.debug_building_ = "--debug-building" in bjam.variable("ARGV") self.targets_ = [] def main_target_alternative (self, target): """ Registers the specified target as a main target alternatives. Returns 'target'. """ assert isinstance(target, AbstractTarget) target.project ().add_alternative (target) return target def main_target_sources (self, sources, main_target_name, no_renaming=0): """Return the list of sources to use, if main target rule is invoked with 'sources'. If there are any objects in 'sources', they are treated as main target instances, and the name of such targets are adjusted to be '<name_of_this_target>__<name_of_source_target>'. Such renaming is disabled is non-empty value is passed for 'no-renaming' parameter.""" assert is_iterable_typed(sources, basestring) assert isinstance(main_target_name, basestring) assert isinstance(no_renaming, (int, bool)) result = [] for t in sources: t = b2.util.jam_to_value_maybe(t) if isinstance (t, AbstractTarget): name = t.name () if not no_renaming: name = main_target_name + '__' + name t.rename (name) # Inline targets are not built by default. 
p = t.project() p.mark_targets_as_explicit([name]) result.append(name) else: result.append (t) return result def main_target_requirements(self, specification, project): """Returns the requirement to use when declaring a main target, which are obtained by - translating all specified property paths, and - refining project requirements with the one specified for the target 'specification' are the properties xplicitly specified for a main target 'project' is the project where the main taret is to be declared.""" assert is_iterable_typed(specification, basestring) assert isinstance(project, ProjectTarget) # create a copy since the list is being modified specification = list(specification) specification.extend(toolset.requirements()) requirements = property_set.refine_from_user_input( project.get("requirements"), specification, project.project_module(), project.get("location")) return requirements def main_target_usage_requirements (self, specification, project): """ Returns the use requirement to use when declaraing a main target, which are obtained by - translating all specified property paths, and - adding project's usage requirements specification: Use-properties explicitly specified for a main target project: Project where the main target is to be declared """ assert is_iterable_typed(specification, basestring) assert isinstance(project, ProjectTarget) project_usage_requirements = project.get ('usage-requirements') # We don't use 'refine-from-user-input' because I'm not sure if: # - removing of parent's usage requirements makes sense # - refining of usage requirements is not needed, since usage requirements # are always free. 
usage_requirements = property_set.create_from_user_input( specification, project.project_module(), project.get("location")) return project_usage_requirements.add (usage_requirements) def main_target_default_build (self, specification, project): """ Return the default build value to use when declaring a main target, which is obtained by using specified value if not empty and parent's default build attribute otherwise. specification: Default build explicitly specified for a main target project: Project where the main target is to be declared """ assert is_iterable_typed(specification, basestring) assert isinstance(project, ProjectTarget) if specification: return property_set.create_with_validation(specification) else: return project.get ('default-build') def start_building (self, main_target_instance): """ Helper rules to detect cycles in main target references. """ assert isinstance(main_target_instance, MainTarget) if id(main_target_instance) in self.targets_being_built_: names = [] for t in self.targets_being_built_.values() + [main_target_instance]: names.append (t.full_name()) get_manager().errors()("Recursion in main target references\n") self.targets_being_built_[id(main_target_instance)] = main_target_instance def end_building (self, main_target_instance): assert isinstance(main_target_instance, MainTarget) assert (id(main_target_instance) in self.targets_being_built_) del self.targets_being_built_ [id (main_target_instance)] def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements): """ Creates a TypedTarget with the specified properties. The 'name', 'sources', 'requirements', 'default_build' and 'usage_requirements' are assumed to be in the form specified by the user in Jamfile corresponding to 'project'. 
""" assert isinstance(type, basestring) assert isinstance(project, ProjectTarget) assert is_iterable_typed(sources, basestring) assert is_iterable_typed(requirements, basestring) assert is_iterable_typed(default_build, basestring) return self.main_target_alternative (TypedTarget (name, project, type, self.main_target_sources (sources, name), self.main_target_requirements (requirements, project), self.main_target_default_build (default_build, project), self.main_target_usage_requirements (usage_requirements, project))) def increase_indent(self): self.indent_ += " " def decrease_indent(self): self.indent_ = self.indent_[0:-4] def logging(self): return self.debug_building_ def log(self, message): if self.debug_building_: print self.indent_ + message def push_target(self, target): assert isinstance(target, AbstractTarget) self.targets_.append(target) def pop_target(self): self.targets_ = self.targets_[:-1] def current(self): return self.targets_[0] class GenerateResult: def __init__ (self, ur=None, targets=None): if not targets: targets = [] assert isinstance(ur, property_set.PropertySet) or ur is None assert is_iterable_typed(targets, virtual_target.VirtualTarget) self.__usage_requirements = ur self.__targets = targets if not self.__usage_requirements: self.__usage_requirements = property_set.empty () def usage_requirements (self): return self.__usage_requirements def targets (self): return self.__targets def extend (self, other): assert (isinstance (other, GenerateResult)) self.__usage_requirements = self.__usage_requirements.add (other.usage_requirements ()) self.__targets.extend (other.targets ()) class AbstractTarget: """ Base class for all abstract targets. """ def __init__ (self, name, project, manager = None): """ manager: the Manager object name: name of the target project: the project target to which this one belongs manager:the manager object. 
If none, uses project.manager () """ assert isinstance(name, basestring) assert (isinstance (project, ProjectTarget)) # Note: it might seem that we don't need either name or project at all. # However, there are places where we really need it. One example is error # messages which should name problematic targets. Another is setting correct # paths for sources and generated files. # Why allow manager to be specified? Because otherwise project target could not derive # from this class. if manager: self.manager_ = manager else: self.manager_ = project.manager () self.name_ = name self.project_ = project self.location_ = errors.nearest_user_location() def manager (self): return self.manager_ def name (self): """ Returns the name of this target. """ return self.name_ def project (self): """ Returns the project for this target. """ return self.project_ def location (self): """ Return the location where the target was declared. """ return self.location_ def full_name (self): """ Returns a user-readable name for this target. """ location = self.project ().get ('location') return location + '/' + self.name_ def generate (self, property_set): """ Takes a property set. Generates virtual targets for this abstract target, using the specified properties, unless a different value of some feature is required by the target. On success, returns a GenerateResult instance with: - a property_set with the usage requirements to be applied to dependents - a list of produced virtual targets, which may be empty. If 'property_set' is empty, performs default build of this target, in a way specific to derived class. 
""" raise BaseException ("method should be defined in derived classes") def rename (self, new_name): assert isinstance(new_name, basestring) self.name_ = new_name class ProjectTarget (AbstractTarget): """ Project target class (derived from 'AbstractTarget') This class these responsibilities: - maintaining a list of main target in this project and building it Main targets are constructed in two stages: - When Jamfile is read, a number of calls to 'add_alternative' is made. At that time, alternatives can also be renamed to account for inline targets. - The first time 'main-target' or 'has-main-target' rule is called, all alternatives are enumerated an main targets are created. """ def __init__ (self, manager, name, project_module, parent_project, requirements, default_build): assert isinstance(project_module, basestring) assert isinstance(parent_project, (ProjectTarget, type(None))) assert isinstance(requirements, (type(None), property_set.PropertySet)) assert isinstance(default_build, (type(None), property_set.PropertySet)) AbstractTarget.__init__ (self, name, self, manager) self.project_module_ = project_module self.location_ = manager.projects().attribute (project_module, 'location') self.requirements_ = requirements self.default_build_ = default_build self.build_dir_ = None # A cache of IDs self.ids_cache_ = {} # True is main targets have already been built. self.built_main_targets_ = False # A list of the registered alternatives for this project. self.alternatives_ = [] # A map from main target name to the target corresponding # to it. self.main_target_ = {} # Targets marked as explicit. self.explicit_targets_ = set() # Targets marked as always self.always_targets_ = set() # The constants defined for this project. self.constants_ = {} # Whether targets for all main target are already created. self.built_main_targets_ = 0 if parent_project: self.inherit (parent_project) # TODO: This is needed only by the 'make' rule. 
Need to find the # way to make 'make' work without this method. def project_module (self): return self.project_module_ def get (self, attribute): assert isinstance(attribute, basestring) return self.manager().projects().attribute( self.project_module_, attribute) def build_dir (self): if not self.build_dir_: self.build_dir_ = self.get ('build-dir') if not self.build_dir_: self.build_dir_ = os.path.join(self.project_.get ('location'), 'bin') return self.build_dir_ def generate (self, ps): """ Generates all possible targets contained in this project. """ assert isinstance(ps, property_set.PropertySet) self.manager_.targets().log( "Building project '%s' with '%s'" % (self.name (), str(ps))) self.manager_.targets().increase_indent () result = GenerateResult () for t in self.targets_to_build (): g = t.generate (ps) result.extend (g) self.manager_.targets().decrease_indent () return result def targets_to_build (self): """ Computes and returns a list of AbstractTarget instances which must be built when this project is built. """ result = [] if not self.built_main_targets_: self.build_main_targets () # Collect all main targets here, except for "explicit" ones. for n, t in self.main_target_.iteritems (): if not t.name () in self.explicit_targets_: result.append (t) # Collect all projects referenced via "projects-to-build" attribute. self_location = self.get ('location') for pn in self.get ('projects-to-build'): result.append (self.find(pn + "/")) return result def mark_targets_as_explicit (self, target_names): """Add 'target' to the list of targets in this project that should be build only by explicit request.""" # Record the name of the target, not instance, since this # rule is called before main target instaces are created. 
assert is_iterable_typed(target_names, basestring) self.explicit_targets_.update(target_names) def mark_targets_as_always(self, target_names): assert is_iterable_typed(target_names, basestring) self.always_targets_.update(target_names) def add_alternative (self, target_instance): """ Add new target alternative. """ assert isinstance(target_instance, AbstractTarget) if self.built_main_targets_: raise IllegalOperation ("add-alternative called when main targets are already created for project '%s'" % self.full_name ()) self.alternatives_.append (target_instance) def main_target (self, name): assert isinstance(name, basestring) if not self.built_main_targets_: self.build_main_targets() return self.main_target_[name] def has_main_target (self, name): """Tells if a main target with the specified name exists.""" assert isinstance(name, basestring) if not self.built_main_targets_: self.build_main_targets() return name in self.main_target_ def create_main_target (self, name): """ Returns a 'MainTarget' class instance corresponding to the 'name'. """ assert isinstance(name, basestring) if not self.built_main_targets_: self.build_main_targets () return self.main_targets_.get (name, None) def find_really(self, id): """ Find and return the target with the specified id, treated relative to self. """ assert isinstance(id, basestring) result = None current_location = self.get ('location') __re_split_project_target = re.compile (r'(.*)//(.*)') split = __re_split_project_target.match (id) project_part = None target_part = None if split: project_part = split.group(1) target_part = split.group(2) if not target_part: get_manager().errors()( 'Project ID, "{}", is not a valid target reference. There should ' 'be either a target name after the "//" or the "//" should be removed ' 'from the target reference.' .format(id) ) project_registry = self.project_.manager ().projects () extra_error_message = '' if project_part: # There's explicit project part in id. 
Looks up the # project and pass the request to it. pm = project_registry.find (project_part, current_location) if pm: project_target = project_registry.target (pm) result = project_target.find (target_part, no_error=1) else: extra_error_message = "error: could not find project '$(project_part)'" else: # Interpret target-name as name of main target # Need to do this before checking for file. Consider this: # # exe test : test.cpp ; # install s : test : <location>. ; # # After first build we'll have target 'test' in Jamfile and file # 'test' on the disk. We need target to override the file. result = None if self.has_main_target(id): result = self.main_target(id) if not result: result = FileReference (self.manager_, id, self.project_) if not result.exists (): # File actually does not exist. # Reset 'target' so that an error is issued. result = None if not result: # Interpret id as project-id project_module = project_registry.find (id, current_location) if project_module: result = project_registry.target (project_module) return result def find (self, id, no_error = False): assert isinstance(id, basestring) assert isinstance(no_error, int) # also matches bools v = self.ids_cache_.get (id, None) if not v: v = self.find_really (id) self.ids_cache_ [id] = v if v or no_error: return v raise BaseException ("Unable to find file or target named '%s'\nreferred from project at '%s'" % (id, self.get ('location'))) def build_main_targets (self): self.built_main_targets_ = True for a in self.alternatives_: name = a.name () if name not in self.main_target_: t = MainTarget (name, self.project_) self.main_target_ [name] = t if name in self.always_targets_: a.always() self.main_target_ [name].add_alternative (a) def add_constant(self, name, value, path=0): """Adds a new constant for this project. The constant will be available for use in Jamfile module for this project. If 'path' is true, the constant will be interpreted relatively to the location of project. 
""" assert isinstance(name, basestring) assert is_iterable_typed(value, basestring) assert isinstance(path, int) # will also match bools if path: l = self.location_ if not l: # Project corresponding to config files do not have # 'location' attribute, but do have source location. # It might be more reasonable to make every project have # a location and use some other approach to prevent buildable # targets in config files, but that's for later. l = self.get('source-location') value = os.path.join(l, value[0]) # Now make the value absolute path. Constants should be in # platform-native form. value = [os.path.normpath(os.path.join(os.getcwd(), value))] self.constants_[name] = value bjam.call("set-variable", self.project_module(), name, value) def inherit(self, parent_project): assert isinstance(parent_project, ProjectTarget) for c in parent_project.constants_: # No need to pass the type. Path constants were converted to # absolute paths already by parent. self.add_constant(c, parent_project.constants_[c]) # Import rules from parent this_module = self.project_module() parent_module = parent_project.project_module() rules = bjam.call("RULENAMES", parent_module) if not rules: rules = [] user_rules = [x for x in rules if x not in self.manager().projects().project_rules().all_names()] if user_rules: bjam.call("import-rules-from-parent", parent_module, this_module, user_rules) class MainTarget (AbstractTarget): """ A named top-level target in Jamfile. """ def __init__ (self, name, project): AbstractTarget.__init__ (self, name, project) self.alternatives_ = [] self.best_alternative = None self.default_build_ = property_set.empty () def add_alternative (self, target): """ Add a new alternative for this target. 
""" assert isinstance(target, BasicTarget) d = target.default_build () if self.alternatives_ and self.default_build_ != d: get_manager().errors()("default build must be identical in all alternatives\n" "main target is '%s'\n" "with '%s'\n" "differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ())) else: self.default_build_ = d self.alternatives_.append (target) def __select_alternatives (self, property_set_, debug): """ Returns the best viable alternative for this property_set See the documentation for selection rules. # TODO: shouldn't this be 'alternative' (singular)? """ # When selecting alternatives we have to consider defaults, # for example: # lib l : l.cpp : <variant>debug ; # lib l : l_opt.cpp : <variant>release ; # won't work unless we add default value <variant>debug. assert isinstance(property_set_, property_set.PropertySet) assert isinstance(debug, int) # also matches bools property_set_ = property_set_.add_defaults () # The algorithm: we keep the current best viable alternative. # When we've got new best viable alternative, we compare it # with the current one. 
best = None best_properties = None if len (self.alternatives_) == 0: return None if len (self.alternatives_) == 1: return self.alternatives_ [0] if debug: print "Property set for selection:", property_set_ for v in self.alternatives_: properties = v.match (property_set_, debug) if properties is not None: if not best: best = v best_properties = properties else: if b2.util.set.equal (properties, best_properties): return None elif b2.util.set.contains (properties, best_properties): # Do nothing, this alternative is worse pass elif b2.util.set.contains (best_properties, properties): best = v best_properties = properties else: return None return best def apply_default_build (self, property_set_): assert isinstance(property_set_, property_set.PropertySet) return apply_default_build(property_set_, self.default_build_) def generate (self, ps): """ Select an alternative for this main target, by finding all alternatives which requirements are satisfied by 'properties' and picking the one with longest requirements set. Returns the result of calling 'generate' on that alternative. """ assert isinstance(ps, property_set.PropertySet) self.manager_.targets ().start_building (self) # We want composite properties in build request act as if # all the properties it expands too are explicitly specified. ps = ps.expand () all_property_sets = self.apply_default_build (ps) result = GenerateResult () for p in all_property_sets: result.extend (self.__generate_really (p)) self.manager_.targets ().end_building (self) return result def __generate_really (self, prop_set): """ Generates the main target with the given property set and returns a list which first element is property_set object containing usage_requirements of generated target and with generated virtual target in other elements. It's possible that no targets are generated. 
""" assert isinstance(prop_set, property_set.PropertySet) best_alternative = self.__select_alternatives (prop_set, debug=0) self.best_alternative = best_alternative if not best_alternative: # FIXME: revive. # self.__select_alternatives(prop_set, debug=1) self.manager_.errors()( "No best alternative for '%s'.\n" % (self.full_name(),)) result = best_alternative.generate (prop_set) # Now return virtual targets for the only alternative return result def rename(self, new_name): assert isinstance(new_name, basestring) AbstractTarget.rename(self, new_name) for a in self.alternatives_: a.rename(new_name) class FileReference (AbstractTarget): """ Abstract target which refers to a source file. This is artificial creature; it's usefull so that sources to a target can be represented as list of abstract target instances. """ def __init__ (self, manager, file, project): AbstractTarget.__init__ (self, file, project) self.file_location_ = None def generate (self, properties): return GenerateResult (None, [ self.manager_.virtual_targets ().from_file ( self.name_, self.location(), self.project_) ]) def exists (self): """ Returns true if the referred file really exists. """ if self.location (): return True else: return False def location (self): # Returns the location of target. Needed by 'testing.jam' if not self.file_location_: source_location = self.project_.get('source-location') for src_dir in source_location: location = os.path.join(src_dir, self.name()) if os.path.isfile(location): self.file_location_ = src_dir self.file_path = location break return self.file_location_ def resolve_reference(target_reference, project): """ Given a target_reference, made in context of 'project', returns the AbstractTarget instance that is referred to, as well as properties explicitly specified for this reference. 
""" # Separate target name from properties override assert isinstance(target_reference, basestring) assert isinstance(project, ProjectTarget) split = _re_separate_target_from_properties.match (target_reference) if not split: raise BaseException ("Invalid reference: '%s'" % target_reference) id = split.group (1) sproperties = [] if split.group (3): sproperties = property.create_from_strings(feature.split(split.group(3))) sproperties = feature.expand_composites(sproperties) # Find the target target = project.find (id) return (target, property_set.create(sproperties)) def generate_from_reference(target_reference, project, property_set_): """ Attempts to generate the target given by target reference, which can refer both to a main target or to a file. Returns a list consisting of - usage requirements - generated virtual targets, if any target_reference: Target reference project: Project where the reference is made property_set: Properties of the main target that makes the reference """ assert isinstance(target_reference, basestring) assert isinstance(project, ProjectTarget) assert isinstance(property_set_, property_set.PropertySet) target, sproperties = resolve_reference(target_reference, project) # Take properties which should be propagated and refine them # with source-specific requirements. propagated = property_set_.propagated() rproperties = propagated.refine(sproperties) return target.generate(rproperties) class BasicTarget (AbstractTarget): """ Implements the most standard way of constructing main target alternative from sources. Allows sources to be either file or other main target and handles generation of those dependency targets. 
""" def __init__ (self, name, project, sources, requirements = None, default_build = None, usage_requirements = None): assert is_iterable_typed(sources, basestring) assert isinstance(requirements, property_set.PropertySet) or requirements is None assert isinstance(default_build, property_set.PropertySet) or default_build is None assert isinstance(usage_requirements, property_set.PropertySet) or usage_requirements is None AbstractTarget.__init__ (self, name, project) for s in sources: if get_grist (s): raise InvalidSource ("property '%s' found in the 'sources' parameter for '%s'" % (s, name)) self.sources_ = sources if not requirements: requirements = property_set.empty () self.requirements_ = requirements if not default_build: default_build = property_set.empty () self.default_build_ = default_build if not usage_requirements: usage_requirements = property_set.empty () self.usage_requirements_ = usage_requirements # A cache for resolved references self.source_targets_ = None # A cache for generated targets self.generated_ = {} # A cache for build requests self.request_cache = {} # Result of 'capture_user_context' has everything. For example, if this # target is declare as result of loading Jamfile which was loaded when # building target B which was requested from A, then we'll have A, B and # Jamroot location in context. We only care about Jamroot location, most # of the times. self.user_context_ = self.manager_.errors().capture_user_context()[-1:] self.always_ = False def always(self): self.always_ = True def sources (self): """ Returns the list of AbstractTargets which are used as sources. The extra properties specified for sources are not represented. The only used of this rule at the moment is the '--dump-tests' feature of the test system. 
""" if self.source_targets_ == None: self.source_targets_ = [] for s in self.sources_: self.source_targets_.append(resolve_reference(s, self.project_)[0]) return self.source_targets_ def requirements (self): return self.requirements_ def default_build (self): return self.default_build_ def common_properties (self, build_request, requirements): """ Given build request and requirements, return properties common to dependency build request and target build properties. """ # For optimization, we add free unconditional requirements directly, # without using complex algorithsm. # This gives the complex algorithm better chance of caching results. # The exact effect of this "optimization" is no longer clear assert isinstance(build_request, property_set.PropertySet) assert isinstance(requirements, property_set.PropertySet) free_unconditional = [] other = [] for p in requirements.all(): if p.feature.free and not p.condition and p.feature.name != 'conditional': free_unconditional.append(p) else: other.append(p) other = property_set.create(other) key = (build_request, other) if key not in self.request_cache: self.request_cache[key] = self.__common_properties2 (build_request, other) return self.request_cache[key].add_raw(free_unconditional) # Given 'context' -- a set of already present properties, and 'requirements', # decide which extra properties should be applied to 'context'. # For conditional requirements, this means evaluating condition. For # indirect conditional requirements, this means calling a rule. Ordinary # requirements are always applied. # # Handles situation where evaluating one conditional requirements affects # condition of another conditional requirements, for example: # # <toolset>gcc:<variant>release <variant>release:<define>RELEASE # # If 'what' is 'refined' returns context refined with new requirements. # If 'what' is 'added' returns just the requirements that must be applied. 
def evaluate_requirements(self, requirements, context, what): # Apply non-conditional requirements. # It's possible that that further conditional requirement change # a value set by non-conditional requirements. For example: # # exe a : a.cpp : <threading>single <toolset>foo:<threading>multi ; # # I'm not sure if this should be an error, or not, especially given that # # <threading>single # # might come from project's requirements. assert isinstance(requirements, property_set.PropertySet) assert isinstance(context, property_set.PropertySet) assert isinstance(what, basestring) unconditional = feature.expand(requirements.non_conditional()) context = context.refine(property_set.create(unconditional)) # We've collected properties that surely must be present in common # properties. We now try to figure out what other properties # should be added in order to satisfy rules (4)-(6) from the docs. conditionals = property_set.create(requirements.conditional()) # It's supposed that #conditionals iterations # should be enough for properties to propagate along conditions in any # direction. max_iterations = len(conditionals.all()) +\ len(requirements.get("<conditional>")) + 1 added_requirements = [] current = context # It's assumed that ordinary conditional requirements can't add # <indirect-conditional> properties, and that rules referred # by <indirect-conditional> properties can't add new # <indirect-conditional> properties. So the list of indirect conditionals # does not change. indirect = requirements.get("<conditional>") ok = 0 for i in range(0, max_iterations): e = conditionals.evaluate_conditionals(current).all()[:] # Evaluate indirect conditionals. for i in indirect: new = None i = b2.util.jam_to_value_maybe(i) if callable(i): # This is Python callable, yeah. new = i(current) else: # Name of bjam function. Because bjam is unable to handle # list of Property, pass list of strings. 
br = b2.util.call_jam_function(i[1:], [str(p) for p in current.all()]) if br: new = property.create_from_strings(br) if new: new = property.translate_paths(new, self.project().location()) e.extend(new) if e == added_requirements: # If we got the same result, we've found final properties. ok = 1 break else: # Oops, results of evaluation of conditionals has changed. # Also 'current' contains leftover from previous evaluation. # Recompute 'current' using initial properties and conditional # requirements. added_requirements = e current = context.refine(property_set.create(feature.expand(e))) if not ok: self.manager().errors()("Can't evaluate conditional properties " + str(conditionals)) if what == "added": return property_set.create(unconditional + added_requirements) elif what == "refined": return current else: self.manager().errors("Invalid value of the 'what' parameter") def __common_properties2(self, build_request, requirements): # This guarantees that default properties are present # in result, unless they are overrided by some requirement. # TODO: There is possibility that we've added <foo>bar, which is composite # and expands to <foo2>bar2, but default value of <foo2> is not bar2, # in which case it's not clear what to do. # assert isinstance(build_request, property_set.PropertySet) assert isinstance(requirements, property_set.PropertySet) build_request = build_request.add_defaults() # Featured added by 'add-default' can be composite and expand # to features without default values -- so they are not added yet. # It could be clearer/faster to expand only newly added properties # but that's not critical. build_request = build_request.expand() return self.evaluate_requirements(requirements, build_request, "refined") def match (self, property_set_, debug): """ Returns the alternative condition for this alternative, if the condition is satisfied by 'property_set'. """ # The condition is composed of all base non-conditional properties. 
# It's not clear if we should expand 'self.requirements_' or not. # For one thing, it would be nice to be able to put # <toolset>msvc-6.0 # in requirements. # On the other hand, if we have <variant>release in condition it # does not make sense to require <optimization>full to be in # build request just to select this variant. assert isinstance(property_set_, property_set.PropertySet) bcondition = self.requirements_.base () ccondition = self.requirements_.conditional () condition = b2.util.set.difference (bcondition, ccondition) if debug: print " next alternative: required properties:", [str(p) for p in condition] if b2.util.set.contains (condition, property_set_.all()): if debug: print " matched" return condition else: return None def generate_dependency_targets (self, target_ids, property_set_): assert is_iterable_typed(target_ids, basestring) assert isinstance(property_set_, property_set.PropertySet) targets = [] usage_requirements = [] for id in target_ids: result = generate_from_reference(id, self.project_, property_set_) targets += result.targets() usage_requirements += result.usage_requirements().all() return (targets, usage_requirements) def generate_dependency_properties(self, properties, ps): """ Takes a target reference, which might be either target id or a dependency property, and generates that target using 'property_set' as build request. Returns a tuple (result, usage_requirements). """ assert is_iterable_typed(properties, property.Property) assert isinstance(ps, property_set.PropertySet) result_properties = [] usage_requirements = [] for p in properties: result = generate_from_reference(p.value, self.project_, ps) for t in result.targets(): result_properties.append(property.Property(p.feature, t)) usage_requirements += result.usage_requirements().all() return (result_properties, usage_requirements) @user_error_checkpoint def generate (self, ps): """ Determines final build properties, generates sources, and calls 'construct'. 
This method should not be overridden. """ assert isinstance(ps, property_set.PropertySet) self.manager_.errors().push_user_context( "Generating target " + self.full_name(), self.user_context_) if self.manager().targets().logging(): self.manager().targets().log( "Building target '%s'" % self.name_) self.manager().targets().increase_indent () self.manager().targets().log( "Build request: '%s'" % str (ps.raw ())) cf = self.manager().command_line_free_features() self.manager().targets().log( "Command line free features: '%s'" % str (cf.raw ())) self.manager().targets().log( "Target requirements: %s'" % str (self.requirements().raw ())) self.manager().targets().push_target(self) if ps not in self.generated_: # Apply free features form the command line. If user # said # define=FOO # he most likely want this define to be set for all compiles. ps = ps.refine(self.manager().command_line_free_features()) rproperties = self.common_properties (ps, self.requirements_) self.manager().targets().log( "Common properties are '%s'" % str (rproperties)) if rproperties.get("<build>") != ["no"]: result = GenerateResult () properties = rproperties.non_dependency () (p, u) = self.generate_dependency_properties (rproperties.dependency (), rproperties) properties += p assert all(isinstance(p, property.Property) for p in properties) usage_requirements = u (source_targets, u) = self.generate_dependency_targets (self.sources_, rproperties) usage_requirements += u self.manager_.targets().log( "Usage requirements for '%s' are '%s'" % (self.name_, usage_requirements)) # FIXME: rproperties = property_set.create(properties + usage_requirements) usage_requirements = property_set.create (usage_requirements) self.manager_.targets().log( "Build properties: '%s'" % str(rproperties)) source_targets += rproperties.get('<source>') # We might get duplicate sources, for example if # we link to two library which have the same <library> in # usage requirements. 
# Use stable sort, since for some targets the order is # important. E.g. RUN_PY target need python source to come # first. source_targets = unique(source_targets, stable=True) # FIXME: figure why this call messes up source_targets in-place result = self.construct (self.name_, source_targets[:], rproperties) if result: assert len(result) == 2 gur = result [0] result = result [1] if self.always_: for t in result: t.always() s = self.create_subvariant ( result, self.manager().virtual_targets().recent_targets(), ps, source_targets, rproperties, usage_requirements) self.manager().virtual_targets().clear_recent_targets() ur = self.compute_usage_requirements (s) ur = ur.add (gur) s.set_usage_requirements (ur) self.manager_.targets().log ( "Usage requirements from '%s' are '%s'" % (self.name(), str(rproperties))) self.generated_[ps] = GenerateResult (ur, result) else: self.generated_[ps] = GenerateResult (property_set.empty(), []) else: # If we just see <build>no, we cannot produce any reasonable # diagnostics. The code that adds this property is expected # to explain why a target is not built, for example using # the configure.log-component-configuration function. # If this target fails to build, add <build>no to properties # to cause any parent target to fail to build. Except that it # - does not work now, since we check for <build>no only in # common properties, but not in properties that came from # dependencies # - it's not clear if that's a good idea anyway. The alias # target, for example, should not fail to build if a dependency # fails. self.generated_[ps] = GenerateResult( property_set.create(["<build>no"]), []) else: self.manager().targets().log ("Already built") self.manager().targets().pop_target() self.manager().targets().decrease_indent() return self.generated_[ps] def compute_usage_requirements (self, subvariant): """ Given the set of generated targets, and refined build properties, determines and sets appripriate usage requirements on those targets. 
""" assert isinstance(subvariant, virtual_target.Subvariant) rproperties = subvariant.build_properties () xusage_requirements =self.evaluate_requirements( self.usage_requirements_, rproperties, "added") # We generate all dependency properties and add them, # as well as their usage requirements, to result. (r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties) extra = r1 + r2 result = property_set.create (xusage_requirements.non_dependency () + extra) # Propagate usage requirements we've got from sources, except # for the <pch-header> and <pch-file> features. # # That feature specifies which pch file to use, and should apply # only to direct dependents. Consider: # # pch pch1 : ... # lib lib1 : ..... pch1 ; # pch pch2 : # lib lib2 : pch2 lib1 ; # # Here, lib2 should not get <pch-header> property from pch1. # # Essentially, when those two features are in usage requirements, # they are propagated only to direct dependents. We might need # a more general mechanism, but for now, only those two # features are special. 
properties = [] for p in subvariant.sources_usage_requirements().all(): if p.feature.name not in ('pch-header', 'pch-file'): properties.append(p) if 'shared' in rproperties.get('link'): new_properties = [] for p in properties: if p.feature.name != 'library': new_properties.append(p) properties = new_properties result = result.add_raw(properties) return result def create_subvariant (self, root_targets, all_targets, build_request, sources, rproperties, usage_requirements): """Creates a new subvariant-dg instances for 'targets' - 'root-targets' the virtual targets will be returned to dependents - 'all-targets' all virtual targets created while building this main target - 'build-request' is property-set instance with requested build properties""" assert is_iterable_typed(root_targets, virtual_target.VirtualTarget) assert is_iterable_typed(all_targets, virtual_target.VirtualTarget) assert isinstance(build_request, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) assert isinstance(rproperties, property_set.PropertySet) assert isinstance(usage_requirements, property_set.PropertySet) for e in root_targets: e.root (True) s = Subvariant (self, build_request, sources, rproperties, usage_requirements, all_targets) for v in all_targets: if not v.creating_subvariant(): v.creating_subvariant(s) return s def construct (self, name, source_targets, properties): """ Constructs the virtual targets for this abstract targets and the dependecy graph. Returns a tuple consisting of the properties and the list of virtual targets. Should be overrided in derived classes. 
""" raise BaseException ("method should be defined in derived classes") class TypedTarget (BasicTarget): import generators def __init__ (self, name, project, type, sources, requirements, default_build, usage_requirements): assert isinstance(type, basestring) BasicTarget.__init__ (self, name, project, sources, requirements, default_build, usage_requirements) self.type_ = type def __jam_repr__(self): return b2.util.value_to_jam(self) def type (self): return self.type_ def construct (self, name, source_targets, prop_set): assert isinstance(name, basestring) assert is_iterable_typed(source_targets, virtual_target.VirtualTarget) assert isinstance(prop_set, property_set.PropertySet) r = generators.construct (self.project_, os.path.splitext(name)[0], self.type_, prop_set.add_raw(['<main-target-type>' + self.type_]), source_targets, True) if not r: print "warning: Unable to construct '%s'" % self.full_name () # Are there any top-level generators for this type/property set. if not generators.find_viable_generators (self.type_, prop_set): print "error: no generators were found for type '" + self.type_ + "'" print "error: and the requested properties" print "error: make sure you've configured the needed tools" print "See http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html" print "To debug this problem, try the --debug-generators option." sys.exit(1) return r def apply_default_build(property_set_, default_build): # 1. First, see what properties from default_build # are already present in property_set. assert isinstance(property_set_, property_set.PropertySet) assert isinstance(default_build, property_set.PropertySet) defaults_to_apply = [] for d in default_build.all(): if not property_set_.get(d.feature): defaults_to_apply.append(d) # 2. If there's any defaults to be applied, form the new # build request. Pass it throw 'expand-no-defaults', since # default_build might contain "release debug", which will # result in two property_sets. 
result = [] if defaults_to_apply: # We have to compress subproperties here to prevent # property lists like: # # <toolset>msvc <toolset-msvc:version>7.1 <threading>multi # # from being expanded into: # # <toolset-msvc:version>7.1/<threading>multi # <toolset>msvc/<toolset-msvc:version>7.1/<threading>multi # # due to cross-product property combination. That may # be an indication that # build_request.expand-no-defaults is the wrong rule # to use here. properties = build_request.expand_no_defaults( [property_set.create([p]) for p in feature.compress_subproperties(property_set_.all()) + defaults_to_apply] ) if properties: for p in properties: result.append(property_set.create(feature.expand(p.all()))) else: result = [property_set.empty()] else: result.append (property_set_) return result def create_typed_metatarget(name, type, sources, requirements, default_build, usage_requirements): assert isinstance(name, basestring) assert isinstance(type, basestring) assert is_iterable_typed(requirements, basestring) assert is_iterable_typed(default_build, basestring) assert is_iterable_typed(usage_requirements, basestring) from b2.manager import get_manager t = get_manager().targets() project = get_manager().projects().current() return t.main_target_alternative( TypedTarget(name, project, type, t.main_target_sources(sources, name), t.main_target_requirements(requirements, project), t.main_target_default_build(default_build, project), t.main_target_usage_requirements(usage_requirements, project))) def create_metatarget(klass, name, sources, requirements=[], default_build=[], usage_requirements=[]): assert isinstance(name, basestring) assert is_iterable_typed(sources, basestring) assert is_iterable_typed(requirements, basestring) assert is_iterable_typed(default_build, basestring) assert is_iterable_typed(usage_requirements, basestring) from b2.manager import get_manager t = get_manager().targets() project = get_manager().projects().current() return t.main_target_alternative( 
klass(name, project, t.main_target_sources(sources, name), t.main_target_requirements(requirements, project), t.main_target_default_build(default_build, project), t.main_target_usage_requirements(usage_requirements, project))) def metatarget_function_for_class(class_): @bjam_signature((["name"], ["sources", "*"], ["requirements", "*"], ["default_build", "*"], ["usage_requirements", "*"])) def create_metatarget(name, sources, requirements = [], default_build = None, usage_requirements = []): from b2.manager import get_manager t = get_manager().targets() project = get_manager().projects().current() return t.main_target_alternative( class_(name, project, t.main_target_sources(sources, name), t.main_target_requirements(requirements, project), t.main_target_default_build(default_build, project), t.main_target_usage_requirements(usage_requirements, project))) return create_metatarget
mit
WebSpider/SickRage
lib/unidecode/x06f.py
252
4650
data = ( 'Qing ', # 0x00 'Yu ', # 0x01 'Piao ', # 0x02 'Ji ', # 0x03 'Ya ', # 0x04 'Jiao ', # 0x05 'Qi ', # 0x06 'Xi ', # 0x07 'Ji ', # 0x08 'Lu ', # 0x09 'Lu ', # 0x0a 'Long ', # 0x0b 'Jin ', # 0x0c 'Guo ', # 0x0d 'Cong ', # 0x0e 'Lou ', # 0x0f 'Zhi ', # 0x10 'Gai ', # 0x11 'Qiang ', # 0x12 'Li ', # 0x13 'Yan ', # 0x14 'Cao ', # 0x15 'Jiao ', # 0x16 'Cong ', # 0x17 'Qun ', # 0x18 'Tuan ', # 0x19 'Ou ', # 0x1a 'Teng ', # 0x1b 'Ye ', # 0x1c 'Xi ', # 0x1d 'Mi ', # 0x1e 'Tang ', # 0x1f 'Mo ', # 0x20 'Shang ', # 0x21 'Han ', # 0x22 'Lian ', # 0x23 'Lan ', # 0x24 'Wa ', # 0x25 'Li ', # 0x26 'Qian ', # 0x27 'Feng ', # 0x28 'Xuan ', # 0x29 'Yi ', # 0x2a 'Man ', # 0x2b 'Zi ', # 0x2c 'Mang ', # 0x2d 'Kang ', # 0x2e 'Lei ', # 0x2f 'Peng ', # 0x30 'Shu ', # 0x31 'Zhang ', # 0x32 'Zhang ', # 0x33 'Chong ', # 0x34 'Xu ', # 0x35 'Huan ', # 0x36 'Kuo ', # 0x37 'Jian ', # 0x38 'Yan ', # 0x39 'Chuang ', # 0x3a 'Liao ', # 0x3b 'Cui ', # 0x3c 'Ti ', # 0x3d 'Yang ', # 0x3e 'Jiang ', # 0x3f 'Cong ', # 0x40 'Ying ', # 0x41 'Hong ', # 0x42 'Xun ', # 0x43 'Shu ', # 0x44 'Guan ', # 0x45 'Ying ', # 0x46 'Xiao ', # 0x47 '[?] ', # 0x48 '[?] 
', # 0x49 'Xu ', # 0x4a 'Lian ', # 0x4b 'Zhi ', # 0x4c 'Wei ', # 0x4d 'Pi ', # 0x4e 'Jue ', # 0x4f 'Jiao ', # 0x50 'Po ', # 0x51 'Dang ', # 0x52 'Hui ', # 0x53 'Jie ', # 0x54 'Wu ', # 0x55 'Pa ', # 0x56 'Ji ', # 0x57 'Pan ', # 0x58 'Gui ', # 0x59 'Xiao ', # 0x5a 'Qian ', # 0x5b 'Qian ', # 0x5c 'Xi ', # 0x5d 'Lu ', # 0x5e 'Xi ', # 0x5f 'Xuan ', # 0x60 'Dun ', # 0x61 'Huang ', # 0x62 'Min ', # 0x63 'Run ', # 0x64 'Su ', # 0x65 'Liao ', # 0x66 'Zhen ', # 0x67 'Zhong ', # 0x68 'Yi ', # 0x69 'Di ', # 0x6a 'Wan ', # 0x6b 'Dan ', # 0x6c 'Tan ', # 0x6d 'Chao ', # 0x6e 'Xun ', # 0x6f 'Kui ', # 0x70 'Yie ', # 0x71 'Shao ', # 0x72 'Tu ', # 0x73 'Zhu ', # 0x74 'San ', # 0x75 'Hei ', # 0x76 'Bi ', # 0x77 'Shan ', # 0x78 'Chan ', # 0x79 'Chan ', # 0x7a 'Shu ', # 0x7b 'Tong ', # 0x7c 'Pu ', # 0x7d 'Lin ', # 0x7e 'Wei ', # 0x7f 'Se ', # 0x80 'Se ', # 0x81 'Cheng ', # 0x82 'Jiong ', # 0x83 'Cheng ', # 0x84 'Hua ', # 0x85 'Jiao ', # 0x86 'Lao ', # 0x87 'Che ', # 0x88 'Gan ', # 0x89 'Cun ', # 0x8a 'Heng ', # 0x8b 'Si ', # 0x8c 'Shu ', # 0x8d 'Peng ', # 0x8e 'Han ', # 0x8f 'Yun ', # 0x90 'Liu ', # 0x91 'Hong ', # 0x92 'Fu ', # 0x93 'Hao ', # 0x94 'He ', # 0x95 'Xian ', # 0x96 'Jian ', # 0x97 'Shan ', # 0x98 'Xi ', # 0x99 'Oki ', # 0x9a '[?] ', # 0x9b 'Lan ', # 0x9c '[?] 
', # 0x9d 'Yu ', # 0x9e 'Lin ', # 0x9f 'Min ', # 0xa0 'Zao ', # 0xa1 'Dang ', # 0xa2 'Wan ', # 0xa3 'Ze ', # 0xa4 'Xie ', # 0xa5 'Yu ', # 0xa6 'Li ', # 0xa7 'Shi ', # 0xa8 'Xue ', # 0xa9 'Ling ', # 0xaa 'Man ', # 0xab 'Zi ', # 0xac 'Yong ', # 0xad 'Kuai ', # 0xae 'Can ', # 0xaf 'Lian ', # 0xb0 'Dian ', # 0xb1 'Ye ', # 0xb2 'Ao ', # 0xb3 'Huan ', # 0xb4 'Zhen ', # 0xb5 'Chan ', # 0xb6 'Man ', # 0xb7 'Dan ', # 0xb8 'Dan ', # 0xb9 'Yi ', # 0xba 'Sui ', # 0xbb 'Pi ', # 0xbc 'Ju ', # 0xbd 'Ta ', # 0xbe 'Qin ', # 0xbf 'Ji ', # 0xc0 'Zhuo ', # 0xc1 'Lian ', # 0xc2 'Nong ', # 0xc3 'Guo ', # 0xc4 'Jin ', # 0xc5 'Fen ', # 0xc6 'Se ', # 0xc7 'Ji ', # 0xc8 'Sui ', # 0xc9 'Hui ', # 0xca 'Chu ', # 0xcb 'Ta ', # 0xcc 'Song ', # 0xcd 'Ding ', # 0xce '[?] ', # 0xcf 'Zhu ', # 0xd0 'Lai ', # 0xd1 'Bin ', # 0xd2 'Lian ', # 0xd3 'Mi ', # 0xd4 'Shi ', # 0xd5 'Shu ', # 0xd6 'Mi ', # 0xd7 'Ning ', # 0xd8 'Ying ', # 0xd9 'Ying ', # 0xda 'Meng ', # 0xdb 'Jin ', # 0xdc 'Qi ', # 0xdd 'Pi ', # 0xde 'Ji ', # 0xdf 'Hao ', # 0xe0 'Ru ', # 0xe1 'Zui ', # 0xe2 'Wo ', # 0xe3 'Tao ', # 0xe4 'Yin ', # 0xe5 'Yin ', # 0xe6 'Dui ', # 0xe7 'Ci ', # 0xe8 'Huo ', # 0xe9 'Jing ', # 0xea 'Lan ', # 0xeb 'Jun ', # 0xec 'Ai ', # 0xed 'Pu ', # 0xee 'Zhuo ', # 0xef 'Wei ', # 0xf0 'Bin ', # 0xf1 'Gu ', # 0xf2 'Qian ', # 0xf3 'Xing ', # 0xf4 'Hama ', # 0xf5 'Kuo ', # 0xf6 'Fei ', # 0xf7 '[?] ', # 0xf8 'Boku ', # 0xf9 'Jian ', # 0xfa 'Wei ', # 0xfb 'Luo ', # 0xfc 'Zan ', # 0xfd 'Lu ', # 0xfe 'Li ', # 0xff )
gpl-3.0
bobrathbone/piradio
test_tcp.py
1
1031
#!/usr/bin/env python # # Raspberry Pi TCPIP test server class # $Id: test_tcp.py,v 1.1 2015/10/09 12:03:45 bob Exp $ # # Author : Bob Rathbone # Site : http://www.bobrathbone.com # # This program uses the Python socket server # See https://docs.python.org/2/library/socketserver.html # # License: GNU V3, See https://www.gnu.org/copyleft/gpl.html # # Disclaimer: Software is provided as is and absolutly no warranties are implied or given. # The authors shall not be liable for any loss or damage however caused. # import sys import time import SocketServer from tcp_server_class import TCPServer from tcp_server_class import RequestHandler server = None def callback(): global server print "Data =", server.getData() return False server = TCPServer((TCPServer.host,TCPServer.port),RequestHandler) print "Listening", server.fileno() server.listen(server,callback) try: while True: time.sleep(0.1) except KeyboardInterrupt: print "Exit server" server.shutdown() server.server_close() sys.exit(0) # End of program
gpl-3.0
Krossom/python-for-android
python3-alpha/python3-src/Lib/lib2to3/fixes/fix_apply.py
161
1901
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for apply(). This converts apply(func, v, k) into (func)(*v, **k).""" # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Call, Comma, parenthesize class FixApply(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'apply' trailer< '(' arglist< (not argument<NAME '=' any>) func=any ',' (not argument<NAME '=' any>) args=any [',' (not argument<NAME '=' any>) kwds=any] [','] > ')' > > """ def transform(self, node, results): syms = self.syms assert results func = results["func"] args = results["args"] kwds = results.get("kwds") prefix = node.prefix func = func.clone() if (func.type not in (token.NAME, syms.atom) and (func.type != syms.power or func.children[-2].type == token.DOUBLESTAR)): # Need to parenthesize func = parenthesize(func) func.prefix = "" args = args.clone() args.prefix = "" if kwds is not None: kwds = kwds.clone() kwds.prefix = "" l_newargs = [pytree.Leaf(token.STAR, "*"), args] if kwds is not None: l_newargs.extend([Comma(), pytree.Leaf(token.DOUBLESTAR, "**"), kwds]) l_newargs[-2].prefix = " " # that's the ** token # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) # can be translated into f(x, y, *t) instead of f(*(x, y) + t) #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) return Call(func, l_newargs, prefix=prefix)
apache-2.0
harlowja/networkx
networkx/algorithms/bipartite/centrality.py
76
8139
#-*- coding: utf-8 -*- # Copyright (C) 2011 by # Jordi Torrents <jtorrents@milnou.net> # Aric Hagberg <hagberg@lanl.gov> # All rights reserved. # BSD license. import networkx as nx __author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>', 'Aric Hagberg (hagberg@lanl.gov)']) __all__=['degree_centrality', 'betweenness_centrality', 'closeness_centrality'] def degree_centrality(G, nodes): r"""Compute the degree centrality for nodes in a bipartite network. The degree centrality for a node `v` is the fraction of nodes connected to it. Parameters ---------- G : graph A bipartite network nodes : list or container Container with all nodes in one bipartite node set. Returns ------- centrality : dictionary Dictionary keyed by node with bipartite degree centrality as the value. See Also -------- betweenness_centrality, closeness_centrality, sets, is_bipartite Notes ----- The nodes input parameter must conatin all nodes in one bipartite node set, but the dictionary returned contains all nodes from both bipartite node sets. For unipartite networks, the degree centrality values are normalized by dividing by the maximum possible degree (which is `n-1` where `n` is the number of nodes in G). In the bipartite case, the maximum possible degree of a node in a bipartite node set is the number of nodes in the opposite node set [1]_. The degree centrality for a node `v` in the bipartite sets `U` with `n` nodes and `V` with `m` nodes is .. math:: d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U , d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V , where `deg(v)` is the degree of node `v`. References ---------- .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. 
http://www.steveborgatti.com/papers/bhaffiliations.pdf """ top = set(nodes) bottom = set(G) - top s = 1.0/len(bottom) centrality = dict((n,d*s) for n,d in G.degree_iter(top)) s = 1.0/len(top) centrality.update(dict((n,d*s) for n,d in G.degree_iter(bottom))) return centrality def betweenness_centrality(G, nodes): r"""Compute betweenness centrality for nodes in a bipartite network. Betweenness centrality of a node `v` is the sum of the fraction of all-pairs shortest paths that pass through `v`. Values of betweenness are normalized by the maximum possible value which for bipartite graphs is limited by the relative size of the two node sets [1]_. Let `n` be the number of nodes in the node set `U` and `m` be the number of nodes in the node set `V`, then nodes in `U` are normalized by dividing by .. math:: \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] , where .. math:: s = (n - 1) \div m , t = (n - 1) \mod m , and nodes in `V` are normalized by dividing by .. math:: \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] , where, .. math:: p = (m - 1) \div n , r = (m - 1) \mod n . Parameters ---------- G : graph A bipartite graph nodes : list or container Container with all nodes in one bipartite node set. Returns ------- betweenness : dictionary Dictionary keyed by node with bipartite betweenness centrality as the value. See Also -------- degree_centrality, closeness_centrality, sets, is_bipartite Notes ----- The nodes input parameter must contain all nodes in one bipartite node set, but the dictionary returned contains all nodes from both node sets. References ---------- .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. 
http://www.steveborgatti.com/papers/bhaffiliations.pdf """ top = set(nodes) bottom = set(G) - top n = float(len(top)) m = float(len(bottom)) s = (n-1) // m t = (n-1) % m bet_max_top = (((m**2)*((s+1)**2))+ (m*(s+1)*(2*t-s-1))- (t*((2*s)-t+3)))/2.0 p = (m-1) // n r = (m-1) % n bet_max_bot = (((n**2)*((p+1)**2))+ (n*(p+1)*(2*r-p-1))- (r*((2*p)-r+3)))/2.0 betweenness = nx.betweenness_centrality(G, normalized=False, weight=None) for node in top: betweenness[node]/=bet_max_top for node in bottom: betweenness[node]/=bet_max_bot return betweenness def closeness_centrality(G, nodes, normalized=True): r"""Compute the closeness centrality for nodes in a bipartite network. The closeness of a node is the distance to all other nodes in the graph or in the case that the graph is not connected to all other nodes in the connected component containing that node. Parameters ---------- G : graph A bipartite network nodes : list or container Container with all nodes in one bipartite node set. normalized : bool, optional If True (default) normalize by connected component size. Returns ------- closeness : dictionary Dictionary keyed by node with bipartite closeness centrality as the value. See Also -------- betweenness_centrality, degree_centrality sets, is_bipartite Notes ----- The nodes input parameter must conatin all nodes in one bipartite node set, but the dictionary returned contains all nodes from both node sets. Closeness centrality is normalized by the minimum distance possible. In the bipartite case the minimum distance for a node in one bipartite node set is 1 from all nodes in the other node set and 2 from all other nodes in its own set [1]_. Thus the closeness centrality for node `v` in the two bipartite sets `U` with `n` nodes and `V` with `m` nodes is .. math:: c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U, c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V, where `d` is the sum of the distances from `v` to all other nodes. 
Higher values of closeness indicate higher centrality. As in the unipartite case, setting normalized=True causes the values to normalized further to n-1 / size(G)-1 where n is the number of nodes in the connected part of graph containing the node. If the graph is not completely connected, this algorithm computes the closeness centrality for each connected part separately. References ---------- .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook of Social Network Analysis. Sage Publications. http://www.steveborgatti.com/papers/bhaffiliations.pdf """ closeness={} path_length=nx.single_source_shortest_path_length top = set(nodes) bottom = set(G) - top n = float(len(top)) m = float(len(bottom)) for node in top: sp=path_length(G,node) totsp=sum(sp.values()) if totsp > 0.0 and len(G) > 1: closeness[node]= (m + 2*(n-1)) / totsp if normalized: s=(len(sp)-1.0) / ( len(G) - 1 ) closeness[node] *= s else: closeness[n]=0.0 for node in bottom: sp=path_length(G,node) totsp=sum(sp.values()) if totsp > 0.0 and len(G) > 1: closeness[node]= (n + 2*(m-1)) / totsp if normalized: s=(len(sp)-1.0) / ( len(G) - 1 ) closeness[node] *= s else: closeness[n]=0.0 return closeness
bsd-3-clause
dhenrygithub/QGIS
python/plugins/processing/algs/grass7/ext/r_stats_quantile_rast.py
3
2431
# -*- coding: utf-8 -*- """ *************************************************************************** r_stats_quantile_rast.py ------------------------ Date : February 2016 Copyright : (C) 2016 by Médéric Ribreux Email : medspx at medspx dot fr *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Médéric Ribreux' __date__ = 'February 2016' __copyright__ = '(C) 2016, Médéric Ribreux' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from processing.core.parameters import getParameterFromString import os def processCommand(alg): # We create the output sequence according to percentiles number base = alg.getParameterValue('base') quantiles = alg.getParameterValue('quantiles') - 1 outputs = [] for i in range(0, int(quantiles)): outputs.append('output_{}'.format(i)) output = getParameterFromString('ParameterString|output|Output Rasters|None|False|True') output.value = ','.join(outputs) alg.addParameter(output) output_dir = alg.getOutputFromName('output_dir') alg.removeOutputFromName('output_dir') # Launch the algorithm alg.processCommand() # We re-add the previous output alg.addOutput(output_dir) def processOutputs(alg): # We need to export each of the output output_dir = alg.getOutputValue('output_dir') outputParam = alg.getParameterFromName('output') outputs = outputParam.value.split(',') alg.parameters.remove(outputParam) for output in outputs: command = u"r.out.gdal -c createopt=\"TFW=YES,COMPRESS=LZW\" input={} output=\"{}\" --overwrite".format( output, os.path.join(output_dir, output + '.tif') ) alg.commands.append(command) 
alg.outputCommands.append(command)
gpl-2.0
sungkim11/mhargadh
django/templatetags/cache.py
309
2406
from django.template import Library, Node, TemplateSyntaxError, Variable, VariableDoesNotExist
from django.template import resolve_variable
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.http import urlquote
from django.utils.hashcompat import md5_constructor

register = Library()


class CacheNode(Node):
    """Renders its child nodelist once, then serves the result from cache."""

    def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
        self.nodelist = nodelist
        self.expire_time_var = Variable(expire_time_var)
        self.fragment_name = fragment_name
        self.vary_on = vary_on

    def render(self, context):
        # The timeout may itself be a template variable; resolve it first.
        try:
            timeout = self.expire_time_var.resolve(context)
        except VariableDoesNotExist:
            raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
        try:
            timeout = int(timeout)
        except (ValueError, TypeError):
            raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % timeout)
        # Build a unicode key for this fragment and all vary-on's.
        vary_parts = [urlquote(resolve_variable(var, context)) for var in self.vary_on]
        digest = md5_constructor(u':'.join(vary_parts))
        cache_key = 'template.cache.%s.%s' % (self.fragment_name, digest.hexdigest())
        fragment = cache.get(cache_key)
        if fragment is None:
            fragment = self.nodelist.render(context)
            cache.set(cache_key, fragment, timeout)
        return fragment


def do_cache(parser, token):
    """
    This will cache the contents of a template fragment for a given amount
    of time.

    Usage::

        {% load cache %}
        {% cache [expire_time] [fragment_name] %}
            .. some expensive processing ..
        {% endcache %}

    This tag also supports varying by a list of arguments::

        {% load cache %}
        {% cache [expire_time] [fragment_name] [var1] [var2] .. %}
            .. some expensive processing ..
        {% endcache %}

    Each unique set of arguments will result in a unique cache entry.
    """
    nodelist = parser.parse(('endcache',))
    parser.delete_first_token()
    bits = token.contents.split()
    if len(bits) < 3:
        raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % bits[0])
    return CacheNode(nodelist, bits[1], bits[2], bits[3:])

register.tag('cache', do_cache)
bsd-3-clause
gayangunarathne/kubernetes
Godeps/_workspace/src/github.com/ugorji/go/codec/test.py
670
3808
#!/usr/bin/env python

# NOTE: Python 2 script (uses print statements).
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).

# Ensure msgpack-python and cbor are installed first, using:
#   pip install --user msgpack-python
#   pip install --user cbor

import cbor, msgpack, msgpackrpc, sys, os, threading


def get_test_data_list():
    """Return the list of values to serialize into golden files.

    Covers all primitive types, plus nested list/dict combinations.
    """
    # get list with all primitive types, and a combo type
    l0 = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        u"someday",
        u"",
        u"bytestring",
        1328176922000002000,
        -2206187877999998000,
        270,
        -2013855847999995777,
        #-6795364578871345152,
        ]
    l1 = [
        { "true": True,
          "false": False },
        { "true": "True",
          "false": False,
          "uint16(1616)": 1616 },
        { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
          "int32":32323232, "bool": True,
          "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
          "SHORT STRING": "1234567890" },
        { True: "true", 8: False, "false": 0 }
        ]
    l = []
    l.extend(l0)
    # The primitive list itself is appended as one compound element.
    l.append(l0)
    l.extend(l1)
    return l


def build_test_data(destdir):
    """Write <i>.msgpack.golden and <i>.cbor.golden files into destdir."""
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        serialized = msgpack.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
        f.write(serialized)
        f.close()
        serialized = cbor.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
        f.write(serialized)
        f.close()


def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server, optionally self-stopping.

    If stopTimeSec > 0, a timer thread stops the server after that many
    seconds; otherwise it runs until killed.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()


def doRpcClientToPythonSvc(port):
    """Exercise the Python echo service above and print the results."""
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})


def doRpcClientToGoSvc(port):
    """Call the Go-side RPC service (method names are namespaced)."""
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    # Go service registers methods under the TestRpcInt receiver name.
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})


def doMain(args):
    """Dispatch on the first CLI argument; print usage on mismatch."""
    if len(args) == 2 and args[0] == "testdata":
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")


if __name__ == "__main__":
    doMain(sys.argv[1:])
apache-2.0
mancoast/CPythonPyc_test
cpython/241_test_sort.py
15
8689
# NOTE: CPython 2.4-era test module (print statements, cmp, xrange).
from test.test_support import verbose
import random
from UserList import UserList   # NOTE(review): imported but unused here

nerrors = 0


def check(tag, expected, raw, compare=None):
    """Sort `raw` (optionally with `compare`) and verify it matches `expected`.

    Identity (`is`) comparison is used per element so stability bugs are
    caught, not just value equality. Errors are printed and counted in
    the module-global `nerrors` rather than raised.
    """
    global nerrors

    if verbose:
        print "    checking", tag

    orig = raw[:]   # save input in case of error
    if compare:
        raw.sort(compare)
    else:
        raw.sort()

    if len(expected) != len(raw):
        print "error in", tag
        print "length mismatch;", len(expected), len(raw)
        print expected
        print orig
        print raw
        nerrors += 1
        return

    for i, good in enumerate(expected):
        maybe = raw[i]
        if good is not maybe:
            print "error in", tag
            print "out of order at index", i, good, maybe
            print expected
            print orig
            print raw
            nerrors += 1
            return

# Try a variety of sizes at and around powers of 2, and at powers of 10.
sizes = [0]
for power in range(1, 10):
    n = 2 ** power
    sizes.extend(range(n-1, n+2))
sizes.extend([10, 100, 1000])


class Complains(object):
    """Comparable that randomly raises from __lt__ to stress sort cleanup."""
    maybe_complain = True

    def __init__(self, i):
        self.i = i

    def __lt__(self, other):
        # ~0.1% of comparisons raise, exercising exception paths in sort.
        if Complains.maybe_complain and random.random() < 0.001:
            if verbose:
                print "    complaining at", self, other
            raise RuntimeError
        return self.i < other.i

    def __repr__(self):
        return "Complains(%d)" % self.i


class Stable(object):
    """Comparable keyed only on `key`; `index` records original position."""
    def __init__(self, key, i):
        self.key = key
        self.index = i

    def __cmp__(self, other):
        return cmp(self.key, other.key)

    def __repr__(self):
        return "Stable(%d, %d)" % (self.key, self.index)

# Module-level smoke tests over all sizes (run at import time).
for n in sizes:
    x = range(n)
    if verbose:
        print "Testing size", n

    s = x[:]
    check("identity", x, s)

    s = x[:]
    s.reverse()
    check("reversed", x, s)

    s = x[:]
    random.shuffle(s)
    check("random permutation", x, s)

    y = x[:]
    y.reverse()
    s = x[:]
    check("reversed via function", y, s, lambda a, b: cmp(b, a))

    if verbose:
        print "    Checking against an insane comparison function."
        print "    If the implementation isn't careful, this may segfault."
    s = x[:]
    # Random, inconsistent comparison: result must still be *some*
    # permutation of the input (no elements lost or duplicated).
    s.sort(lambda a, b: int(random.random() * 3) - 1)
    check("an insane function left some permutation", x, s)

    x = [Complains(i) for i in x]
    s = x[:]
    random.shuffle(s)
    Complains.maybe_complain = True
    it_complained = False
    try:
        s.sort()
    except RuntimeError:
        it_complained = True
    if it_complained:
        # Disable raising so check()'s own sort can finish.
        Complains.maybe_complain = False
        check("exception during sort left some permutation", x, s)

    s = [Stable(random.randrange(10), i) for i in xrange(n)]
    augmented = [(e, e.index) for e in s]
    augmented.sort()    # forced stable because ties broken by index
    x = [e for e, i in augmented]   # a stable sort of s
    check("stability", x, s)

import unittest
from test import test_support
import sys

#==============================================================================

class TestBugs(unittest.TestCase):

    def test_bug453523(self):
        # bug 453523 -- list.sort() crasher.
        # If this fails, the most likely outcome is a core dump.
        # Mutations during a list sort should raise a ValueError.

        class C:
            def __lt__(self, other):
                if L and random.random() < 0.75:
                    L.pop()
                else:
                    L.append(3)
                return random.random() < 0.5

        L = [C() for i in range(50)]
        self.assertRaises(ValueError, L.sort)

    def test_cmpNone(self):
        # Testing None as a comparison function.

        L = range(50)
        random.shuffle(L)
        L.sort(None)
        self.assertEqual(L, range(50))

    def test_undetected_mutation(self):
        # Python 2.4a1 did not always detect mutation
        memorywaster = []
        for i in range(20):
            def mutating_cmp(x, y):
                L.append(3)
                L.pop()
                return cmp(x, y)
            L = [1,2]
            self.assertRaises(ValueError, L.sort, mutating_cmp)
            def mutating_cmp(x, y):
                L.append(3)
                del L[:]
                return cmp(x, y)
            self.assertRaises(ValueError, L.sort, mutating_cmp)
            # Grow memory so a use-after-free is more likely to misbehave.
            memorywaster = [memorywaster]

#==============================================================================

class TestDecorateSortUndecorate(unittest.TestCase):

    def test_decorated(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        copy = data[:]
        random.shuffle(data)
        data.sort(key=str.lower)
        copy.sort(cmp=lambda x,y: cmp(x.lower(), y.lower()))

    def test_baddecorator(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, data.sort, None, lambda x,y: 0)

    def test_stability(self):
        data = [(random.randrange(100), i) for i in xrange(200)]
        copy = data[:]
        data.sort(key=lambda (x,y): x)   # sort on the random first field
        copy.sort()                      # sort using both fields
        self.assertEqual(data, copy)     # should get the same result

    def test_cmp_and_key_combination(self):
        # Verify that the wrapper has been removed
        def compare(x, y):
            self.assertEqual(type(x), str)
            self.assertEqual(type(x), str)
            return cmp(x, y)
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        data.sort(cmp=compare, key=str.lower)

    def test_badcmp_with_key(self):
        # Verify that the wrapper has been removed
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, data.sort, "bad", str.lower)

    def test_key_with_exception(self):
        # Verify that the wrapper has been removed
        data = range(-2,2)
        dup = data[:]
        self.assertRaises(ZeroDivisionError, data.sort, None, lambda x: 1/x)
        self.assertEqual(data, dup)

    def test_key_with_mutation(self):
        data = range(10)
        def k(x):
            del data[:]
            data[:] = range(20)
            return x
        self.assertRaises(ValueError, data.sort, key=k)

    def test_key_with_mutating_del(self):
        data = range(10)
        class SortKiller(object):
            def __init__(self, x):
                pass
            def __del__(self):
                del data[:]
                data[:] = range(20)
        self.assertRaises(ValueError, data.sort, key=SortKiller)

    def test_key_with_mutating_del_and_exception(self):
        data = range(10)
        ## dup = data[:]
        class SortKiller(object):
            def __init__(self, x):
                if x > 2:
                    raise RuntimeError
            def __del__(self):
                del data[:]
                data[:] = range(20)
        self.assertRaises(RuntimeError, data.sort, key=SortKiller)
        ## major honking subtlety: we *can't* do:
        ##
        ##  self.assertEqual(data, dup)
        ##
        ## because there is a reference to a SortKiller in the
        ## traceback and by the time it dies we're outside the call to
        ## .sort() and so the list protection gimmicks are out of
        ## date (this cost some brain cells to figure out...).

    def test_reverse(self):
        data = range(100)
        random.shuffle(data)
        data.sort(reverse=True)
        self.assertEqual(data, range(99,-1,-1))
        self.assertRaises(TypeError, data.sort, "wrong type")

    def test_reverse_stability(self):
        data = [(random.randrange(100), i) for i in xrange(200)]
        copy1 = data[:]
        copy2 = data[:]
        data.sort(cmp=lambda x,y: cmp(x[0],y[0]), reverse=True)
        copy1.sort(cmp=lambda x,y: cmp(y[0],x[0]))
        self.assertEqual(data, copy1)
        copy2.sort(key=lambda x: x[0], reverse=True)
        self.assertEqual(data, copy2)

#==============================================================================

def test_main(verbose=None):
    """Run the unittest classes; with verbose, also check refcount leaks."""
    test_classes = (
        TestDecorateSortUndecorate,
        TestBugs,
    )

    test_support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts

if __name__ == "__main__":
    test_main(verbose=True)
gpl-3.0
endlessm/chromium-browser
third_party/swiftshader/third_party/subzero/pydir/if.py
3
1812
#!/usr/bin/env python2 import argparse import os import sys from utils import shellcmd def main(): """Run the specified command only if conditions are met. Two conditions are checked. First, the CONDITION must be true. Secondly, all NEED names must be in the set of HAVE names. If both conditions are met, the command defined by the remaining arguments is run in a shell. """ argparser = argparse.ArgumentParser( description=' ' + main.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) argparser.add_argument('--cond', choices={'true', 'false'} , required=False, default='true', metavar='CONDITION', help='Condition to test.') argparser.add_argument('--need', required=False, default=[], action='append', metavar='NEED', help='Needed name. May be repeated.') argparser.add_argument('--have', required=False, default=[], action='append', metavar='HAVE', help='Name you have. May be repeated.') argparser.add_argument('--echo-cmd', required=False, action='store_true', help='Trace the command before running.') argparser.add_argument('--command', nargs=argparse.REMAINDER, help='Command to run if attributes found.') args = argparser.parse_args() # Quit early if no command to run. if not args.command: raise RuntimeError("No command argument(s) specified for ifatts") if args.cond == 'true' and set(args.need) <= set(args.have): stdout_result = shellcmd(args.command, echo=args.echo_cmd) if not args.echo_cmd: sys.stdout.write(stdout_result) if __name__ == '__main__': main() sys.exit(0)
bsd-3-clause
andykimpe/chromium-test-npapi
tools/perf/page_sets/page_cycler/intl2.py
10
2867
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module


class Intl2Page(page_module.Page):
  """One recorded international page served from the local data directory."""

  def __init__(self, url, page_set):
    super(Intl2Page, self).__init__(url=url, page_set=page_set)


class Intl2PageSet(page_set_module.PageSet):

  """ Description: Intl2 page_cycler benchmark """

  def __init__(self):
    super(Intl2PageSet, self).__init__(
      # pylint: disable=C0301
      serving_dirs=set(['../../../../data/page_cycler/intl2']))

    # Every page lives under the same recorded-data directory; keep only
    # the site names here and assemble the file:// URLs below.
    prefix = 'file://../../../../data/page_cycler/intl2/'
    sites = [
      'arabicnews.google.com',
      'bn.wikipedia.org',
      'exteen.com',
      'farsnews.com',
      'hindi.webdunia.com',
      'in.telugu.yahoo.com',
      'isna.ir',
      'kapook.com',
      'kooora.com',
      'manager.co.th',
      'masrawy.com',
      'ml.wikipedia.org',
      'msn.co.il',
      'news.bbc.co.uk',
      'news.google.com',
      'sh3bwah.com',
      'sgkalesh.blogspot.com',
      'tapuz.co.il',
      'thaimisc.com',
      'vietnamnet.vn',
      'vnexpress.net',
      'walla.co.il',
      'www.aljayyash.net',
      'www.bbc.co.uk',
      'www.google.com.sa',
      'www.islamweb.net',
      'www.mthai.com',
      'www.startimes2.com',
      'www.jagran.com',
      'ynet.co.il',
    ]

    for site in sites:
      self.AddPage(Intl2Page(prefix + site + '/', self))
bsd-3-clause
open-homeautomation/home-assistant
homeassistant/components/climate/nest.py
14
7519
""" Support for Nest thermostats. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/climate.nest/ """ import logging import voluptuous as vol from homeassistant.components.nest import DATA_NEST from homeassistant.components.climate import ( STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice, PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, ATTR_TEMPERATURE) from homeassistant.const import ( TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN) DEPENDENCIES = ['nest'] _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_SCAN_INTERVAL): vol.All(vol.Coerce(int), vol.Range(min=1)), }) STATE_ECO = 'eco' STATE_HEAT_COOL = 'heat-cool' def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the Nest thermostat.""" if discovery_info is None: return _LOGGER.debug("Setting up nest thermostat") temp_unit = hass.config.units.temperature_unit add_devices( [NestThermostat(structure, device, temp_unit) for structure, device in hass.data[DATA_NEST].thermostats()], True ) class NestThermostat(ClimateDevice): """Representation of a Nest thermostat.""" def __init__(self, structure, device, temp_unit): """Initialize the thermostat.""" self._unit = temp_unit self.structure = structure self.device = device self._fan_list = [STATE_ON, STATE_AUTO] # Not all nest devices support cooling and heating remove unused self._operation_list = [STATE_OFF] # Add supported nest thermostat features if self.device.can_heat: self._operation_list.append(STATE_HEAT) if self.device.can_cool: self._operation_list.append(STATE_COOL) if self.device.can_heat and self.device.can_cool: self._operation_list.append(STATE_AUTO) self._operation_list.append(STATE_ECO) # feature of device self._has_fan = self.device.has_fan # data attributes self._away = None self._location = None self._name = None self._humidity = None self._target_temperature = 
None self._temperature = None self._temperature_scale = None self._mode = None self._fan = None self._eco_temperature = None self._is_locked = None self._locked_temperature = None self._min_temperature = None self._max_temperature = None @property def name(self): """Return the name of the nest, if any.""" return self._name @property def temperature_unit(self): """Return the unit of measurement.""" return self._temperature_scale @property def current_temperature(self): """Return the current temperature.""" return self._temperature @property def current_operation(self): """Return current operation ie. heat, cool, idle.""" if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]: return self._mode elif self._mode == STATE_HEAT_COOL: return STATE_AUTO else: return STATE_UNKNOWN @property def target_temperature(self): """Return the temperature we try to reach.""" if self._mode != STATE_HEAT_COOL and not self.is_away_mode_on: return self._target_temperature else: return None @property def target_temperature_low(self): """Return the lower bound temperature we try to reach.""" if (self.is_away_mode_on or self._mode == STATE_ECO) and \ self._eco_temperature[0]: # eco_temperature is always a low, high tuple return self._eco_temperature[0] if self._mode == STATE_HEAT_COOL: return self._target_temperature[0] else: return None @property def target_temperature_high(self): """Return the upper bound temperature we try to reach.""" if (self.is_away_mode_on or self._mode == STATE_ECO) and \ self._eco_temperature[1]: # eco_temperature is always a low, high tuple return self._eco_temperature[1] if self._mode == STATE_HEAT_COOL: return self._target_temperature[1] else: return None @property def is_away_mode_on(self): """Return if away mode is on.""" return self._away def set_temperature(self, **kwargs): """Set new target temperature.""" target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW) target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH) if self._mode == STATE_HEAT_COOL: if 
target_temp_low is not None and target_temp_high is not None: temp = (target_temp_low, target_temp_high) else: temp = kwargs.get(ATTR_TEMPERATURE) _LOGGER.debug("Nest set_temperature-output-value=%s", temp) self.device.target = temp def set_operation_mode(self, operation_mode): """Set operation mode.""" if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]: device_mode = operation_mode elif operation_mode == STATE_AUTO: device_mode = STATE_HEAT_COOL self.device.mode = device_mode @property def operation_list(self): """List of available operation modes.""" return self._operation_list def turn_away_mode_on(self): """Turn away on.""" self.structure.away = True def turn_away_mode_off(self): """Turn away off.""" self.structure.away = False @property def current_fan_mode(self): """Return whether the fan is on.""" if self._has_fan: # Return whether the fan is on return STATE_ON if self._fan else STATE_AUTO else: # No Fan available so disable slider return None @property def fan_list(self): """List of available fan modes.""" return self._fan_list def set_fan_mode(self, fan): """Turn fan on/off.""" self.device.fan = fan.lower() @property def min_temp(self): """Identify min_temp in Nest API or defaults if not available.""" return self._min_temperature @property def max_temp(self): """Identify max_temp in Nest API or defaults if not available.""" return self._max_temperature def update(self): """Cache value from Python-nest.""" self._location = self.device.where self._name = self.device.name self._humidity = self.device.humidity, self._temperature = self.device.temperature self._mode = self.device.mode self._target_temperature = self.device.target self._fan = self.device.fan self._away = self.structure.away == 'away' self._eco_temperature = self.device.eco_temperature self._locked_temperature = self.device.locked_temperature self._min_temperature = self.device.min_temperature self._max_temperature = self.device.max_temperature self._is_locked = 
self.device.is_locked if self.device.temperature_scale == 'C': self._temperature_scale = TEMP_CELSIUS else: self._temperature_scale = TEMP_FAHRENHEIT
apache-2.0
iohannez/gnuradio
grc/core/blocks/_templates.py
7
2565
# Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# GNU Radio Companion is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# GNU Radio Companion is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA

"""
This dict class holds a (shared) cache of compiled mako templates. These
"""
from __future__ import absolute_import, print_function

from mako.template import Template
from mako.exceptions import SyntaxException

from ..errors import TemplateError


class MakoTemplates(dict):
    """Dict of template texts, usable both directly and as a class attribute.

    When accessed through an owning instance (descriptor protocol), a copy
    bound to that instance is returned so render() can pull the namespace
    from the instance.
    """

    # Class-level cache shared by all MakoTemplates instances:
    # template text -> compiled mako Template.
    _template_cache = {}

    def __init__(self, _bind_to=None, *args, **kwargs):
        # `instance` is the object whose namespace_templates will be used
        # for rendering; None for the unbound (class-level) template dict.
        self.instance = _bind_to
        dict.__init__(self, *args, **kwargs)

    def __get__(self, instance, owner):
        """Descriptor access: return self unbound, or a per-instance copy.

        The copy is cached on the instance attribute (shadowing the class
        attribute) so subsequent lookups skip this method.
        """
        if instance is None or self.instance is not None:
            return self
        copy = self.__class__(_bind_to=instance, **self)
        if getattr(instance.__class__, 'templates', None) is self:
            setattr(instance, 'templates', copy)
        return copy

    @classmethod
    def compile(cls, text):
        """Compile `text` to a mako Template, cache it, and return it.

        Raises TemplateError (wrapping mako's SyntaxException) on bad input.
        """
        text = str(text)
        try:
            template = Template(text)
        except SyntaxException as error:
            raise TemplateError(text, *error.args)
        cls._template_cache[text] = template
        return template

    def _get_template(self, text):
        # Cache hit first; compile (and cache) on miss.
        try:
            return self._template_cache[str(text)]
        except KeyError:
            return self.compile(text)

    def render(self, item):
        """Render the template(s) stored under key `item`.

        Uses the bound instance's namespace_templates as the mako
        namespace. A list value renders to a list of strings; a single
        template renders to one string. Empty/missing values render to ''.
        """
        text = self.get(item)
        if not text:
            return ''
        namespace = self.instance.namespace_templates
        try:
            if isinstance(text, list):
                templates = (self._get_template(t) for t in text)
                return [template.render(**namespace) for template in templates]
            else:
                template = self._get_template(text)
                return template.render(**namespace)
        except Exception as error:
            raise TemplateError(error, text)
gpl-3.0
JeroenDeDauw/teg
python/client/listeners.py
2
1213
## $Id: listeners.py,v 1.1 2003/08/18 20:42:59 riq Exp $
##
## Tenes Empanadas Graciela
##
## Copyright (C) 2000,2003 Ricardo Quesada
##
## Author: Ricardo Calixto Quesada <riq@coresecurity.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; only version 2 of the License
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.


class Listener( object ):
    """Abstract base for event listeners wired to an event manager.

    Subclasses must override notify() (called with each dispatched event)
    and update(); post() forwards an event to the attached manager.
    """

    def __init__( self ):
        # Set later via setEventMgr(); post() requires it to be non-None.
        self._eventMgr = None

    def notify( self, ev ):
        """Handle a dispatched event. Subclasses must override."""
        # Fix: `raise NotImplemented` raised a TypeError (NotImplemented is
        # not an exception); NotImplementedError is the correct signal.
        raise NotImplementedError

    def update( self ):
        """Refresh listener state. Subclasses must override."""
        raise NotImplementedError

    def setEventMgr( self, eventMgr ):
        """Attach the event manager used by post()."""
        self._eventMgr = eventMgr

    def post( self, event ):
        """Forward `event` to the attached event manager."""
        self._eventMgr.post( event )
gpl-3.0
smasala/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
124
5804
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1.  Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
# 2.  Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for logutils.py."""

import logging
import os

import unittest2 as unittest

from webkitpy.common.system.logtesting import LogTesting
from webkitpy.common.system.logtesting import TestLogStream
from webkitpy.common.system import logutils


class GetLoggerTest(unittest.TestCase):

    """Tests get_logger()."""

    def test_get_logger_in_webkitpy(self):
        logger = logutils.get_logger(__file__)
        self.assertEqual(logger.name, "webkitpy.common.system.logutils_unittest")

    def test_get_logger_not_in_webkitpy(self):
        # Temporarily change the working directory so that we
        # can test get_logger() for a path outside of webkitpy.
        working_directory = os.getcwd()
        root_dir = "/"
        os.chdir(root_dir)

        # Outside webkitpy, the logger name falls back to the script
        # basename, with or without the .py extension.
        logger = logutils.get_logger("/Tools/Scripts/test-webkitpy")
        self.assertEqual(logger.name, "test-webkitpy")

        logger = logutils.get_logger("/Tools/Scripts/test-webkitpy.py")
        self.assertEqual(logger.name, "test-webkitpy")

        os.chdir(working_directory)


class ConfigureLoggingTestBase(unittest.TestCase):

    """Base class for configure_logging() unit tests."""

    def _logging_level(self):
        # Subclasses supply the logging level under test.
        raise Exception("Not implemented.")

    def setUp(self):
        log_stream = TestLogStream(self)

        # Use a logger other than the root logger or one prefixed with
        # "webkitpy." so as not to conflict with test-webkitpy logging.
        logger = logging.getLogger("unittest")

        # Configure the test logger not to pass messages along to the
        # root logger.  This prevents test messages from being
        # propagated to loggers used by test-webkitpy logging (e.g.
        # the root logger).
        logger.propagate = False

        logging_level = self._logging_level()
        self._handlers = logutils.configure_logging(logging_level=logging_level,
                                                    logger=logger,
                                                    stream=log_stream)
        self._log = logger
        self._log_stream = log_stream

    def tearDown(self):
        """Reset logging to its original state.

        This method ensures that the logging configuration set up
        for a unit test does not affect logging in other unit tests.

        """
        logger = self._log
        for handler in self._handlers:
            logger.removeHandler(handler)

    def _assert_log_messages(self, messages):
        """Assert that the logged messages equal the given messages."""
        self._log_stream.assertMessages(messages)


class ConfigureLoggingTest(ConfigureLoggingTestBase):

    """Tests configure_logging() with the default logging level."""

    def _logging_level(self):
        return None

    def test_info_message(self):
        self._log.info("test message")
        self._assert_log_messages(["test message\n"])

    def test_debug_message(self):
        # DEBUG is below the default threshold, so nothing is emitted.
        self._log.debug("test message")
        self._assert_log_messages([])

    def test_below_threshold_message(self):
        # We test the boundary case of a logging level equal to 19.
        # In practice, we will probably only be calling log.debug(),
        # which corresponds to a logging level of 10.
        level = logging.INFO - 1  # Equals 19.
        self._log.log(level, "test message")
        self._assert_log_messages([])

    def test_two_messages(self):
        self._log.info("message1")
        self._log.info("message2")
        self._assert_log_messages(["message1\n",
                                   "message2\n"])


class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):

    def _logging_level(self):
        return logging.DEBUG

    def test_info_message(self):
        # Verbose mode prefixes messages with logger name and level.
        self._log.info("test message")
        self._assert_log_messages(["unittest: [INFO] test message\n"])

    def test_debug_message(self):
        self._log.debug("test message")
        self._assert_log_messages(["unittest: [DEBUG] test message\n"])


class ConfigureLoggingCustomLevelTest(ConfigureLoggingTestBase):

    """Tests configure_logging() with a custom logging level."""

    _level = 36

    def _logging_level(self):
        return self._level

    def test_logged_message(self):
        self._log.log(self._level, "test message")
        self._assert_log_messages(["test message\n"])

    def test_below_threshold_message(self):
        self._log.log(self._level - 1, "test message")
        self._assert_log_messages([])
bsd-3-clause
vwvww/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_stream_hixie75.py
496
2285
#!/usr/bin/env python # # Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for stream module.""" import unittest import set_sys_path # Update sys.path to locate mod_pywebsocket module. 
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75


class StreamHixie75Test(unittest.TestCase):
    """A unittest for StreamHixie75 class."""

    def test_payload_length(self):
        """Verify Hixie-75 variable-length payload-length decoding.

        Covers a zero length, the one-byte maximum (0x7f), the smallest
        two-byte encoding (0x80) and an arbitrary multi-byte value.
        """
        # (expected_length, encoded_header) pairs.
        # The local was renamed from 'bytes' to 'encoded' because the
        # original name shadowed the builtin 'bytes' type.
        for length, encoded in ((0, '\x00'),
                                (0x7f, '\x7f'),
                                (0x80, '\x81\x00'),
                                (0x1234, '\x80\xa4\x34')):
            test_stream = StreamHixie75(_create_request_hixie75(encoded))
            self.assertEqual(
                length, test_stream._read_payload_length_hixie75())


if __name__ == '__main__':
    unittest.main()


# vi:sts=4 sw=4 et
mpl-2.0
erikdejonge/youtube-dl
youtube_dl/extractor/reddit.py
10
4208
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, float_or_none, url_or_none, ) class RedditIE(InfoExtractor): _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)' _TEST = { # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/ 'url': 'https://v.redd.it/zv89llsvexdz', 'md5': '0a070c53eba7ec4534d95a5a1259e253', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'zv89llsvexdz', }, 'params': { 'format': 'bestvideo', }, } def _real_extract(self, url): video_id = self._match_id(url) formats = self._extract_m3u8_formats( 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) formats.extend(self._extract_mpd_formats( 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': video_id, 'formats': formats, } class RedditRIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'That small heart attack.', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1501941939, 'upload_date': '20170805', 'uploader': 'Antw87', 'like_count': int, 'dislike_count': int, 'comment_count': int, 'age_limit': 0, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 'only_matching': True, }, { # imgur 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # imgur @ old reddit 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # streamable 'url': 
'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', 'only_matching': True, }, { # youtube 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 'only_matching': True, }, { # reddit video @ nm reddit 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) url, video_id = mobj.group('url', 'id') video_id = self._match_id(url) data = self._download_json( url + '/.json', video_id)[0]['data']['children'][0]['data'] video_url = data['url'] # Avoid recursing into the same reddit URL if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: raise ExtractorError('No media found', expected=True) over_18 = data.get('over_18') if over_18 is True: age_limit = 18 elif over_18 is False: age_limit = 0 else: age_limit = None return { '_type': 'url_transparent', 'url': video_url, 'title': data.get('title'), 'thumbnail': url_or_none(data.get('thumbnail')), 'timestamp': float_or_none(data.get('created_utc')), 'uploader': data.get('author'), 'like_count': int_or_none(data.get('ups')), 'dislike_count': int_or_none(data.get('downs')), 'comment_count': int_or_none(data.get('num_comments')), 'age_limit': age_limit, }
unlicense
vpodzime/anaconda
pyanaconda/anaconda_argparse.py
6
13905
#
# anaconda_argparse.py: option parsing for anaconda (CLI and boot args)
#
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Will Woods <wwoods@redhat.com>
# Martin Kolman <mkolman@redhat.com>

DESCRIPTION = "Anaconda is the installation program used by Fedora," \
              "Red Hat Enterprise Linux and some other distributions."

import itertools
import os
import sys
import fcntl
import termios
import struct

from argparse import ArgumentParser, ArgumentError, HelpFormatter, Namespace

from pyanaconda.flags import BootArgs

import logging
log = logging.getLogger("anaconda")

# Help text formatting constants
LEFT_PADDING = 8  # the help text will start after 8 spaces
RIGHT_PADDING = 8  # there will be 8 spaces left on the right
DEFAULT_HELP_WIDTH = 80

def get_help_width():
    """
    Try to detect the terminal window width size and use it to compute
    optimal help text width. If it can't be detected a default values is
    returned.

    :returns: optimal help text width in number of characters
    :rtype: int
    """
    # don't do terminal size detection on s390, it is not supported
    # by its arcane TTY system and only results in cryptic error messages
    # ending on the standard output
    # (we do the s390 detection here directly to avoid
    # the delay caused by importing the Blivet module
    # just for this single call)
    is_s390 = os.uname()[4].startswith('s390')
    if is_s390:
        return DEFAULT_HELP_WIDTH

    help_width = DEFAULT_HELP_WIDTH
    try:
        # TIOCGWINSZ returns (rows, cols, ...); index 1 is the column count
        data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
        columns = int(struct.unpack('hh', data)[1])
        # apply the right padding
        columns = columns - RIGHT_PADDING
        if columns > 0:
            help_width = columns
    # pylint: disable=broad-except
    except Exception as e:
        # detection failed, use the default
        # NOTE: this could be caused by the COLUMNS string having a value
        # that can't be converted to an integer
        print("anaconda argparse: terminal size detection failed, using default width")
        print(e)
    return help_width

class AnacondaArgumentParser(ArgumentParser):
    """
    Subclass of ArgumentParser that also examines boot arguments.
    """

    def __init__(self, *args, **kwargs):
        """
        If the "bootarg_prefix" keyword argument is set, it's assumed that all
        bootargs will start with that prefix.

        "require_prefix" is a bool:
            False: accept the argument with or without the prefix.
            True: ignore the argument without the prefix. (default)
        """
        help_width = get_help_width()
        # maps boot-arg name (without "--"/prefix) -> argparse option object
        self._boot_arg = dict()
        self.deprecated_bootargs = []
        self.bootarg_prefix = kwargs.pop("bootarg_prefix", "")
        self.require_prefix = kwargs.pop("require_prefix", True)
        # the lambda bakes the detected terminal width into every
        # HelpFormatter instance argparse creates
        ArgumentParser.__init__(self, description=DESCRIPTION,
                                formatter_class=lambda prog: HelpFormatter(
                                    prog, max_help_position=LEFT_PADDING, width=help_width),
                                *args, **kwargs)

    def add_argument(self, *args, **kwargs):
        """
        Add a new option - like ArgumentParser.add_argument.

        The long options will be added to the list of boot args, unless
        the keyword argument 'bootarg' is set to False.

        Positional arguments that don't start with '-' are considered extra
        boot args to look for.

        NOTE: conflict_handler is currently ignored for boot args - they will
        always raise ArgumentError if they conflict.
        """
        # TODO: add kwargs to make an option commandline-only or boot-arg-only
        flags = [a for a in args if a.startswith('-')]
        bootargs = [a for a in args if not a.startswith('-')]
        do_bootarg = kwargs.pop("bootarg", True)
        option = super(AnacondaArgumentParser, self).add_argument(*flags, **kwargs)
        # make a generator that returns only the long opts without the -- prefix
        long_opts = (o[2:] for o in option.option_strings if o.startswith("--"))
        bootargs += (flag for flag in long_opts)
        if do_bootarg:
            for b in bootargs:
                if b in self._boot_arg:
                    raise ArgumentError(
                        "conflicting bootopt string: %s" % b, option)
                else:
                    self._boot_arg[b] = option
        return option

    def _get_bootarg_option(self, arg):
        """
        Find the correct Option for a given bootarg (if one exists)

        :param string arg: boot option
        :returns: argparse option object or None if no suitable option is found
        :rtype argparse option or None
        """
        if self.bootarg_prefix and arg.startswith(self.bootarg_prefix):
            prefixed_option = True
            arg = arg[len(self.bootarg_prefix):]
        else:
            prefixed_option = False
        option = self._boot_arg.get(arg)

        if self.require_prefix and not prefixed_option:
            return None
        if option and self.bootarg_prefix and not prefixed_option:
            # known option given without the expected prefix: accept it but
            # record it so callers can warn about the deprecated spelling
            self.deprecated_bootargs.append(arg)
        return option

    def parse_boot_cmdline(self, boot_cmdline):
        """
        Parse the boot cmdline and create an appropriate Namespace instance
        according to the option definitions set by add_argument.

        boot_cmdline can be given as a string (to be parsed by BootArgs), or a
        dict (or any object with .items()) of {bootarg:value} pairs.

        If boot_cmdline is None, the boot_cmdline data will be whatever BootArgs reads
        by default (/proc/cmdline, /run/initramfs/etc/cmdline, /etc/cmdline).

        If an option requires a value but the boot arg doesn't provide one,
        we'll quietly not set anything in the Namespace. We also skip any boot options
        that were not specified by add_argument as we don't care about them
        (there will usually be quite a lot of them (rd.*, etc.).

        :param boot_cmdline: the Anaconda boot command line arguments
        :type boot_cmdline: string, dict or None
        :returns: an argparse Namespace instance
        :rtype: Namespace
        """
        namespace = Namespace()
        # None or a raw string is handed to BootArgs for parsing; anything
        # else is assumed to already be an items()-capable mapping
        if boot_cmdline is None or type(boot_cmdline) is str:
            bootargs = BootArgs(boot_cmdline)
        else:
            bootargs = boot_cmdline
        self.deprecated_bootargs = []
        # go over all options corresponding to current boot cmdline
        # and do any modifications necessary
        # NOTE: program cmdline overrides boot cmdline
        for arg, val in bootargs.items():
            option = self._get_bootarg_option(arg)
            if option is None:
                # this boot option is unknown to Anaconda, skip it
                continue
            if option.nargs != 0 and val is None:
                # nargs == 0 -> the option expects one or more arguments but the
                # boot option was not given any, so we skip it
                log.warning("boot option specified without expected number of "
                            "arguments and will be ignored: %s", arg)
                continue
            if option.nargs == 0 and option.const is not None:
                # nargs == 0 & constr == True -> store_true
                # (we could also check the class, but it begins with an
                # underscore, so it would be ugly)
                # special case: "mpath=0" would otherwise set mpath to True
                if option.const is True and val in ("0", "no", "off"):
                    setattr(namespace, option.dest, False)
                # Set all other set_const cases to the const specified
                else:
                    setattr(namespace, option.dest, option.const)

                # anaconda considers cases such as noselinux=off to be a negative
                # concord, which is to say that selinux will be set to False and
                # we hate you.

                continue
            setattr(namespace, option.dest, val)
        return namespace

    # pylint: disable=arguments-differ
    def parse_args(self, args=None, boot_cmdline=None):
        """
        Like ArgumentParser.parse_args(), but also parses the boot cmdline.
        (see parse_boot_cmdline for details on that process.)
        Program cmdline arguments will override boot cmdline arguments.

        :param args: program command line arguments
        :type args: string or None
        :param boot_cmdline: the Anaconda boot command line arguments
        :type boot_cmdline: string, dict or None
        :returns: an argparse Namespace instance
        :rtype: Namespace
        """
        # parse boot options first
        namespace = self.parse_boot_cmdline(boot_cmdline)
        # parse CLI arguments (if any) and add them to the namespace
        # created from parsing boot options, overriding any options
        # with the same destination already present in the namespace
        # NOTE: this means that CLI options override boot options
        namespace = ArgumentParser.parse_args(self, args, namespace)
        return namespace

def name_path_pairs(image_specs):
    """Processes and verifies image file specifications. Generates pairs
       of names and paths.

       :param image_specs: a list of image specifications
       :type image_specs: list of str

       Each image spec in image_specs has format <path>[:<name>] where
       <path> is the path to a local file and <name> is an optional
       name used to identify the disk in UI. <name> may not contain colons
       or slashes.

       If no name given in specification, synthesizes name from basename
       of path. Since two distinct paths may have the same basename, handles
       name collisions by synthesizing a different name for the colliding
       name.

       Raises an exception if:
         * A path is empty
         * A path specifies a non-existant file
         * A path specifies a directory
         * Duplicate paths are specified
         * A name contains a "/"
    """
    # rsplit with maxsplit=1 splits the optional :name suffix off at the
    # LAST colon, so the path part may itself contain colons
    image_specs = (spec.rsplit(":", 1) for spec in image_specs)
    path_name_pairs = ((image_spec[0], image_spec[1].strip() if len(image_spec) == 2 else None) for image_spec in image_specs)

    paths_seen = []
    names_seen = []
    for (path, name) in path_name_pairs:
        if path == "":
            raise ValueError("empty path specified for image file")
        path = os.path.abspath(path)
        if not os.path.exists(path):
            raise ValueError("non-existant path %s specified for image file" % path)
        if os.path.isdir(path):
            raise ValueError("directory path %s specified for image file" % path)
        if path in paths_seen:
            raise ValueError("path %s specified twice for image file" % path)
        paths_seen.append(path)

        if name and "/" in name:
            raise ValueError("improperly formatted image file name %s, includes slashes" % name)

        if not name:
            name = os.path.splitext(os.path.basename(path))[0]

        if name in names_seen:
            # pick the first "<name>_<n>" candidate not already taken
            names = ("%s_%d" % (name, n) for n in itertools.count())
            name = next(itertools.dropwhile(lambda n: n in names_seen, names))
        names_seen.append(name)

        yield name, path

class HelpTextParser(object):
    """Class to parse help text from file and make it available to option
       parser.
    """

    def __init__(self, path):
        """ Initializer
            :param path: The absolute path to the help text file
        """
        self._path = path

        # lazily-populated {option: help text} cache, filled by help_text()
        self._help_text = None

    def read(self, lines):
        """Reads option, help text pairs from a text file.

           Each pair is separated from the next by an empty line.
           The option comes first, followed by any number of lines of help text.

           :param lines: a sequence of lines of text
        """
        if not lines:
            return
        expect_option = True
        option = None
        text = []
        for line in (line.strip() for line in lines):
            if line == "":
                expect_option = True
            elif expect_option:
                if option:
                    yield option, " ".join(text)

                option = line
                text = []
                expect_option = False
            else:
                text.append(line)

        # emit the final pair (the loop above only yields when the NEXT
        # option starts)
        # NOTE(review): if the input contains only blank lines this yields
        # (None, "") -- verify callers tolerate that before relying on it
        yield option, " ".join(text)

    def help_text(self, option):
        """
        Returns the help text corresponding to the given command-line option.
        If no help text is available, returns the empty string.

        :param str option: The name of the option

        :rtype: str
        """
        if self._help_text is None:
            self._help_text = {}
            try:
                with open(self._path) as lines:
                    for parsed_option, parsed_text in self.read(lines):
                        self._help_text[parsed_option] = parsed_text
            except Exception: # pylint: disable=broad-except
                # best-effort: a missing/unreadable help file must not break
                # option parsing, so log and fall through to the empty cache
                log.error("error reading help text file %s", self._path)

        return self._help_text.get(option, "")
gpl-2.0
neumerance/cloudloon2
.venv/lib/python2.7/site-packages/pygments/style.py
270
3743
# -*- coding: utf-8 -*-
"""
    pygments.style
    ~~~~~~~~~~~~~~

    Basic style object.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.token import Token, STANDARD_TYPES


class StyleMeta(type):
    # Metaclass that, at class-creation time, compiles the class-level
    # ``styles`` mapping (token -> style-definition string like
    # "bold #f00 bg:#eee") into ``_styles``: token -> 9-element list
    #   [0] color   (6-digit hex string without '#', '' if unset)
    #   [1] bold    (0/1)       [2] italic    (0/1)   [3] underline (0/1)
    #   [4] bgcolor (hex/'')    [5] border    (hex/'')
    #   [6] roman   (0/1)       [7] sans      (0/1)   [8] mono      (0/1)

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        # guarantee every standard token type has at least an empty entry
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            # normalize '#rgb'/'#rrggbb' to a 6-digit hex string; '' passes
            # through; any other form is a programming error (assert)
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    # expand shorthand '#abc' to 'aabbcc'
                    return col[0]*2 + col[1]*2 + col[2]*2
            elif text == '':
                return ''
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                # start from the parent token's (already compiled) style so
                # subtokens inherit unless 'noinherit' is specified
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    # NOTE(review): ``token is None`` looks unreachable
                    # (token comes from ttype.split()) -- verify before
                    # relying on that branch
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    # 'noinherit' resets to the root Token style instead of
                    # the immediate parent
                    ndef = _styles[Token][:]
                else:
                    # copy so mutations below don't alias the parent's list
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        # a bare value is a foreground color
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        # Expose the compiled 9-slot list as a readable dict; empty strings
        # and zeros map to None/False.
        t = cls._styles[token]
        return {
            'color': t[0] or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': t[4] or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
        }

    def list_styles(cls):
        # relies on __iter__ below: yields (token, style dict) pairs
        return list(cls)

    def styles_token(cls, ttype):
        # membership test: has a style been compiled for this token type?
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)


class Style(object):
    # Python 2 metaclass hook: subclasses get their ``styles`` dict
    # compiled by StyleMeta at class-creation time.
    __metaclass__ = StyleMeta

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: Style definitions for individual token types.
    styles = {}
apache-2.0
zscproject/OWASP-ZSC
lib/encoder/linux_x86/xor_random.py
4
34594
#!/usr/bin/env python ''' OWASP ZSC https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project https://github.com/zscproject/OWASP-ZSC http://api.z3r0d4y.com/ https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ] ''' import random, binascii, string from core.compatible import version _version = version() chars = string.digits + string.ascii_letters def start(shellcode, job): if 'chmod' == job: t = True eax = str('0x0f') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False eax = 'push $%s' % (str(eax)) eax_xor = 'push $0x%s\npop %%eax\npush $0x%s\npop %%ebx\nxor %%eax,%%ebx\npush %%ebx\n' % ( eax_1, eax_2) shellcode = shellcode.replace(eax, eax_xor) ecx = str(shellcode.rsplit('\n')[8]) ecx_value = str(shellcode.rsplit('\n')[8].rsplit()[1][1:]) t = True while t: if _version is 2: ecx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ecx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ecx_2 = "%x" % (int(ecx_value, 16) ^ int(ecx_1, 16)) if '00' not in str(ecx_1) and '00' not in str(ecx_2) and len( ecx_1) >= 7 and len(ecx_2) >= 7: t = False ecx_xor = 'push $0x%s\npop %%ebx\npush $0x%s\npop %%ecx\nxor %%ecx,%%ebx\npush %%ebx\n_z3r0d4y_\n' % ( str(ecx_1), str(ecx_2)) shellcode = shellcode.replace(ecx, ecx_xor) n = 0 start = '' middle = '' end = '' xor = 0 for l in shellcode.rsplit('\n'): n += 1 if xor is 0: if '_z3r0d4y_' not in l: start += l + '\n' else: xor = 1 if xor is 1: if '_z3r0d4y_' not in l: if '%esp,%ebx' not in l: middle += l + '\n' else: xor = 2 if xor is 2: end += l + '\n' for l in 
middle.rsplit('\n'): t = True while t: if 'push $0x' in l: ebx = l.rsplit()[1][1:] if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(ebx[2:], 16) ^ int(ebx_1, 16)) if '00' not in str(ebx_1) and '00' not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%edx\nxor %%ebx,%%edx\npush %%edx\n' % ( str(ebx_1), str(ebx_2)) middle = middle.replace(l, command) t = False else: t = False shellcode = start + middle + end elif 'dir_create' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + '\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = 
(binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'download_execute' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + '\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and 
len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'download' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + '\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 
'exec' == job: t = True eax = str('0x46') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('mov $0x46,%al', eax_xor) for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2.replace( '-', '')) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'file_create' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) 
).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + '\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'script_executor' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '0' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = 
eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + '\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'system' == job: shellcode = 'xor %edx,%edx\n' + shellcode.replace( 'push $0xb\npop %eax\ncltd', '').replace('push %ebx\nmov %esp,%ecx', 'push %ebx\nmov %esp,%ecx' + '\n' + 'push $0xb\npop %eax\ncltd') t = True eax = str('0xb') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax > eax_1: if '00' not in str(eax_1) and '0' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0xb\npop %eax\ncltd', eax_xor + 
'\ncltd\n') for line in shellcode.rsplit('\n'): if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n' % ( str(ebx_1), str(ebx_2)) shellcode = shellcode.replace(line, command) t = False elif 'write' == job: t = True eax = str('0x5') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax_1 != eax: if eax > eax_1: if '00' not in str(eax_1) and '0' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0x5\npop %eax', eax_xor) t = True eax = str('0x4') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(1))).encode('latin-1')) ).decode('latin-1') eax_1 = str('0') + str(eax_1[1]) eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax_1 != eax: if eax > eax_1: if str('00') not in str(eax_1) and str('0') not in str( 
eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0x4\npop %eax', eax_xor) A = 0 for line in shellcode.rsplit('\n'): if 'mov %esp,%ebx' in line: A = 1 shellcode = shellcode.replace( line, '\nmov %esp,%ebx\n_z3r0d4y_\n') if A is 0: if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice( chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if str('00') not in str(ebx_1) and str('00') not in str( ebx_2) and len(ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1 and ebx_1 != data: if '-' in ebx_2: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ebx\nneg %%ebx\nxor $0x%s,%%ebx\npush %%ebx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False if t is True: command = '\npush $0x%s\npop %%ebx\nxor $0x%s,%%ebx\npush %%ebx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False shellcode = shellcode.replace('_z3r0d4y_', '') t = True eax = str('4014141') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if eax_1 != eax: if eax > eax_1: if '00' not in str(eax_1) and '00' not in str(eax_2): t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop 
%%ecx\nneg %%ecx\nxor $0x%s,%%ecx\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%ecx\nxor $0x%s,%%ecx\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0x4014141\npop %ecx', eax_xor + '\n_z3r0d4y_\n').replace( 'mov %esp,%ecx', '\n_z3r0|d4y_\nmov %esp,%ecx\n') A = 1 for line in shellcode.rsplit('\n'): if '_z3r0d4y_' in line: A = 0 if '_z3r0|d4y_' in line: A = 2 if A is 0: if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice( chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if ebx_1 != data and str('00') not in str( ebx_1) and str('00') not in str(ebx_2) and len( ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: if '-' in ebx_2: ebx_2 = ebx_2.replace('-', '') command = '\npush $0x%s\npop %%ecx\nneg %%ecx\nxor $0x%s,%%ecx\npush %%ecx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False if '-' not in ebx_2 and t is True: command = '\npush $0x%s\npop %%ecx\nxor $0x%s,%%ecx\npush %%ecx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False if A is 2: if 'push' in line and '$0x' in line and ',' not in line and len( line) > 14: data = line.rsplit('push')[1].rsplit('$0x')[1] t = True while t: if _version is 2: ebx_1 = binascii.b2a_hex(''.join(random.choice( chars) for i in range(4))) if _version is 3: ebx_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16)) if ebx_1 != data and str('00') not in str( ebx_1) and str('00') not in str(ebx_2) and len( ebx_2) >= 7 and len( ebx_1) >= 7 and '-' not in ebx_1: if '-' in ebx_2: ebx_2 = ebx_2.replace('-', '') command = '\npush 
$0x%s\npop %%edx\nneg %%edx\nxor $0x%s,%%edx\npush %%edx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False if '-' not in ebx_2 and t is True: command = '\npush $0x%s\npop %%edx\nxor $0x%s,%%edx\npush %%edx\n' % ( str(ebx_2), str(ebx_1)) shellcode = shellcode.replace(line, command) t = False shellcode = shellcode.replace('_z3r0d4y_', '').replace('_z3r0|d4y_', '') t = True eax = str('0b909090') while t: if _version is 2: eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4))) if _version is 3: eax_1 = (binascii.b2a_hex((''.join(random.choice( chars) for i in range(4))).encode('latin-1')) ).decode('latin-1') eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16)) if '00' not in str(eax_1) and '00' not in str( eax_2) and eax_1 != eax: t = False A = 0 eax = 'push $%s' % (str(eax)) if '-' in eax_2: A = 1 eax_2 = eax_2.replace('-', '') eax_xor = 'push $0x%s\npop %%edx\nneg %%edx\nxor $0x%s,%%edx\n' % ( eax_2, eax_1) if A is 0: eax_xor = 'push $0x%s\npop %%edx\nxor $0x%s,%%edx\n' % (eax_2, eax_1) shellcode = shellcode.replace('push $0x0b909090\n\npop %edx\n', eax_xor) return shellcode
gpl-3.0
mitschabaude/nanopores
nanopores/models/pughpoints.py
1
6275
# (c) 2016 Gregor Mitscha-Baude import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches from nanopores.geometries.pughpore import params as pugh_params from nanopores import Params def grid_piecewise1D(nodes, h, N=100, ep=None): # compute number of grid points in each section # N = 1/scaling * (length1 / h1 + length2 / h2 + ...) lengths = np.diff(np.array(nodes)) h = np.array(h) n = lengths/h n = np.round(n*N/sum(n)) # compute each grid intervals = zip(nodes[:-1], nodes[1:]) k = len(lengths) grids = [] # ep = endpoint preference = 0 or 1 if ep is None: ep = [0]*(k-1) for i in range(k): a, b = intervals[i] grid = list(np.linspace(a, b, n[i]+1)[1:-1]) #print i #print grid if i == 0 or ep[i-1] == 1: grid.insert(0, a) if i == k-1 or ep[i] == 0: grid.append(b) #print grid grids.append(grid) #print n return grids def lround(x, nd): if hasattr(x, "__iter__"): return [lround(t, nd) for t in x] else: return round(x, nd) def tensor(xy, z, r): tensorgrid = [] for i in range(len(z)): # scale xy by radius ri = r[i] xyz = [(ri*xj, ri*yj, zi) for zi in z[i] for xj, yj in xy] tensorgrid.extend(xyz) return lround(tensorgrid, 3) def plot_1Dgrid(z, grids): totalgrid = list(set(reduce(lambda a, b: a+b, grids)) - set(z)) fig = plt.figure("line") fig.set_size_inches(8, 1) plt.axhline(y=0, color="black", zorder=-10) plt.scatter(totalgrid, [0.]*len(totalgrid), color="black") plt.scatter(z, [0.]*len(z), color="red") plt.xlim(z[0]-1, z[-1]+1) plt.axis('off') def neg(x): return [-t for t in x] def plot_2Dgrid(xy): xx = [xi for (xi, yi) in xy] yy = [yi for (xi, yi) in xy] fig = plt.figure("triangle") fig.set_size_inches(4, 4) plt.scatter(xx, yy, color="red") plt.scatter(yy, xx, color="green") plt.scatter(xx + yy, neg(yy + xx), color="green") plt.scatter(neg(xx + yy + xx + yy), yy + xx + neg(yy + xx), color="green") plt.plot([0, 1], [0, 1], "-k") plt.plot([0, 1], [0, 0], "-k") plt.xlim(-1, 1) plt.ylim(-1, 1) def plot_xz_grid(xyz): # project to x-z plane xy = 
list(set([(x, z) for x, y, z in xyz])) xx = [xi for (xi, yi) in xy] yy = [yi for (xi, yi) in xy] fig = plt.figure("porexz") fig.set_size_inches(4, 4) plt.scatter(neg(xx) + xx, yy + yy) def plot_polygon(ax, polygon, **settings): settings = dict(dict(closed=True, facecolor="#eeeeee", linewidth=1., edgecolor="black"), **settings) polygon = np.array(polygon) polygon_m = np.column_stack([-polygon[:,0], polygon[:,1]]) patch = patches.Polygon(polygon, **settings) patchm = patches.Polygon(polygon_m, **settings) #patch.set_zorder(10) #patchm.set_zorder(10) ax.add_patch(patch) ax.add_patch(patchm) # will result in roughly nz * nr*(nr+1)/2 points def tensorgrid(nz=30, nr=5, plot=False, eps=5e-2, eps2=1e-1, buf=7., **params): params = Params(pugh_params) | Params(params) r = params.rMolecule r = r + eps # ---- create z part of tensor grid ----- ztop = params.hpore/2. zbot = -ztop # 6 nodes => 5 sections z = [zbot - buf, zbot - r, ztop - params.h2 + r, ztop - params.h1 + r, ztop + r, ztop + buf] # relative meshwidths, radii hz = np.array([1., 1., .5, 1., 1.]) rpore = [params.l0/2. - r, params.l3/2. - r, params.l2/2. - r, params.l1/2. - r, params.l0/2. - r] # to which of the two intervals the shared endpoint belongs ep = [0, 1, 1, 1] grids = grid_piecewise1D(z, hz, N=nz, ep=ep) # ---- create xy (triangle) part of tensor grid ----- # points in the unit triangle x = np.linspace(eps2, 1-eps, nr) y = np.linspace(eps, 1-eps2, nr) xy = [(xi, yi) for xi in x for yi in y if xi > yi] # ---- tensor product xyz = tensor(xy, grids, rpore) if plot: print "Created %d points in z direction." % (sum(len(g) for g in grids),) print "Created %d points in xy direction." 
% (len(xy),) print "Total number of points:", len(xyz) #plot_1Dgrid(z, grids) plot_2Dgrid(xy) plot_xz_grid(xyz) plt.ylim(-params.H*0.5, params.H*0.5) ax = plt.gca() from nanopores.models.pughpore import polygon plot_polygon(ax, polygon()) return xyz if __name__ == "__main__": from nanopores.models.pughpore import tensorgrid as tg xyz = tg(nz=30, nr=4, plot=True) plt.show() #........................R............................. # . # . # .........l0.......... . # . . . # ._ _______________ _............... . # |D| |D| . . . . # |D|......l1.......|D| h1 . . . # |D|_ ____l2_____ _|D|...... h2 . . # |DDD|_ _______ _|DDD|.......... . . # |DDDDD| |DDDDD| . . # |DDDDD| |DDDDD| . . # DNA--->|DDDDD| |DDDDD| hpore . # |DDDDD| |DDDDD| . . # |DDDDD|..l3...|DDDDD| . . # MEMBRANE |DDDDD| |DDDDD| . H # | |DDDDD| |DDDDD| . . # | |DDDDD| |DDDDD|....h4 . . #______V_________|DDD| |DDD|_____.________ .___ ....... #MMMMMMMMMMMMMMMM|DDD| |DDD|MMMMM.MMMMMMMMM.MMMM. hmem #MMMMMMMMMMMMMMMM|DDD|_______|DDD|MMMMM.MMMMMMMMM.MMMM....... # . . . # .......l4........ . # . # . # . #......................................................
mit
RedHatInsights/insights-core
insights/parsers/neutron_metadata_agent_log.py
1
1977
"""
NeutronMetadataAgentLog - file ``/var/log/neutron/metadata-agent.log``
======================================================================
"""
from .. import LogFileOutput, parser
from insights.specs import Specs


@parser(Specs.neutron_metadata_agent_log)
class NeutronMetadataAgentLog(LogFileOutput):
    """
    Read the OpenStack neutron metadata agent log,
    ``/var/log/neutron/metadata-agent.log``.

    This parser adds no behaviour of its own: every search and filter
    method is inherited unchanged from the super-class
    :class:`insights.core.LogFileOutput` -- refer to it for details.

    Sample log lines::

        2018-06-08 17:29:55.894 11770 WARNING neutron.agent.metadata.agent [-] Server does not support metadata RPC, fallback to using neutron client
        2018-06-08 17:29:55.907 11770 ERROR neutron.agent.metadata.agent [-] Unexpected error
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent Traceback (most recent call last):
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent   File "/usr/lib/python2.7/site-packages/neutron/agent/metadata/agent.py", line 109, in __call__
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent     self._authenticate_keystone()
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent   File "/usr/lib/python2.7/site-packages/neutronclient/client.py", line 218, in _authenticate_keystone
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent     raise exceptions.Unauthorized(message=resp_body)
        2018-06-08 17:29:56.126 11770 TRACE neutron.agent.metadata.agent Unauthorized: {"error": {"message": "The resource could not be found.", "code": 404, "title": "Not Found"}}

    Examples:
        >>> len(metadata_agent_log.get("Server does not support metadata RPC, fallback to using neutron client")) == 1
        True
        >>> from datetime import datetime
        >>> len(list(metadata_agent_log.get_after(datetime(2018, 6, 8, 17, 29, 56))))
        6
    """
apache-2.0
aidanlister/django
django/conf/locale/sv/formats.py
504
1569
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j F Y H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'Y-m-d' SHORT_DATETIME_FORMAT = 'Y-m-d H:i' FIRST_DAY_OF_WEEK = 1 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # Kept ISO formats as they are in first position DATE_INPUT_FORMATS = [ '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y', # '10/25/06' ] DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%m/%d/%y %H:%M', # '10/25/06 14:30' '%m/%d/%y', # '10/25/06' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3
bsd-3-clause