repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
AnhellO/DAS_Sistemas
Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/gis/utils/ogrinfo.py
131
1934
""" This module includes some utility functions for inspecting the layout of a GDAL data source -- the functionality is analogous to the output produced by the `ogrinfo` utility. """ from django.contrib.gis.gdal import DataSource from django.contrib.gis.gdal.geometries import GEO_CLASSES def ogrinfo(data_source, num_features=10): """ Walk the available layers in the supplied `data_source`, displaying the fields for the first `num_features` features. """ # Checking the parameters. if isinstance(data_source, str): data_source = DataSource(data_source) elif isinstance(data_source, DataSource): pass else: raise Exception('Data source parameter must be a string or a DataSource object.') for i, layer in enumerate(data_source): print("data source : %s" % data_source.name) print("==== layer %s" % i) print(" shape type: %s" % GEO_CLASSES[layer.geom_type.num].__name__) print(" # features: %s" % len(layer)) print(" srs: %s" % layer.srs) extent_tup = layer.extent.tuple print(" extent: %s - %s" % (extent_tup[0:2], extent_tup[2:4])) print("Displaying the first %s features ====" % num_features) width = max(*map(len, layer.fields)) fmt = " %%%ss: %%s" % width for j, feature in enumerate(layer[:num_features]): print("=== Feature %s" % j) for fld_name in layer.fields: type_name = feature[fld_name].type_name output = fmt % (fld_name, type_name) val = feature.get(fld_name) if val: if isinstance(val, str): val_fmt = ' ("%s")' else: val_fmt = ' (%s)' output += val_fmt % val else: output += ' (None)' print(output)
mit
joel-wright/DDRPi
plugins/gavanna_plugin.py
1
3204
__authors__ = ['Andrew Taylor'] import logging import pygame import random import time from datetime import datetime from DDRPi import DDRPiPlugin class GavannaPlugin(DDRPiPlugin): pulse_rate = 2000 pulse_increasing = 1 pulse_last_ratio = 0 def post_invalidate(self): self.changed = 1 def configure(self, config, image_surface): """ This is an example of an end user module - need to make sure we can get the main image surface and config to write to them both... """ self.ddrpi_config = config self.ddrpi_surface = image_surface self.clock = pygame.time.Clock() def __name__(self): return 'Text Plugin' def start(self): """ Start writing to the surface """ # Setup recurring events return None def stop(self): """ Stop writing to the surface and clean up """ # Stop recurring events return None def pause(self): return None def resume(self): self.post_invalidate() return None def handle(self, event): """ Handle the pygame event sent to the plugin from the main loop """ return None def draw_heart(self, colour, x_pos, y_pos, fill): w = self.ddrpi_surface.width h = self.ddrpi_surface.height heart = (0x06, 0x09, 0x11, 0x22, 0x11, 0x09, 0x06); if (fill > 0): heart = (0x06, 0x0F, 0x1F, 0x3E, 0x1F, 0x0F, 0x06); heart_height = 6 heart_width = len(heart) for x in range(0, heart_width): for y in range(0, heart_height): pixel_value = (heart[x] >> y) & 0x01 if (pixel_value == 1): self.ddrpi_surface.draw_tuple_pixel(x+x_pos,y+y_pos, colour) return None def update_surface(self): """ Write the updated plugin state to the dance surface and blit """ w = self.ddrpi_surface.width h = self.ddrpi_surface.height for x in range(0,w): for y in range(0,h): self.ddrpi_surface.draw_tuple_pixel(x,y, (0,0,0)) self.ddrpi_surface.draw_text("Gav", (0xFF,0xFF,0xFF), 3, 0) self.ddrpi_surface.draw_text("Anna", (0xFF,0xFF,0xFF), 0, 11) # Calculate the red value for the heart's centre ratio = int(255.0 * (float(pygame.time.get_ticks() % self.pulse_rate) / float(self.pulse_rate))) # Increase then decrease the 
value self.pulse_increasing = 1 pulse_mod = pygame.time.get_ticks() % (2*self.pulse_rate) # Calculate which if (pygame.time.get_ticks() % (2*self.pulse_rate) > self.pulse_rate): self.pulse_increasing = -1 # Work out the red value red_value = ratio if (self.pulse_increasing == -1): red_value = 255 - ratio # Draw the fading heart... self.draw_heart((red_value, 0x00, 0x00), w/2 -4, h/2 - 2, 1) # .. and a solid outline self.draw_heart((0xFF, 0x00, 0x00), w/2 -4, h/2 - 2, 0) # Limit the frame rate self.ddrpi_surface.blit() # Rate limit it self.clock.tick(25) def display_preview(self): """ Construct a splash screen suitable to display for a plugin selection menu """ w = self.ddrpi_surface.width h = self.ddrpi_surface.height # Background is black for x in range(0,w): for y in range(0,h): self.ddrpi_surface.draw_tuple_pixel(x,y, (0,0,0)) # Draw a solid red heart in the middle (ish) self.draw_heart((0xFF, 0x00, 0x00), w/2 -4, h/2 - 2, 1) self.ddrpi_surface.blit()
mit
powellc/hacklabs
setup.py
1
2283
from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand import sys version = __import__('hacklabs').__version__ install_requires = [ 'setuptools', 'Django==1.6.5', 'django-configurations==0.8', 'dj-database-url==0.3.0', 'pylibmc==1.3.0', 'boto==2.9.5', 'South==1.0.0', 'django-storages==1.1.8', 'Pillow==2.5.1', 'django-cache-url==0.8.0', 'werkzeug==0.9.4', 'gunicorn==0.17.4', 'easy-thumbnails==1.2', 'django-debug-toolbar==1.1', 'django-extensions==1.3.4', 'django-braces==1.4.0', 'django-allauth==0.16.1', 'django-floppyforms==1.1.1', 'django-custom-user==0.4', 'raven==5.0.0', 'boto==2.9.5', 'django-storages==1.1.8', 'psycopg2==2.5', 'Markdown>2.2.0', 'django-sekizai>=0.7', 'django-mptt==0.6.0', 'django-bootstrap-form==3.1', ] class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): #import here, cause outside the eggs aren't loaded import tox errno = tox.cmdline(self.test_args) sys.exit(errno) setup( name="hacklabs", version=version, url='http://github.com/powellc/hacklabs', license='BSD', platforms=['OS Independent'], description="An hacklabs for django applications.", author="Colin Powell", author_email='colin.powell@gmail.com', packages=find_packages(), install_requires=install_requires, include_package_data=True, zip_safe=False, tests_require=['tox'], cmdclass={'test': Tox}, classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ], package_dir={ 'hacklabs': 'hacklabs', 'hacklabs/templates': 'hacklabs/templates', }, entry_points={ 'console_scripts': [ 'hacklabs = hacklabs.manage_hacklabs:main', ], }, )
bsd-3-clause
mapennell/ansible
v1/ansible/runner/shell_plugins/sh.py
82
5847
# (c) 2014, Chris Church <chris@ninemoreminutes.com> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import os import re import pipes import ansible.constants as C _USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') class ShellModule(object): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\n' def env_prefix(self, **kwargs): '''Build command prefix with environment variables.''' env = dict( LANG = C.DEFAULT_MODULE_LANG, LC_CTYPE = C.DEFAULT_MODULE_LANG, LC_MESSAGES = C.DEFAULT_MODULE_LANG, ) env.update(kwargs) return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()]) def join_path(self, *args): return os.path.join(*args) def path_has_trailing_slash(self, path): return path.endswith('/') def chmod(self, mode, path): path = pipes.quote(path) return 'chmod %s %s' % (mode, path) def remove(self, path, recurse=False): path = pipes.quote(path) if recurse: return "rm -rf %s >/dev/null 2>&1" % path else: return "rm -f %s >/dev/null 2>&1" % path def mkdtemp(self, basefile=None, system=False, mode=None): if not basefile: basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) if system and basetmp.startswith('$HOME'): basetmp = self.join_path('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp if mode: cmd += ' && chmod %s %s' % (mode, basetmp) cmd 
+= ' && echo %s' % basetmp return cmd def expand_user(self, user_home_path): ''' Return a command to expand tildes in a path It can be either "~" or "~username". We use the POSIX definition of a username: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426 http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276 ''' # Check that the user_path to expand is safe if user_home_path != '~': if not _USER_HOME_PATH_RE.match(user_home_path): # pipes.quote will make the shell return the string verbatim user_home_path = pipes.quote(user_home_path) return 'echo %s' % user_home_path def checksum(self, path, python_interp): # The following test needs to be SH-compliant. BASH-isms will # not work if /bin/sh points to a non-BASH shell. # # In the following test, each condition is a check and logical # comparison (|| or &&) that sets the rc value. Every check is run so # the last check in the series to fail will be the rc that is # returned. # # If a check fails we error before invoking the hash functions because # hash functions may successfully take the hash of a directory on BSDs # (UFS filesystem?) which is not what the rest of the ansible code # expects # # If all of the available hashing methods fail we fail with an rc of # 0. This logic is added to the end of the cmd at the bottom of this # function. # Return codes: # checksum: success! # 0: Unknown error # 1: Remote file does not exist # 2: No read permissions on the file # 3: File is a directory # 4: No python interpreter # Quoting gets complex here. We're writing a python string that's # used by a variety of shells on the remote host to invoke a python # "one-liner". 
shell_escaped_path = pipes.quote(path) test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp) csums = [ "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 ] cmd = " || ".join(csums) cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path) return cmd def build_module_command(self, env_string, shebang, cmd, rm_tmp=None): cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] new_cmd = " ".join(cmd_parts) if rm_tmp: new_cmd = '%s; rm -rf %s >/dev/null 2>&1' % (new_cmd, rm_tmp) return new_cmd
gpl-3.0
nparley/mylatitude
lib/google/auth/_default.py
3
11668
# Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Application default credentials. Implements application default credentials and project ID detection. """ import io import json import logging import os import warnings import six from google.auth import environment_vars from google.auth import exceptions import google.auth.transport._http_client _LOGGER = logging.getLogger(__name__) # Valid types accepted for file-based credentials. _AUTHORIZED_USER_TYPE = 'authorized_user' _SERVICE_ACCOUNT_TYPE = 'service_account' _VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE) # Help message when no credentials can be found. _HELP_MESSAGE = """\ Could not automatically determine credentials. Please set {env} or \ explicitly create credentials and re-run the application. For more \ information, please see \ https://developers.google.com/accounts/docs/application-default-credentials. """.format(env=environment_vars.CREDENTIALS).strip() # Warning when using Cloud SDK user credentials _CLOUD_SDK_CREDENTIALS_WARNING = """\ Your application has authenticated using end user credentials from Google \ Cloud SDK. We recommend that most server applications use service accounts \ instead. If your application continues to use end user credentials from Cloud \ SDK, you might receive a "quota exceeded" or "API not enabled" error. 
For \ more information about service accounts, see \ https://cloud.google.com/docs/authentication/.""" def _warn_about_problematic_credentials(credentials): """Determines if the credentials are problematic. Credentials from the Cloud SDK that are associated with Cloud SDK's project are problematic because they may not have APIs enabled and have limited quota. If this is the case, warn about it. """ from google.auth import _cloud_sdk if credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID: warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING) def _load_credentials_from_file(filename): """Loads credentials from a file. The credentials file must be a service account key or stored authorized user credentials. Args: filename (str): The full path to the credentials file. Returns: Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded credentials and the project ID. Authorized user credentials do not have the project ID information. Raises: google.auth.exceptions.DefaultCredentialsError: if the file is in the wrong format or is missing. """ if not os.path.exists(filename): raise exceptions.DefaultCredentialsError( 'File {} was not found.'.format(filename)) with io.open(filename, 'r') as file_obj: try: info = json.load(file_obj) except ValueError as caught_exc: new_exc = exceptions.DefaultCredentialsError( 'File {} is not a valid json file.'.format(filename), caught_exc) six.raise_from(new_exc, caught_exc) # The type key should indicate that the file is either a service account # credentials file or an authorized user credentials file. 
credential_type = info.get('type') if credential_type == _AUTHORIZED_USER_TYPE: from google.auth import _cloud_sdk try: credentials = _cloud_sdk.load_authorized_user_credentials(info) except ValueError as caught_exc: msg = 'Failed to load authorized user credentials from {}'.format( filename) new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) six.raise_from(new_exc, caught_exc) # Authorized user credentials do not contain the project ID. _warn_about_problematic_credentials(credentials) return credentials, None elif credential_type == _SERVICE_ACCOUNT_TYPE: from google.oauth2 import service_account try: credentials = ( service_account.Credentials.from_service_account_info(info)) except ValueError as caught_exc: msg = 'Failed to load service account credentials from {}'.format( filename) new_exc = exceptions.DefaultCredentialsError(msg, caught_exc) six.raise_from(new_exc, caught_exc) return credentials, info.get('project_id') else: raise exceptions.DefaultCredentialsError( 'The file {file} does not have a valid type. ' 'Type is {type}, expected one of {valid_types}.'.format( file=filename, type=credential_type, valid_types=_VALID_TYPES)) def _get_gcloud_sdk_credentials(): """Gets the credentials and project ID from the Cloud SDK.""" from google.auth import _cloud_sdk # Check if application default credentials exist. 
credentials_filename = ( _cloud_sdk.get_application_default_credentials_path()) if not os.path.isfile(credentials_filename): return None, None credentials, project_id = _load_credentials_from_file( credentials_filename) if not project_id: project_id = _cloud_sdk.get_project_id() return credentials, project_id def _get_explicit_environ_credentials(): """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment variable.""" explicit_file = os.environ.get(environment_vars.CREDENTIALS) if explicit_file is not None: credentials, project_id = _load_credentials_from_file( os.environ[environment_vars.CREDENTIALS]) return credentials, project_id else: return None, None def _get_gae_credentials(): """Gets Google App Engine App Identity credentials and project ID.""" from google.auth import app_engine try: credentials = app_engine.Credentials() project_id = app_engine.get_project_id() return credentials, project_id except EnvironmentError: return None, None def _get_gce_credentials(request=None): """Gets credentials and project ID from the GCE Metadata Service.""" # Ping requires a transport, but we want application default credentials # to require no arguments. So, we'll use the _http_client transport which # uses http.client. This is only acceptable because the metadata server # doesn't do SSL and never requires proxies. from google.auth import compute_engine from google.auth.compute_engine import _metadata if request is None: request = google.auth.transport._http_client.Request() if _metadata.ping(request=request): # Get the project ID. try: project_id = _metadata.get_project_id(request=request) except exceptions.TransportError: project_id = None return compute_engine.Credentials(), project_id else: return None, None def default(scopes=None, request=None): """Gets the default credentials for the current environment. `Application Default Credentials`_ provides an easy way to obtain credentials to call Google APIs for server-to-server or local applications. 
This function acquires credentials from the environment in the following order: 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set to the path of a valid service account JSON private key file, then it is loaded and returned. The project ID returned is the project ID defined in the service account file if available (some older files do not contain project ID information). 2. If the `Google Cloud SDK`_ is installed and has application default credentials set they are loaded and returned. To enable application default credentials with the Cloud SDK run:: gcloud auth application-default login If the Cloud SDK has an active project, the project ID is returned. The active project can be set using:: gcloud config set project 3. If the application is running in the `App Engine standard environment`_ then the credentials and project ID from the `App Identity Service`_ are used. 4. If the application is running in `Compute Engine`_ or the `App Engine flexible environment`_ then the credentials and project ID are obtained from the `Metadata Service`_. 5. If no credentials are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised. .. _Application Default Credentials: https://developers.google.com\ /identity/protocols/application-default-credentials .. _Google Cloud SDK: https://cloud.google.com/sdk .. _App Engine standard environment: https://cloud.google.com/appengine .. _App Identity Service: https://cloud.google.com/appengine/docs/python\ /appidentity/ .. _Compute Engine: https://cloud.google.com/compute .. _App Engine flexible environment: https://cloud.google.com\ /appengine/flexible .. _Metadata Service: https://cloud.google.com/compute/docs\ /storing-retrieving-metadata Example:: import google.auth credentials, project_id = google.auth.default() Args: scopes (Sequence[str]): The list of scopes for the credentials. If specified, the credentials will automatically be scoped if necessary. 
request (google.auth.transport.Request): An object used to make HTTP requests. This is used to detect whether the application is running on Compute Engine. If not specified, then it will use the standard library http client to make requests. Returns: Tuple[~google.auth.credentials.Credentials, Optional[str]]: the current environment's credentials and project ID. Project ID may be None, which indicates that the Project ID could not be ascertained from the environment. Raises: ~google.auth.exceptions.DefaultCredentialsError: If no credentials were found, or if the credentials found were invalid. """ from google.auth.credentials import with_scopes_if_required explicit_project_id = os.environ.get( environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)) checkers = ( _get_explicit_environ_credentials, _get_gcloud_sdk_credentials, _get_gae_credentials, lambda: _get_gce_credentials(request)) for checker in checkers: credentials, project_id = checker() if credentials is not None: credentials = with_scopes_if_required(credentials, scopes) effective_project_id = explicit_project_id or project_id if not effective_project_id: _LOGGER.warning( 'No project ID could be determined. Consider running ' '`gcloud config set project` or setting the %s ' 'environment variable', environment_vars.PROJECT) return credentials, effective_project_id raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)
mit
jzoldak/edx-platform
common/lib/xmodule/xmodule/tests/test_library_root.py
33
3336
# -*- coding: utf-8 -*- """ Basic unit tests for LibraryRoot """ from mock import patch from xblock.fragment import Fragment from xblock.runtime import Runtime as VanillaRuntime from xmodule.x_module import AUTHOR_VIEW from xmodule.modulestore.tests.factories import LibraryFactory, ItemFactory from xmodule.modulestore.tests.utils import MixedSplitTestCase dummy_render = lambda block, _: Fragment(block.data) # pylint: disable=invalid-name @patch( 'xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.render', VanillaRuntime.render ) @patch('xmodule.html_module.HtmlDescriptor.author_view', dummy_render, create=True) @patch('xmodule.html_module.HtmlDescriptor.has_author_view', True, create=True) @patch('xmodule.x_module.DescriptorSystem.applicable_aside_types', lambda self, block: []) class TestLibraryRoot(MixedSplitTestCase): """ Basic unit tests for LibraryRoot (library_root_xblock.py) """ def test_library_author_view(self): """ Test that LibraryRoot.author_view can run and includes content from its children. We have to patch the runtime (module system) in order to be able to render blocks in our test environment. """ message = u"Hello world" library = LibraryFactory.create(modulestore=self.store) # Add one HTML block to the library: ItemFactory.create( category="html", parent_location=library.location, user_id=self.user_id, publish_item=False, modulestore=self.store, data=message ) library = self.store.get_library(library.location.library_key) context = {'reorderable_items': set(), } # Patch the HTML block to always render "Hello world" result = library.render(AUTHOR_VIEW, context) self.assertIn(message, result.content) def test_library_author_view_with_paging(self): """ Test that LibraryRoot.author_view can apply paging We have to patch the runtime (module system) in order to be able to render blocks in our test environment. 
""" library = LibraryFactory.create(modulestore=self.store) # Add five HTML blocks to the library: blocks = [ ItemFactory.create( category="html", parent_location=library.location, user_id=self.user_id, publish_item=False, modulestore=self.store, data="HtmlBlock" + str(i) ) for i in range(5) ] library = self.store.get_library(library.location.library_key) def render_and_check_contents(page, page_size): """ Renders block and asserts on returned content """ context = {'reorderable_items': set(), 'paging': {'page_number': page, 'page_size': page_size}} expected_blocks = blocks[page_size * page:page_size * (page + 1)] result = library.render(AUTHOR_VIEW, context) for expected_block in expected_blocks: self.assertIn(expected_block.data, result.content) render_and_check_contents(0, 3) render_and_check_contents(1, 3) render_and_check_contents(0, 2) render_and_check_contents(1, 2)
agpl-3.0
BMJHayward/numpy
numpy/core/__init__.py
18
2564
from __future__ import division, absolute_import, print_function from .info import __doc__ from numpy.version import version as __version__ # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core import os envbak = os.environ.copy() if 'OPENBLAS_MAIN_FREE' not in os.environ: os.environ['OPENBLAS_MAIN_FREE'] = '1' if 'GOTOBLAS_MAIN_FREE' not in os.environ: os.environ['GOTOBLAS_MAIN_FREE'] = '1' from . import multiarray os.environ.clear() os.environ.update(envbak) del envbak del os from . import umath from . import _internal # for freeze programs from . import numerictypes as nt multiarray.set_typeDict(nt.sctypeDict) from . import numeric from .numeric import * from . import fromnumeric from .fromnumeric import * from . import defchararray as char from . import records as rec from .records import * from .memmap import * from .defchararray import chararray from . import function_base from .function_base import * from . import machar from .machar import * from . import getlimits from .getlimits import * from . import shape_base from .shape_base import * del nt from .fromnumeric import amax as max, amin as min, \ round_ as round from .numeric import absolute as abs __all__ = ['char', 'rec', 'memmap'] __all__ += numeric.__all__ __all__ += fromnumeric.__all__ __all__ += rec.__all__ __all__ += ['chararray'] __all__ += function_base.__all__ __all__ += machar.__all__ __all__ += getlimits.__all__ __all__ += shape_base.__all__ from numpy.testing import Tester test = Tester().test bench = Tester().bench # Make it possible so that ufuncs can be pickled # Here are the loading and unloading functions # The name numpy.core._ufunc_reconstruct must be # available for unpickling to work. def _ufunc_reconstruct(module, name): # The `fromlist` kwarg is required to ensure that `mod` points to the # inner-most module rather than the parent package when module name is # nested. 
This makes it possible to pickle non-toplevel ufuncs such as # scipy.special.expit for instance. mod = __import__(module, fromlist=[name]) return getattr(mod, name) def _ufunc_reduce(func): from pickle import whichmodule name = func.__name__ return _ufunc_reconstruct, (whichmodule(func, name), name) import sys if sys.version_info[0] >= 3: import copyreg else: import copy_reg as copyreg copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) # Unclutter namespace (must keep _ufunc_reconstruct for unpickling) del copyreg del sys del _ufunc_reduce
bsd-3-clause
funtion/shadowsocks-1
shadowsocks/server.py
652
4836
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import sys import os import logging import signal sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, \ asyncdns, manager def main(): shell.check_python() config = shell.get_config(False) daemon.daemon_exec(config) if config['port_password']: if config['password']: logging.warn('warning: port_password should not be used with ' 'server_port and password. 
server_port and password ' 'will be ignored') else: config['port_password'] = {} server_port = config.get('server_port', None) if server_port: if type(server_port) == list: for a_server_port in server_port: config['port_password'][a_server_port] = config['password'] else: config['port_password'][str(server_port)] = config['password'] if config.get('manager_address', 0): logging.info('entering manager mode') manager.run(config) return tcp_servers = [] udp_servers = [] if 'dns_server' in config: # allow override settings in resolv.conf dns_resolver = asyncdns.DNSResolver(config['dns_server']) else: dns_resolver = asyncdns.DNSResolver() port_password = config['port_password'] del config['port_password'] for port, password in port_password.items(): a_config = config.copy() a_config['server_port'] = int(port) a_config['password'] = password logging.info("starting server at %s:%d" % (a_config['server'], int(port))) tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False)) udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False)) def run_server(): def child_handler(signum, _): logging.warn('received SIGQUIT, doing graceful shutting down..') list(map(lambda s: s.close(next_tick=True), tcp_servers + udp_servers)) signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), child_handler) def int_handler(signum, _): sys.exit(1) signal.signal(signal.SIGINT, int_handler) try: loop = eventloop.EventLoop() dns_resolver.add_to_loop(loop) list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers)) daemon.set_user(config.get('user', None)) loop.run() except Exception as e: shell.print_exception(e) sys.exit(1) if int(config['workers']) > 1: if os.name == 'posix': children = [] is_child = False for i in range(0, int(config['workers'])): r = os.fork() if r == 0: logging.info('worker started') is_child = True run_server() break else: children.append(r) if not is_child: def handler(signum, _): for pid in children: try: os.kill(pid, signum) os.waitpid(pid, 0) 
except OSError: # child may already exited pass sys.exit() signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGQUIT, handler) signal.signal(signal.SIGINT, handler) # master for a_tcp_server in tcp_servers: a_tcp_server.close() for a_udp_server in udp_servers: a_udp_server.close() dns_resolver.close() for child in children: os.waitpid(child, 0) else: logging.warn('worker is only available on Unix/Linux') run_server() else: run_server() if __name__ == '__main__': main()
apache-2.0
lsaffre/lino-welfare
lino_welfare/projects/mathieu/tests/test_notify.py
1
18512
# -*- coding: UTF-8 -*- # Copyright 2016-2018 Rumma & Ko Ltd # License: BSD (see file COPYING for details) """ Miscellaneous tests about the notification framework (:mod:`lino.modlib.notify` and :mod:`lino_xl.lib.notes`). Consult the source code of this module. You can run these tests individually by issuing:: $ cd lino_welfare/projects/chatelet $ python manage.py test tests.test_notify """ from __future__ import unicode_literals from __future__ import print_function from builtins import str import json from six.moves.urllib.parse import urlencode import six from django.conf import settings from django.utils import translation from lino.utils.djangotest import TestCase from lino.utils import i2d, AttrDict from lino.api import rt from lino.modlib.users.choicelists import UserTypes class TestCase(TestCase): maxDiff = None def check_notifications(self, expected=''): """ Check whether the database contains notification messages as expected. Hint: when `expected` is empty, then the found result is being printed to stdout so you can copy it into your code. """ ar = rt.models.notify.Messages.request() rst = ar.to_rst(column_names="subject owner user") if expected: self.assertEquivalent(expected, rst) else: print(rst) # print rst # handy when something fails def check_notes(self, expected=''): ar = rt.models.notes.Notes.request() rst = ar.to_rst(column_names="id user project subject") if expected: self.assertEquivalent(expected, rst) else: print(rst) def check_coachings(self, expected=''): ar = rt.models.coachings.Coachings.request() rst = ar.to_rst( column_names="id client start_date end_date user primary") if expected: self.assertEquivalent(expected, rst) else: print(rst) def test_checkin_guest(self): """Test whether notifications are being emitted. 
- when a visitor checks in - when a client is modified - when a coaching is created or modified - when a note is created or modified """ User = settings.SITE.user_model Message = rt.models.notify.Message Note = rt.models.notes.Note NoteType = rt.models.notes.EventType Guest = rt.models.cal.Guest Event = rt.models.cal.Event EventType = rt.models.cal.EventType Client = rt.models.pcsw.Client ClientStates = rt.models.pcsw.ClientStates Coaching = rt.models.coachings.Coaching ContentType = rt.models.contenttypes.ContentType self.assertEqual(settings.SITE.use_websockets, False) robin = self.create_obj( User, username='robin', user_type=UserTypes.admin, language="en") caroline = self.create_obj( User, username='caróline', user_type='200', language="fr") alicia = self.create_obj( User, username='alícia', first_name="Alicia", user_type='120', language="fr") roger = self.create_obj( User, username='róger', user_type='420', language="en") ses = rt.login('robin') translation.activate('fr') first = self.create_obj( Client, first_name="First", last_name="Gérard", client_state=ClientStates.coached) second = self.create_obj( Client, first_name="Second", last_name="Gérard", client_state=ClientStates.coached) self.create_obj( Coaching, client=second, start_date=i2d(20130501), end_date=i2d(20140501), user=caroline) second_roger = self.create_obj( Coaching, client=second, start_date=i2d(20140501), user=roger) self.create_obj( Coaching, client=second, start_date=i2d(20140520), user=alicia) nt = self.create_obj(NoteType, name="System note") settings.SITE.site_config.update(system_note_type=nt) consultation = self.create_obj(EventType, name="consultation") # gr = self.create_obj(GuestRole, name="client") event = self.create_obj( Event, event_type=consultation, user=caroline) guest = self.create_obj(Guest, event=event, partner=first) self.assertEqual(str(guest), 'Présence #1 (22.05.2014)') # Checkin a guest res = ses.run(guest.checkin) # 'GÉRARD First (100) has started waiting for 
caroline' self.assertEqual(res, { 'message': "GÉRARD First (100) a commencé d'attendre caróline", 'success': True, 'refresh': True}) # it has caused a notification message: self.assertEqual(Message.objects.count(), 1) msg = Message.objects.all()[0] self.assertEqual(msg.user.username, 'caróline') self.assertEqual( msg.subject, "GÉRARD First (100) a commencé d'attendre caróline") # it does *not* cause a system note: self.assertEqual(Note.objects.count(), 0) # When a client is modified, all active coaches get a # notification. # Note that Caroline doesn't get a notification because her # coaching is not active. # Alicia doesn't get a notification because she did it herself. # Roger doesn't get notified because he is user_type 420 data = dict(first_name="Seconda", an="submit_detail") kwargs = dict(data=urlencode(data)) kwargs['REMOTE_USER'] = 'alícia' url = '/api/pcsw/Clients/{}'.format(second.pk) self.client.force_login(alicia) res = self.client.put(url, **kwargs) self.assertEqual(res.status_code, 200) # self.assertEqual(Message.objects.count(), 2) # self.check_notifications() self.check_notifications(""" =================================================== ======= ============== Sujet Lié à Destinataire --------------------------------------------------- ------- -------------- GÉRARD First (100) a commencé d'attendre caróline caróline =================================================== ======= ============== """) # When a coaching is modified, all active coaches of that # client get a notification. 
Message.objects.all().delete() data = dict(start_date="02.05.2014", an="grid_put") data.update(mt=51) data.update(mk=second.pk) kwargs = dict(data=urlencode(data)) kwargs['REMOTE_USER'] = 'robin' self.client.force_login(robin) url = '/api/coachings/CoachingsByClient/{}'.format(second_roger.pk) res = self.client.put(url, **kwargs) self.assertEqual(res.status_code, 200) # self.check_notifications() self.check_notifications(""" ================================== ==================== =========== Subject Controlled by Recipient ---------------------------------- -------------------- ----------- robin a modifié róger / Gérard S *róger / Gérard S* Alicia ================================== ==================== =========== """) # AssignCoach. we are going to Assign caroline as coach for # first client. # Request URL:http://127.0.0.1:8000/api/newcomers/AvailableCoachesByClient/5?_dc=1469707129689&fv=EVERS%20Eberhart%20(127)%20assigned%20to%20Hubert%20Huppertz%20&fv=EVERS%20Eberhart%20(127)%20is%20now%20coached%20by%20Hubert%20Huppertz%20for%20Laufende%20Beihilfe.&fv=false&mt=48&mk=127&an=assign_coach&sr=5 # Request Method:GET # fv:EVERS Eberhart (127) assigned to Hubert Huppertz # fv:EVERS Eberhart (127) is now coached by Hubert Huppertz for Laufende Beihilfe. 
# fv:false # mt:48 # mk:127 # an:assign_coach # sr:5 Message.objects.all().delete() # self.assertEqual(Coaching.objects.count(), 1) # self.check_coachings() self.check_coachings(""" ==== ====================== ============== ============ ========== ========= ID Client Coached from until Coach Primary ---- ---------------------- -------------- ------------ ---------- --------- 1 GÉRARD Seconda (101) 01/05/2013 01/05/2014 caróline No 2 GÉRARD Seconda (101) 02/05/2014 róger No 3 GÉRARD Seconda (101) 20/05/2014 Alicia No ==== ====================== ============== ============ ========== ========= """) self.assertEqual(Note.objects.count(), 0) data = dict( fv=["First GÉRARD assigned to caróline", "Body", 'false'], an="assign_coach") data.update(mt=ContentType.objects.get_for_model(Client).pk) data.update(mk=first.pk) kwargs = dict(data=data) # kwargs = dict(data=urlencode(data)) kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/api/newcomers/AvailableCoachesByClient/{}'.format( caroline.pk) res = self.client.get(url, **kwargs) self.assertEqual(res.status_code, 200) self.check_notifications(""" =================================== ======= ============== Sujet Lié à Destinataire ----------------------------------- ------- -------------- First GÉRARD assigned to caróline caróline =================================== ======= ============== """) # self.check_coachings("") self.check_coachings(""" ==== ====================== ======================== ============ ============= ========== ID Bénéficiaire En intervention depuis au Intervenant Primaire ---- ---------------------- ------------------------ ------------ ------------- ---------- 1 GÉRARD Seconda (101) 01/05/2013 01/05/2014 caróline Non 2 GÉRARD Seconda (101) 02/05/2014 róger Non 3 GÉRARD Seconda (101) 20/05/2014 Alicia Non 4 GÉRARD First (100) 22/05/2014 caróline Oui ==== ====================== ======================== ============ ============= ========== """) self.check_notes(""" ==== ======== 
==================== =================================== ID Auteur Bénéficiaire Sujet ---- -------- -------------------- ----------------------------------- 1 Alicia GÉRARD First (100) First GÉRARD assigned to caróline ==== ======== ==================== =================================== """) # Mark client as former # Request URL:http://127.0.0.1:8000/api/pcsw/Clients/181?_dc=1469714189945&an=mark_former&sr=181 # Request Method:GET # an:mark_former Message.objects.all().delete() Note.objects.all().delete() data = dict(an="mark_former") kwargs = dict(data=data) # kwargs = dict(data=urlencode(data)) kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/api/pcsw/Clients/{}'.format(second.pk) res = self.client.get(url, **kwargs) self.assertEqual(res.status_code, 200) res = AttrDict(json.loads(res.content)) self.assertEqual( res.message, 'This will end 2 coachings of GÉRARD Seconda (101).') self.assertEqual(res.xcallback['title'], "Confirmation") kwargs = dict() kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/callbacks/{}/yes'.format(res.xcallback['id']) res = self.client.get(url, **kwargs) self.assertEqual(res.status_code, 200) res = AttrDict(json.loads(res.content)) self.assertEqual( res.message, 'Alicia a classé GÉRARD Seconda (101) comme <b>Ancien</b>.') self.assertTrue(res.success) self.check_notifications(""" =========================================================== ======================== ============== Sujet Lié à Destinataire ----------------------------------------------------------- ------------------------ -------------- Alicia a classé GÉRARD Seconda (101) comme <b>Ancien</b>. 
*GÉRARD Seconda (101)* róger =========================================================== ======================== ============== """) # check two coachings have now an end_date set: # self.check_coachings() self.check_coachings(""" ==== ====================== ======================== ============ ============= ========== ID Bénéficiaire En intervention depuis au Intervenant Primaire ---- ---------------------- ------------------------ ------------ ------------- ---------- 1 GÉRARD Seconda (101) 01/05/2013 01/05/2014 caróline Non 2 GÉRARD Seconda (101) 02/05/2014 22/05/2014 róger Non 3 GÉRARD Seconda (101) 20/05/2014 22/05/2014 Alicia Non 4 GÉRARD First (100) 22/05/2014 caróline Oui ==== ====================== ======================== ============ ============= ========== """) # self.check_notes() self.check_notes(""" ==== ======== ====================== =========================================================== ID Auteur Bénéficiaire Sujet ---- -------- ---------------------- ----------------------------------------------------------- 2 Alicia GÉRARD Seconda (101) Alicia a classé GÉRARD Seconda (101) comme <b>Ancien</b>. 
==== ======== ====================== =========================================================== """) # # RefuseClient # Message.objects.all().delete() Note.objects.all().delete() self.create_obj( Coaching, client=first, start_date=i2d(20130501), user=roger) first.client_state = ClientStates.newcomer first.save() data = dict(fv=["20", ""], an="refuse_client") kwargs = dict(data=data) # kwargs = dict(data=urlencode(data)) kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/api/pcsw/Clients/{}'.format(first.pk) res = self.client.get(url, **kwargs) self.assertEqual(res.status_code, 200) # self.check_notifications("") #if six.PY2: self.check_notifications(""" ========================================================= ====================== ============== Sujet Lié à Destinataire --------------------------------------------------------- ---------------------- -------------- Alicia a classé GÉRARD First (100) comme <b>Refusé</b>. *GÉRARD First (100)* caróline Alicia a classé GÉRARD First (100) comme <b>Refusé</b>. *GÉRARD First (100)* róger ========================================================= ====================== ============== """) # self.check_notes() self.check_notes(""" ==== ======== ==================== ========================================================= ID Auteur Bénéficiaire Sujet ---- -------- -------------------- --------------------------------------------------------- 3 Alicia GÉRARD First (100) Alicia a classé GÉRARD First (100) comme <b>Refusé</b>. ==== ======== ==================== ========================================================= """) # When a note is created, all active coaches of that # client get a notification. 
Message.objects.all().delete() data = dict() data.update(mt=51) data.update(mk=second.pk) data.update(an='submit_insert') data.update( subject="test", projectHidden=second.pk) kwargs = dict(data=data) kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/api/notes/NotesByProject/{}'.format(second.pk) res = self.client.post(url, **kwargs) self.assertEqual(res.status_code, 200) res = AttrDict(json.loads(res.content)) self.assertEqual(res.data_record['id'], 4) new_note_pk = res.data_record['id'] # self.check_notifications() self.check_notifications(""" ============================== ================== ============== Sujet Lié à Destinataire ------------------------------ ------------------ -------------- Alicia created Event/Note #4 *Observation #4* róger ============================== ================== ============== """) Message.objects.all().delete() data = dict() data.update(mt=51) data.update(mk=second.pk) data.update(an='submit_detail') data.update( subject="test 2", body="<p>Bla bla bla</p>", projectHidden=second.pk) kwargs = dict(data=urlencode(data)) # kwargs = dict(data=data) kwargs['REMOTE_USER'] = 'alícia' self.client.force_login(alicia) url = '/api/notes/NotesByProject/{}'.format(new_note_pk) res = self.client.put(url, **kwargs) self.assertEqual(res.status_code, 200) # self.check_notifications() # self.check_notifications("Aucun enregistrement") self.check_notifications(""" =============================== ================== ============== Sujet Lié à Destinataire ------------------------------- ------------------ -------------- Alicia modified Event/Note #4 *Observation #4* róger =============================== ================== ============== """) self.assertEqual(Message.objects.count(), 1) msg = Message.objects.all()[0] # print msg.body self.assertEquivalent(msg.body, """ <div><p>Subject: test 2<br/>Client: [client 101] (Seconda GÉRARD)</p><p>Alicia modified [note 4] (test 2):</p><ul><li><b>Body</b> : 1 lines 
added</li><li><b>Subject</b> : test --&gt; test 2</li></ul></div> """)
agpl-3.0
Ictp/indico
indico/MaKaC/authentication/baseAuthentication.py
2
10817
# -*- coding: utf-8 -*- ## ## ## This file is part of Indico. ## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN). ## ## Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or (at your option) any later version. ## ## Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Indico;if not, see <http://www.gnu.org/licenses/>. from flask import request from persistent import Persistent from MaKaC.common.ObjectHolders import ObjectHolder from MaKaC.errors import UserError, MaKaCError from MaKaC.i18n import _ from indico.core.config import Config from MaKaC.user import LoginInfo """ In this file the Authenticator base class is defined, also the PIdentity base. Every Authenticator that is developed has to overwrite the main methods if they want to have a full functionallity. """ class Authenthicator(ObjectHolder): def add( self, newId ): """ Add a new Id to the ObjectHolder. Returns the identity Id. :param newId: a PIdentity object that contains the user and login :type newId: MaKaC.baseAuthentication.PIdentity child class """ if self.hasKey( newId.getId() ): raise UserError( _("identity already exists")) id = newId.getId() tree = self._getIdx() if tree.has_key(id): raise UserError tree[ id ] = newId return id def _transformLogin(self, login): """Override to convert login names e.g. to lowercase""" return login def getAvatar(self, li): """ Returns an Avatar object, checking that the password is right. 
:param li: a LoginInfo object with the person's login string and password :type li: MaKaC.user.LoginInfo """ login = self._transformLogin(li.getLogin()) if self.hasKey(login): identity = self.getById(login) avatar = identity.authenticate(li) if avatar: self._postLogin(login, avatar) return avatar return None def getAvatarByLogin(self, login): """ Returns an Avatar object, WITHOUT checking the password! Will throw KeyError if not found. :param login: the person's login string :type login: str """ login = self._transformLogin(login) if self.hasKey(login): return self.getById(login).getUser() return None def getIdx(self): """ Returns the index of the ObjectHolder """ return self._getIdx() @classmethod def getId(self): """ Returns the Id of the Authenticator """ return self.id def getName(self): """ Returns the name of the Authenticator. If it is setup in the configuration, otherwise, default name. """ return Config.getInstance().getAuthenticatorConfigById(self.getId()).get("name", self.name) def getDescription(self): """ Returns the description of the Authenticator. 
""" return self.description def isSSOLoginActive(self): """ Returns if Single Sign-on is active """ return Config.getInstance().getAuthenticatorConfigById(self.getId()).get("SSOActive", False) def canUserBeActivated(self): """ Returns if the Avatar object of the created users are activated by default To override """ return False def SSOLogin(self, rh): """ Returns the Avatar object when the Authenticator makes login trough Single Sign-On :param rh: the Request Handler :type rh: MaKaC.webinterface.rh.base.RH and subclasses """ return None def createIdentity(self, li, avatar): """ Returns the created PIdentity object with the LoginInfo an Avatar :param li: a LoginInfo object with the person's login string and password :type li: MaKaC.user.LoginInfo :param avatar: an Avatar object of the user :type avatar: MaKaC.user.Avatar """ return None def createIdentitySSO(self, login, avatar): """Like createIdentity but with just a login (coming from SSO) instead of a login/pw combination.""" return None def fetchIdentity(self, avatar): """ Returns the created PIdentity object with the Avatar fetching from the authenticator :param avatar: an Avatar object of the user :type avatar: MaKaC.user.Avatar """ return None def createUser(self, li): """ Returns the created Avatar object through an LoginInfo object :param li: a LoginInfo object with the person's login string :type li: MaKaC.user.LoginInfo """ return None def matchUser(self, criteria, exact=0): """ Returns the list of users (Avatar) with the given criteria :param criteria: the criteria to search :type criteria: dict :param exact: the match has to be exact :type exact: boolean """ return [] def matchUserFirstLetter(self, index, letter): """ Returns the list of users (Avatar) starting by the given letter :param index: the index string (name, Surname...) 
:type index: str :param letter: the letter char :type letter: str """ return [] def searchUserById(self, id): """ Returns an Avatar by the given id :param id: the id string :type id: str """ return None def matchGroup(self, criteria, exact=0): """ Returns the list of groups (Group) with the given criteria :param criteria: the criteria to search :type criteria: dict :param exact: the match has to be exact :type exact: boolean """ return [] def matchGroupFirstLetter(self, letter): """ Returns the list of groups (Group) starting by the given letter :param letter: the letter char :type letter: str """ return [] def getGroupMemberList(self, group): """ Returns the list of members (string) for the given group :param group: the group string :type group: str """ return [] def isUserInGroup(self, user, group): """ Returns True if the user belongs to the group :param user: the user string :type user: str :param group: the group string :type group: str """ return False def _postLogin(self, login, av, sso=False): if not login: return if not self.hasKey(login): if not sso: # createIdentity expects a LoginInfo object, not a string! # However, _postLogin is never called for a non-existant identity unless SSO has been used. raise MaKaCError('postLogin called for new non-SSO identity') self.add(self.createIdentitySSO(login, av)) elif not av.getIdentityById(login, self.getId()): av.addIdentity(self.getById(login)) class PIdentity(Persistent): def __init__(self, login, user): self.setLogin( login ) self.setUser( user ) def getId(self): return self.getLogin() def setUser(self, newUser): self.user = newUser newUser.addIdentity( self ) def getUser(self): return self.user def setLogin(self, newLogin): self.login = newLogin.strip() def getLogin(self): return self.login def match(self, id): return self.getLogin() == id.getLogin() def authenticate(self, id): return None class SSOHandler: def retrieveAvatar(self, rh): """ Login using Shibbolet. 
""" from MaKaC.user import AvatarHolder, Avatar config = Config.getInstance().getAuthenticatorConfigById(self.id).get("SSOMapping", {}) if config.get('email', 'ADFS_EMAIL') in request.environ: email = request.environ[config.get("email", "ADFS_EMAIL")] login = request.environ.get(config.get("login", "ADFS_LOGIN")) personId = request.environ.get(config.get("personId", "ADFS_PERSONID")) phone = request.environ.get(config.get("phone", "ADFS_PHONENUMBER"), "") fax = request.environ.get(config.get("fax", "ADFS_FAXNUMBER"), "") lastname = request.environ.get(config.get("lastname", "ADFS_LASTNAME"), "") firstname = request.environ.get(config.get("firstname", "ADFS_FIRSTNAME"), "") institute = request.environ.get(config.get("institute", "ADFS_HOMEINSTITUTE"), "") if personId == '-1': personId = None ah = AvatarHolder() av = ah.match({"email": email}, exact=1, onlyActivated=False, searchInAuthenticators=False) if av: av = av[0] # don't allow disabled accounts if av.isDisabled(): return None elif not av.isActivated(): av.activateAccount() av.clearAuthenticatorPersonalData() av.setAuthenticatorPersonalData('phone', phone) av.setAuthenticatorPersonalData('fax', fax) av.setAuthenticatorPersonalData('surName', lastname) av.setAuthenticatorPersonalData('firstName', firstname) av.setAuthenticatorPersonalData('affiliation', institute) if personId != None and personId != av.getPersonId(): av.setPersonId(personId) else: avDict = {"email": email, "name": firstname, "surName": lastname, "organisation": institute, "telephone": phone, "login": login} av = Avatar(avDict) ah.add(av) av.setPersonId(personId) av.activateAccount() self._postLogin(login, av, True) return av return None def getLogoutCallbackURL(self): return Config.getInstance().getAuthenticatorConfigById(self.id).get("LogoutCallbackURL")
gpl-3.0
windelbouwman/ppci-mirror
ppci/lang/c/nodes/types.py
1
9214
""" This module contains internal representations of types. """ # pylint: disable=R0903 def is_scalar(typ): """ Determine whether the given type is of scalar kind """ return isinstance(typ, (BasicType, PointerType)) and not is_void(typ) def is_char_array(typ): """ Check if the given type is of string type. """ return isinstance(typ, ArrayType) and typ.element_type.is_scalar def is_integer(typ): """ Test if the given type is of integer type """ return ( isinstance(typ, BasicType) and typ.type_id in BasicType.INTEGER_TYPES ) def is_signed_integer(typ): """ Test if the given type is of signed integer type """ return ( isinstance(typ, BasicType) and typ.type_id in BasicType.SIGNED_INTEGER_TYPES ) def is_void(typ): """ Check if the given type is void """ return isinstance(typ, BasicType) and typ.type_id == BasicType.VOID def is_double(typ): """ Check if the given type is double """ return isinstance(typ, BasicType) and typ.type_id == BasicType.DOUBLE def is_float(typ): """ Check if the given type is float """ return isinstance(typ, BasicType) and typ.type_id == BasicType.FLOAT def is_array(typ): """ Check if the given type is an array """ return isinstance(typ, ArrayType) def is_union(typ): """ Check if the given type is of union type """ return isinstance(typ, UnionType) def is_struct(typ): """ Check if the given type is a struct """ return isinstance(typ, StructType) def is_struct_or_union(typ): """ Check if the given type is either struct or union type. """ return isinstance(typ, StructOrUnionType) # A type system: class CType: """ Base class for all types """ def __init__(self, qualifiers=None): self.qualifiers = qualifiers @property def is_void(self): """ See if this type is void """ return is_void(self) @property def is_pointer(self): """ Test if this type is of pointer type. 
""" return isinstance(self, PointerType) @property def is_float(self): """ See if this type is float """ return is_float(self) @property def is_double(self): """ See if this type is double """ return is_double(self) @property def is_scalar(self): """ Check if this is a scalar type """ return is_scalar(self) @property def is_compound(self): """ Test if this type is of compound type. """ return not self.is_scalar @property def is_char_array(self): """ Check if this type is string type. """ return is_char_array(self) @property def is_integer(self): """ Check if this type is an integer type """ return is_integer(self) @property def is_signed(self): """ Check if this type is of signed integer type """ return is_signed_integer(self) @property def is_array(self): """ Check if this type is array type. """ return is_array(self) @property def is_struct(self): """ Check if this type is a struct """ return is_struct(self) @property def is_union(self): """ Check if this type is of union type """ return is_union(self) @property def is_struct_or_union(self): """ Check if this type is either struct or union type. """ return is_struct_or_union(self) def pointer_to(self): """ Create a new pointer type to this type. 
""" return PointerType(self) class FunctionType(CType): """ Function type """ def __init__(self, arguments, return_type, is_vararg=False): super().__init__() self.is_vararg = is_vararg # assert all(isinstance(a, VariableDeclaration) for a in arguments) self.arguments = arguments self.argument_types = [a.typ for a in arguments] assert all(isinstance(t, CType) for t in self.argument_types) self.return_type = return_type assert isinstance(return_type, CType) def __repr__(self): return "Function-type" class IndexableType(CType): """ Array or pointer type """ def __init__(self, element_type): super().__init__() assert isinstance(element_type, CType) self.element_type = element_type class ArrayType(IndexableType): """ Array type """ def __init__(self, element_type, size): super().__init__(element_type) self.size = size def __repr__(self): return "Array-type" class PointerType(IndexableType): """ The famous pointer! """ def __repr__(self): return "Pointer-type" class EnumType(CType): """ Enum type """ def __init__(self, constants=None): super().__init__() self.constants = constants @property def complete(self): """ Test if this enum is complete (values are defined) """ return self.constants is not None def __repr__(self): return "Enum-type" class StructOrUnionType(CType): """ Common base for struct and union types """ def __init__(self, tag=None, fields=None): super().__init__() self._fields = None self._field_map = None self.tag = tag self.fields = fields @property def incomplete(self): """ Check whether this type is incomplete or not """ return self.fields is None @property def complete(self): """ Test if this type is complete """ return not self.incomplete def _get_fields(self): return self._fields def _set_fields(self, fields): """ Set the fields of this type, updating the field map. 
""" self._fields = fields if fields: self._field_map = { f[-1].name: f for f in self.get_named_field_paths() } fields = property(_get_fields, _set_fields) def get_named_field_paths(self): """ Create a list of field-paths, including those in anonymous members. A field path is a sequence of fields to arrive at the named field. """ field_paths = [] for field in self.fields: if field.is_anonymous: if field.typ.is_struct_or_union: for sub_field_path in field.typ.get_named_field_paths(): field_paths.append((field,) + sub_field_path) else: field_paths.append((field,)) return field_paths def get_field_names(self): """ Get a list of valid field names. """ return [f[-1].name for f in self.get_named_field_paths()] def has_field(self, name: str): """ Check if this type has the given field """ assert isinstance(name, str) return name in self._field_map def get_field(self, name: str): """ Get the field with the given name """ assert isinstance(name, str) return self._field_map[name][-1] def get_field_path(self, name: str): """ Return the full path to a specific field. This takes into account eventual anonymous struct members. """ return self._field_map[name] class StructType(StructOrUnionType): """ Structure type """ def __repr__(self): if self.complete: field_names = self.get_field_names() return "Structured-type field_names={}".format(field_names) else: return "Incomplete structured" class Field: """ A field inside a union or struct """ def __init__(self, typ, name, bitsize): self.typ = typ assert isinstance(typ, CType) self.name = name self.bitsize = bitsize def __repr__(self): if self.bitsize is None: return "Struct-field .{}".format(self.name) else: return "Struct-field .{} : {}".format(self.name, self.bitsize) @property def is_anonymous(self): """ Test if this is an anonymous field. 
""" return self.name is None @property def is_bitfield(self): """ Test if this field is a bitfield (or not) """ return self.bitsize is not None class UnionType(StructOrUnionType): """ Union type """ def __repr__(self): return "Union-type" class BasicType(CType): """ This type is one of: int, unsigned int, float or void """ VOID = "void" CHAR = "char" UCHAR = "unsigned char" SHORT = "short" USHORT = "unsigned short" INT = "int" UINT = "unsigned int" LONG = "long" ULONG = "unsigned long" LONGLONG = "long long" ULONGLONG = "unsigned long long" FLOAT = "float" DOUBLE = "double" LONGDOUBLE = "long double" SIGNED_INTEGER_TYPES = {CHAR, SHORT, INT, LONG, LONGLONG} UNSIGNED_INTEGER_TYPES = {UCHAR, USHORT, UINT, ULONG, ULONGLONG} INTEGER_TYPES = SIGNED_INTEGER_TYPES | UNSIGNED_INTEGER_TYPES FLOAT_TYPES = {FLOAT, DOUBLE, LONGDOUBLE} NUMERIC_TYPES = INTEGER_TYPES | FLOAT_TYPES def __init__(self, type_id): super().__init__() self.type_id = type_id def __repr__(self): return "Basic type {}".format(self.type_id)
bsd-2-clause
digideskio/brackets-shell
gyp/pylib/gyp/MSVSNew.py
225
12061
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""New implementation of Visual Studio project generation for SCons."""

import os
import random

import gyp.common

# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  _new_md5 = hashlib.md5
except ImportError:
  import md5
  _new_md5 = md5.new


# Initialize random number generator
random.seed()

# GUIDs for project types
ENTRY_TYPE_GUIDS = {
    'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
    'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}

#------------------------------------------------------------------------------
# Helper functions


def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.

  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-line string calculated from the name and seed.

  This generates something which looks like a GUID, but depends only on the
  name and seed.  This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can explicitly
  determine the GUID to refer to explicitly.  It also means that the GUID will
  not change when the project for a target is rebuilt.
  """
  # Calculate a MD5 signature for the seed and name.  Encode to bytes
  # explicitly: on Python 2 this is a no-op for the ASCII names gyp uses,
  # and on Python 3 md5() rejects str input outright.
  d = _new_md5((str(seed) + str(name)).encode('utf-8')).hexdigest().upper()
  # Convert most of the signature to GUID form (discard the rest)
  guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20] + '-' +
          d[20:32] + '}')
  return guid

#------------------------------------------------------------------------------


class MSVSSolutionEntry(object):
  """Base class for solution entries (projects and folders).

  Subclasses provide self.name and get_guid(); entries sort by name then
  guid so solution files are stable across regenerations.
  """

  def _sort_key(self):
    # Sort by name then guid (so things are in order on vs2008).
    return (self.name, self.get_guid())

  def __cmp__(self, other):
    # Python 2 comparison protocol; ignored by Python 3.
    return cmp(self._sort_key(), other._sort_key())

  # Rich comparisons so sorted() also works under Python 3, which does not
  # consult __cmp__.  Semantics mirror __cmp__ exactly.
  def __lt__(self, other):
    return self._sort_key() < other._sort_key()

  def __eq__(self, other):
    if not isinstance(other, MSVSSolutionEntry):
      return NotImplemented
    return self._sort_key() == other._sort_key()

  def __ne__(self, other):
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result

  # Keep identity-based hashing (matches the Python 2 behavior, where
  # defining __eq__ did not clear __hash__); entries are stored in sets.
  __hash__ = object.__hash__


class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""

  def __init__(self, path, name = None, entries = None,
               guid = None, items = None):
    """Initializes the folder.

    Args:
      path: Full path to the folder.
      name: Name of the folder.
      entries: List of folder entries to nest inside this folder.  May contain
          Folder or Project objects.  May be None, if the folder is empty.
      guid: GUID to use for folder, if not None.
      items: List of solution items to include in the folder project.  May be
          None, if the folder does not directly contain items.
    """
    if name:
      self.name = name
    else:
      # Use last layer.
      self.name = os.path.basename(path)

    self.path = path
    self.guid = guid

    # Copy passed lists (or set to empty lists)
    self.entries = sorted(list(entries or []))
    self.items = list(items or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']

  def get_guid(self):
    if self.guid is None:
      # Use consistent guids for folders (so things don't regenerate).
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid


#------------------------------------------------------------------------------


class MSVSProject(MSVSSolutionEntry):
  """Visual Studio project."""

  def __init__(self, path, name = None, dependencies = None, guid = None,
               spec = None, build_file = None, config_platform_overrides = None,
               fixpath_prefix = None):
    """Initializes the project.

    Args:
      path: Absolute path to the project file.
      name: Name of project.  If None, the name will be the same as the base
          name of the project file.
      dependencies: List of other Project objects this project is dependent
          upon, if not None.
      guid: GUID to use for project, if not None.
      spec: Dictionary specifying how to build this project.
      build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
          used in place of the default for this target.
      fixpath_prefix: the path used to adjust the behavior of _fixpath
    """
    self.path = path
    self.guid = guid
    self.spec = spec
    self.build_file = build_file
    # Use project filename if name not specified
    self.name = name or os.path.splitext(os.path.basename(path))[0]

    # Copy passed lists (or set to empty lists)
    self.dependencies = list(dependencies or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['project']

    if config_platform_overrides:
      self.config_platform_overrides = config_platform_overrides
    else:
      self.config_platform_overrides = {}
    self.fixpath_prefix = fixpath_prefix
    self.msbuild_toolset = None

  def set_dependencies(self, dependencies):
    self.dependencies = list(dependencies or [])

  def get_guid(self):
    if self.guid is None:
      # Set GUID from path
      # TODO(rspangler): This is fragile.
      # 1. We can't just use the project filename sans path, since there could
      #    be multiple projects with the same base name (for example,
      #    foo/unittest.vcproj and bar/unittest.vcproj).
      # 2. The path needs to be relative to $SOURCE_ROOT, so that the project
      #    GUID is the same whether it's included from base/base.sln or
      #    foo/bar/baz/baz.sln.
      # 3. The GUID needs to be the same each time this builder is invoked, so
      #    that we don't need to rebuild the solution when the project changes.
      # 4. We should be able to handle pre-built project files by reading the
      #    GUID from the files.
      self.guid = MakeGuid(self.name)
    return self.guid

  def set_msbuild_toolset(self, msbuild_toolset):
    self.msbuild_toolset = msbuild_toolset

#------------------------------------------------------------------------------


class MSVSSolution:
  """Visual Studio solution."""

  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.

    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution.  May contain Folder or Project
          objects.  May be None, if the folder is empty.
      variants: List of build variant strings.  If none, a default list will
          be used.
      websiteProperties: Flag to decide if the website properties section
          is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version

    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])

    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config.  Should we be able to handle variants being a dict,
    # or add a separate variant_map variable?  If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.


    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.
    self.Write()


  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.

    Raises:
      IndexError: An entry appears multiple times.
    """
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)

      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue

      all_entries.add(e)

      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries

    all_entries = sorted(all_entries)

    # Open file and print header
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())

    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))

      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')

      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')

      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')

      f.write('EndProject\r\n')

    # Global section
    f.write('Global\r\n')

    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')

    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()

    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))

        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')

    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')

    # Folder mappings
    # TODO(rspangler): Should omit this section if there are no folders
    f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
    for e in all_entries:
      if not isinstance(e, MSVSFolder):
        continue        # Does not apply to projects, only folders
      for subentry in e.entries:
        f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
    f.write('\tEndGlobalSection\r\n')

    f.write('EndGlobal\r\n')

    f.close()
mit
kerwinxu/barcodeManager
zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/pdflatex.py
1
2991
"""SCons.Tool.pdflatex Tool-specific initialization for pdflatex. Generates .pdf files from .latex or .ltx files There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Tool/pdflatex.py 5023 2010/06/14 22:05:46 scons" import SCons.Action import SCons.Util import SCons.Tool.pdf import SCons.Tool.tex PDFLaTeXAction = None def PDFLaTeXAuxFunction(target = None, source= None, env=None): result = SCons.Tool.tex.InternalLaTeXAuxAction( PDFLaTeXAction, target, source, env ) if result != 0: SCons.Tool.tex.check_file_error_message(env['PDFLATEX']) return result PDFLaTeXAuxAction = None def generate(env): """Add Builders and construction variables for pdflatex to an Environment.""" global PDFLaTeXAction if PDFLaTeXAction is None: PDFLaTeXAction = SCons.Action.Action('$PDFLATEXCOM', '$PDFLATEXCOMSTR') global PDFLaTeXAuxAction if PDFLaTeXAuxAction is None: PDFLaTeXAuxAction = SCons.Action.Action(PDFLaTeXAuxFunction, strfunction=SCons.Tool.tex.TeXLaTeXStrFunction) env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes) import pdf pdf.generate(env) bld = env['BUILDERS']['PDF'] bld.add_action('.ltx', PDFLaTeXAuxAction) bld.add_action('.latex', PDFLaTeXAuxAction) bld.add_emitter('.ltx', SCons.Tool.tex.tex_pdf_emitter) bld.add_emitter('.latex', SCons.Tool.tex.tex_pdf_emitter) SCons.Tool.tex.generate_common(env) def exists(env): return env.Detect('pdflatex') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
bsd-2-clause
garimakhulbe/autorest
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/SubscriptionIdApiVersion/setup.py
10
1118
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # coding: utf-8 from setuptools import setup, find_packages NAME = "microsoftazuretesturl" VERSION = "2014-04-01-preview" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = ["msrest>=0.4.0", "msrestazure>=0.4.0"] setup( name=NAME, version=VERSION, description="MicrosoftAzureTestUrl", author_email="", url="", keywords=["Swagger", "MicrosoftAzureTestUrl"], install_requires=REQUIRES, packages=find_packages(), include_package_data=True, long_description="""\ Some cool documentation. """ )
mit
cchurch/ansible
lib/ansible/module_utils/facts/virtual/openbsd.py
199
2319
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin from ansible.module_utils.facts.utils import get_file_content class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin): """ This is a OpenBSD-specific subclass of Virtual. It defines - virtualization_type - virtualization_role """ platform = 'OpenBSD' DMESG_BOOT = '/var/run/dmesg.boot' def get_virtual_facts(self): virtual_facts = {} # Set empty values as default virtual_facts['virtualization_type'] = '' virtual_facts['virtualization_role'] = '' virtual_product_facts = self.detect_virt_product('hw.product') virtual_facts.update(virtual_product_facts) if virtual_facts['virtualization_type'] == '': virtual_vendor_facts = self.detect_virt_vendor('hw.vendor') virtual_facts.update(virtual_vendor_facts) # Check the dmesg if vmm(4) attached, indicating the host is # capable of virtualization. 
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT) for line in dmesg_boot.splitlines(): match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line) if match: virtual_facts['virtualization_type'] = 'vmm' virtual_facts['virtualization_role'] = 'host' return virtual_facts class OpenBSDVirtualCollector(VirtualCollector): _fact_class = OpenBSDVirtual _platform = 'OpenBSD'
gpl-3.0
JulienMcJay/eclock
windows/Python27/Lib/site-packages/requests-2.2.1-py2.7.egg/requests/exceptions.py
222
1609
# -*- coding: utf-8 -*- """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. """ from .packages.urllib3.exceptions import HTTPError as BaseHTTPError class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request.""" class HTTPError(RequestException): """An HTTP error occurred.""" def __init__(self, *args, **kwargs): """ Initializes HTTPError with optional `response` object. """ self.response = kwargs.pop('response', None) super(HTTPError, self).__init__(*args, **kwargs) class ConnectionError(RequestException): """A Connection error occurred.""" class ProxyError(ConnectionError): """A proxy error occurred.""" class SSLError(ConnectionError): """An SSL error occurred.""" class Timeout(RequestException): """The request timed out.""" class URLRequired(RequestException): """A valid URL is required to make a request.""" class TooManyRedirects(RequestException): """Too many redirects.""" class MissingSchema(RequestException, ValueError): """The URL schema (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): """See defaults.py for valid schemas.""" class InvalidURL(RequestException, ValueError): """ The URL provided was somehow invalid. """ class ChunkedEncodingError(RequestException): """The server declared chunked encoding but sent an invalid chunk.""" class ContentDecodingError(RequestException, BaseHTTPError): """Failed to decode response content"""
gpl-2.0
OpenPLi/enigma2
lib/python/Screens/TaskView.py
3
5621
from Components.ActionMap import ActionMap
from Components.config import config, ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager
from InfoBarGenerics import InfoBarNotifications
from Tools import Notifications
from Screen import Screen
from Screens.MessageBox import MessageBox
import Screens.Standby


class JobView(InfoBarNotifications, Screen, ConfigListScreen):
	"""Screen that displays a single background job's progress and lets the
	user cancel it, push it to the background, or pick an "after event"
	action (standby / deep standby / close / nothing)."""

	def __init__(self, session, job, parent=None, cancelable=True, backgroundable=True, afterEventChangeable=True):
		Screen.__init__(self, session, parent)
		InfoBarNotifications.__init__(self)
		ConfigListScreen.__init__(self, [])
		self.parent = parent
		self.job = job
		self.setTitle(_("Job overview"))

		# Widgets mirrored on the LCD "summary_" variants.
		self["job_name"] = StaticText(job.name)
		self["job_progress"] = Progress()
		self["job_task"] = StaticText()
		self["summary_job_name"] = StaticText(job.name)
		self["summary_job_progress"] = Progress()
		self["summary_job_task"] = StaticText()
		self["job_status"] = StaticText()
		self.cancelable = cancelable
		self.backgroundable = backgroundable
		self["key_green"] = StaticText("")
		# Color-key labels reflect which actions are currently allowed.
		if self.cancelable:
			self["key_red"] = StaticText(_("Cancel"))
		else:
			self["key_red"] = StaticText("")
		if self.backgroundable:
			self["key_blue"] = StaticText(_("Background"))
		else:
			self["key_blue"] = StaticText("")
		self.onShow.append(self.windowShow)
		self.onHide.append(self.windowHide)
		self["setupActions"] = ActionMap(["ColorActions", "SetupActions"],
		{
			"green": self.ok,
			"red": self.abort,
			"blue": self.background,
			"cancel": self.abort,
			"ok": self.ok,
		}, -2)

		self.settings = ConfigSubsection()
		if SystemInfo["DeepstandbySupport"]:
			shutdownString = _("go to deep standby")
		else:
			shutdownString = _("shut down")
		self.settings.afterEvent = ConfigSelection(choices=[("nothing", _("do nothing")), ("close", _("Close")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default=self.job.afterEvent or "nothing")
		self.job.afterEvent = self.settings.afterEvent.getValue()
		self.afterEventChangeable = afterEventChangeable
		self.setupList()
		self.state_changed()

	def setupList(self):
		# Show the "After event" selector only when the caller allows it;
		# keep the job's afterEvent in sync with the on-screen selection.
		if self.afterEventChangeable:
			self["config"].setList([getConfigListEntry(_("After event"), self.settings.afterEvent)])
		else:
			self["config"].hide()
		self.job.afterEvent = self.settings.afterEvent.getValue()

	def keyLeft(self):
		ConfigListScreen.keyLeft(self)
		self.setupList()

	def keyRight(self):
		ConfigListScreen.keyRight(self)
		self.setupList()

	def windowShow(self):
		# Subscribe to job state updates while the window is visible.
		job_manager.visible = True
		self.job.state_changed.append(self.state_changed)

	def windowHide(self):
		job_manager.visible = False
		if len(self.job.state_changed) > 0:
			self.job.state_changed.remove(self.state_changed)

	def state_changed(self):
		# Refresh all widgets from the job's current state; called both
		# directly and as a job callback.
		j = self.job
		self["job_progress"].range = j.end
		self["summary_job_progress"].range = j.end
		self["job_progress"].value = j.progress
		self["summary_job_progress"].value = j.progress
		#print "JobView::state_changed:", j.end, j.progress
		self["job_status"].text = j.getStatustext()
		if j.status == j.IN_PROGRESS:
			self["job_task"].text = j.tasks[j.current_task].name
			self["summary_job_task"].text = j.tasks[j.current_task].name
		else:
			self["job_task"].text = ""
			self["summary_job_task"].text = j.getStatustext()
		if j.status in (j.FINISHED, j.FAILED):
			# Terminal states: trigger the configured after-event action
			# and update which color keys remain meaningful.
			self.performAfterEvent()
			self.backgroundable = False
			self["key_blue"].setText("")
			if j.status == j.FINISHED:
				self["key_green"].setText(_("OK"))
				self.cancelable = False
				self["key_red"].setText("")
			elif j.status == j.FAILED:
				self.cancelable = True
				self["key_red"].setText(_("Cancel"))

	def background(self):
		# close(True) tells the caller the job keeps running in background.
		if self.backgroundable:
			self.close(True)

	def ok(self):
		if self.job.status in (self.job.FINISHED, self.job.FAILED):
			self.close(False)
		else:
			self.background()

	def abort(self):
		# Not started: just drop it from the queue.  Running and cancelable:
		# ask the job to cancel.  Otherwise simply close the view.
		if self.job.status == self.job.NOT_STARTED:
			job_manager.active_jobs.remove(self.job)
			self.close(False)
		elif self.job.status == self.job.IN_PROGRESS and self.cancelable:
			self.job.cancel()
		else:
			self.close(False)

	def performAfterEvent(self):
		# Runs once the job reaches a terminal state; standby/deepstandby
		# prompt the user via a timed MessageBox before acting.
		self["config"].hide()
		if self.settings.afterEvent.getValue() == "nothing":
			return
		elif self.settings.afterEvent.getValue() == "close" and self.job.status == self.job.FINISHED:
			self.close(False)
		if self.settings.afterEvent.getValue() == "deepstandby":
			if not Screens.Standby.inTryQuitMainloop:
				Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A sleep timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20)
		elif self.settings.afterEvent.getValue() == "standby":
			if not Screens.Standby.inStandby:
				Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A sleep timer wants to set your\nreceiver to standby. Do that now?"), timeout=20)

	def checkNotifications(self):
		InfoBarNotifications.checkNotifications(self)
		# Once all notifications are handled, auto-close a failed job view
		# if the user chose the "close" after-event.
		if not Notifications.notifications:
			if self.settings.afterEvent.getValue() == "close" and self.job.status == self.job.FAILED:
				self.close(False)

	def sendStandbyNotification(self, answer):
		if answer:
			Notifications.AddNotification(Screens.Standby.Standby)

	def sendTryQuitMainloopNotification(self, answer):
		if answer:
			Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
gpl-2.0
gusai-francelabs/datafari
windows/python/Lib/test/test_resource.py
45
4870
import unittest
from test import test_support
import time

# Skips the whole module on platforms without the resource module (Python 2).
resource = test_support.import_module('resource')

# This test is checking a few specific problem spots with the resource module.

class ResourceTest(unittest.TestCase):

    def test_args(self):
        self.assertRaises(TypeError, resource.getrlimit)
        self.assertRaises(TypeError, resource.getrlimit, 42, 42)
        self.assertRaises(TypeError, resource.setrlimit)
        self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)

    def test_fsize_ismax(self):
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            self.skipTest('RLIMIT_FSIZE not available')

        # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
        # number on a platform with large file support.  On these platforms,
        # we need to test that the get/setrlimit functions properly convert
        # the number to a C long long and that the conversion doesn't raise
        # an error.
        self.assertEqual(resource.RLIM_INFINITY, max)
        resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))

    def test_fsize_enforced(self):
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            self.skipTest('RLIMIT_FSIZE not available')
        # Check to see what happens when the RLIMIT_FSIZE is small.  Some
        # versions of Python were terminated by an uncaught SIGXFSZ, but
        # pythonrun.c has been fixed to ignore that exception.  If so, the
        # write() should return EFBIG when the limit is exceeded.

        # At least one platform has an unlimited RLIMIT_FSIZE and attempts
        # to change it raise ValueError instead.
        try:
            try:
                resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
                limit_set = True
            except ValueError:
                limit_set = False
            f = open(test_support.TESTFN, "wb")
            try:
                # Fill exactly to the 1024-byte limit, then one byte over.
                f.write("X" * 1024)
                try:
                    f.write("Y")
                    f.flush()
                    # On some systems (e.g., Ubuntu on hppa) the flush()
                    # doesn't always cause the exception, but the close()
                    # does eventually.  Try flushing several times in
                    # an attempt to ensure the file is really synced and
                    # the exception raised.
                    for i in range(5):
                        time.sleep(.1)
                        f.flush()
                except IOError:
                    # Expected (EFBIG) only when the limit was lowered.
                    if not limit_set:
                        raise
                if limit_set:
                    # Close will attempt to flush the byte we wrote
                    # Restore limit first to avoid getting a spurious error
                    resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
            finally:
                f.close()
        finally:
            if limit_set:
                resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
            test_support.unlink(test_support.TESTFN)

    def test_fsize_toobig(self):
        # Be sure that setrlimit is checking for really large values
        too_big = 10L**50
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            self.skipTest('RLIMIT_FSIZE not available')
        try:
            resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
        except (OverflowError, ValueError):
            pass
        try:
            resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
        except (OverflowError, ValueError):
            pass

    def test_getrusage(self):
        self.assertRaises(TypeError, resource.getrusage)
        self.assertRaises(TypeError, resource.getrusage, 42, 42)
        usageself = resource.getrusage(resource.RUSAGE_SELF)
        usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
        # May not be available on all systems.
        try:
            usageboth = resource.getrusage(resource.RUSAGE_BOTH)
        except (ValueError, AttributeError):
            pass

    # Issue 6083: Reference counting bug
    def test_setrusage_refcount(self):
        try:
            limits = resource.getrlimit(resource.RLIMIT_CPU)
        except AttributeError:
            self.skipTest('RLIMIT_CPU not available')
        class BadSequence:
            # Sequence whose item lookups allocate heavily, to surface the
            # refcount bug in setrlimit's argument handling.
            def __len__(self):
                return 2
            def __getitem__(self, key):
                if key in (0, 1):
                    return len(tuple(range(1000000)))
                raise IndexError

        resource.setrlimit(resource.RLIMIT_CPU, BadSequence())

def test_main(verbose=None):
    test_support.run_unittest(ResourceTest)

if __name__ == "__main__":
    test_main()
apache-2.0
adaur/SickRage
lib/sqlalchemy/dialects/postgresql/hstore.py
78
11344
# postgresql/hstore.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import re

from .base import ARRAY, ischema_names
from ... import types as sqltypes
from ...sql import functions as sqlfunc
from ...sql.operators import custom_op
from ... import util

__all__ = ('HSTORE', 'hstore')

# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given.  This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(r"""
(
  "(?P<key> (\\ . | [^"])* )"       # Quoted key
)
[ ]* => [ ]*    # Pair operator, optional adjoining whitespace
(
    (?P<value_null> NULL )          # NULL value
  | "(?P<value> (\\ . | [^"])* )"   # Quoted value
)
""", re.VERBOSE)

HSTORE_DELIMITER_RE = re.compile(r"""
[ ]* , [ ]*
""", re.VERBOSE)


def _parse_error(hstore_str, pos):
    """format an unmarshalling error."""
    # Show up to `ctx` characters of context on either side of the
    # failure position, eliding the rest with '[...]'.
    ctx = 20
    hslen = len(hstore_str)

    parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
    residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]

    if len(parsed_tail) > ctx:
        parsed_tail = '[...]' + parsed_tail[1:]
    if len(residual) > ctx:
        residual = residual[:-1] + '[...]'

    return "After %r, could not parse residual at position %d: %r" % (
        parsed_tail, pos, residual)


def _parse_hstore(hstore_str):
    """Parse an hstore from its literal string representation.

    Attempts to approximate PG's hstore input parsing rules as closely as
    possible. Although currently this is not strictly necessary, since the
    current implementation of hstore's output syntax is stricter than what it
    accepts as input, the documentation makes no guarantees that will always
    be the case.

    Raises ValueError (via _parse_error) if the string cannot be consumed
    entirely.
    """
    result = {}
    pos = 0
    pair_match = HSTORE_PAIR_RE.match(hstore_str)

    while pair_match is not None:
        # Unescape \" before \\ — the reverse order would corrupt
        # sequences like '\\"'.
        key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\")
        if pair_match.group('value_null'):
            value = None
        else:
            value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\")
        result[key] = value

        # Matches below are against slices, so end() offsets are relative;
        # accumulate them into the absolute position.
        pos += pair_match.end()

        delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
        if delim_match is not None:
            pos += delim_match.end()

        pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])

    if pos != len(hstore_str):
        raise ValueError(_parse_error(hstore_str, pos))

    return result


def _serialize_hstore(val):
    """Serialize a dictionary into an hstore literal.  Keys and values must
    both be strings (except None for values).

    """
    def esc(s, position):
        # None is only legal as a value (rendered as unquoted NULL).
        if position == 'value' and s is None:
            return 'NULL'
        elif isinstance(s, util.string_types):
            return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
        else:
            raise ValueError("%r in %s position is not a string." %
                             (s, position))

    return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
                     for k, v in val.items())


class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
    """Represent the Postgresql HSTORE type.

    The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::

        data_table = Table('data_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', HSTORE)
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(),
                data = {"key1": "value1", "key2": "value2"}
            )

    :class:`.HSTORE` provides for a wide range of operations, including:

    * Index operations::

        data_table.c.data['some key'] == 'some value'

    * Containment operations::

        data_table.c.data.has_key('some key')

        data_table.c.data.has_all(['one', 'two', 'three'])

    * Concatenation::

        data_table.c.data + {"k1": "v1"}

    For a full list of special methods see :class:`.HSTORE.comparator_factory`.

    For usage with the SQLAlchemy ORM, it may be desirable to combine
    the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary
    now part of the :mod:`sqlalchemy.ext.mutable`
    extension.  This extension will allow "in-place" changes to the
    dictionary, e.g. addition of new keys or replacement/removal of existing
    keys to/from the current dictionary, to produce events which will be
    detected by the unit of work::

        from sqlalchemy.ext.mutable import MutableDict

        class MyClass(Base):
            __tablename__ = 'data_table'

            id = Column(Integer, primary_key=True)
            data = Column(MutableDict.as_mutable(HSTORE))

        my_object = session.query(MyClass).one()

        # in-place mutation, requires Mutable extension
        # in order for the ORM to detect
        my_object.data['some_key'] = 'some value'

        session.commit()

    When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
    will not be alerted to any changes to the contents of an existing
    dictionary, unless that dictionary value is re-assigned to the
    HSTORE-attribute itself, thus generating a change event.

    .. versionadded:: 0.8

    .. seealso::

        :class:`.hstore` - render the Postgresql ``hstore()`` function.


    """

    __visit_name__ = 'HSTORE'

    class comparator_factory(sqltypes.Concatenable.Comparator):
        """Define comparison operations for :class:`.HSTORE`."""

        def has_key(self, other):
            """Boolean expression.  Test for presence of a key.  Note that the
            key may be a SQLA expression.
            """
            return self.expr.op('?')(other)

        def has_all(self, other):
            """Boolean expression.  Test for presence of all keys in the PG
            array.
            """
            return self.expr.op('?&')(other)

        def has_any(self, other):
            """Boolean expression.  Test for presence of any key in the PG
            array.
            """
            return self.expr.op('?|')(other)

        def defined(self, key):
            """Boolean expression.  Test for presence of a non-NULL value for
            the key.  Note that the key may be a SQLA expression.
            """
            return _HStoreDefinedFunction(self.expr, key)

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if keys are a superset of the keys of
            the argument hstore expression.
            """
            return self.expr.op('@>')(other)

        def contained_by(self, other):
            """Boolean expression.  Test if keys are a proper subset of the
            keys of the argument hstore expression.
            """
            return self.expr.op('<@')(other)

        def __getitem__(self, other):
            """Text expression.  Get the value at a given key.  Note that the
            key may be a SQLA expression.
            """
            return self.expr.op('->', precedence=5)(other)

        def delete(self, key):
            """HStore expression.  Returns the contents of this hstore with the
            given key deleted.  Note that the key may be a SQLA expression.
            """
            if isinstance(key, dict):
                key = _serialize_hstore(key)
            return _HStoreDeleteFunction(self.expr, key)

        def slice(self, array):
            """HStore expression.  Returns a subset of an hstore defined by
            array of keys.
            """
            return _HStoreSliceFunction(self.expr, array)

        def keys(self):
            """Text array expression.  Returns array of keys."""
            return _HStoreKeysFunction(self.expr)

        def vals(self):
            """Text array expression.  Returns array of values."""
            return _HStoreValsFunction(self.expr)

        def array(self):
            """Text array expression.  Returns array of alternating keys and
            values.
            """
            return _HStoreArrayFunction(self.expr)

        def matrix(self):
            """Text array expression.  Returns array of [key, value] pairs."""
            return _HStoreMatrixFunction(self.expr)

        def _adapt_expression(self, op, other_comparator):
            # Map the custom hstore operators to their SQL result types;
            # everything else falls through to the Concatenable behavior.
            if isinstance(op, custom_op):
                if op.opstring in ['?', '?&', '?|', '@>', '<@']:
                    return op, sqltypes.Boolean
                elif op.opstring == '->':
                    return op, sqltypes.Text
            return sqltypes.Concatenable.Comparator.\
                _adapt_expression(self, op, other_comparator)

    def bind_processor(self, dialect):
        # On Python 2 the serialized literal is encoded to the dialect's
        # encoding (bytes on the wire); on Python 3 it stays a str.
        if util.py2k:
            encoding = dialect.encoding

            def process(value):
                if isinstance(value, dict):
                    return _serialize_hstore(value).encode(encoding)
                else:
                    return value
        else:
            def process(value):
                if isinstance(value, dict):
                    return _serialize_hstore(value)
                else:
                    return value
        return process

    def result_processor(self, dialect, coltype):
        # Inverse of bind_processor: decode (py2k) then parse the literal
        # back into a dict; NULL columns pass through as None.
        if util.py2k:
            encoding = dialect.encoding

            def process(value):
                if value is not None:
                    return _parse_hstore(value.decode(encoding))
                else:
                    return value
        else:
            def process(value):
                if value is not None:
                    return _parse_hstore(value)
                else:
                    return value
        return process

# Register for reflection: columns of type "hstore" map to HSTORE.
ischema_names['hstore'] = HSTORE


class hstore(sqlfunc.GenericFunction):
    """Construct an hstore value within a SQL expression using the
    Postgresql ``hstore()`` function.

    The :class:`.hstore` function accepts one or two arguments as described
    in the Postgresql documentation.

    E.g.::

        from sqlalchemy.dialects.postgresql import array, hstore

        select([hstore('key1', 'value1')])

        select([
                hstore(
                    array(['key1', 'key2', 'key3']),
                    array(['value1', 'value2', 'value3'])
                )
            ])

    .. versionadded:: 0.8

    .. seealso::

        :class:`.HSTORE` - the Postgresql ``HSTORE`` datatype.

    """
    type = HSTORE
    name = 'hstore'


# Internal GenericFunction wrappers used by comparator_factory above; each
# renders the like-named Postgresql hstore support function.

class _HStoreDefinedFunction(sqlfunc.GenericFunction):
    type = sqltypes.Boolean
    name = 'defined'


class _HStoreDeleteFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = 'delete'


class _HStoreSliceFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = 'slice'


class _HStoreKeysFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'akeys'


class _HStoreValsFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'avals'


class _HStoreArrayFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'hstore_to_array'


class _HStoreMatrixFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = 'hstore_to_matrix'
gpl-3.0
initNirvana/Easyphotos
env/lib/python3.4/site-packages/pip/_vendor/packaging/version.py
451
11884
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing, validation and rich comparison of version strings.

:class:`Version` implements the PEP 440 version scheme.  Strings that do
not conform are handled by :class:`LegacyVersion`, which reproduces the
pre-PEP-440 setuptools ordering and always sorts before any PEP 440
version (it uses a hardcoded epoch of -1).  :func:`parse` dispatches
between the two.
"""
from __future__ import absolute_import, division, print_function

import collections
import itertools
import re

from ._structures import Infinity


__all__ = [
    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]


# Internal parsed representation of a PEP 440 version; built by
# Version.__init__ and consumed by __str__ and the property accessors.
_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)


def parse(version):
    """
    Parse the given version string and return either a :class:`Version`
    object or a :class:`LegacyVersion` object depending on if the given
    version is a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)


class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """


class _BaseVersion(object):
    # Shared comparison machinery.  Subclasses must assign ``self._key`` a
    # value whose natural tuple ordering implements the desired version
    # ordering; all rich comparisons and hashing delegate to that key.

    def __hash__(self):
        return hash(self._key)

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

    def __le__(self, other):
        return self._compare(other, lambda s, o: s <= o)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

    def __ge__(self, other):
        return self._compare(other, lambda s, o: s >= o)

    def __gt__(self, other):
        return self._compare(other, lambda s, o: s > o)

    def __ne__(self, other):
        return self._compare(other, lambda s, o: s != o)

    def _compare(self, other, method):
        # Only versions from this hierarchy are comparable; returning
        # NotImplemented lets Python try the reflected operation.
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return method(self._key, other._key)


class LegacyVersion(_BaseVersion):
    """A version string that does not conform to PEP 440.

    Ordered via the de-facto setuptools scheme (see ``_legacy_cmpkey``);
    sorts before every PEP 440 :class:`Version`.
    """

    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __str__(self):
        return self._version

    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))

    @property
    def public(self):
        # Legacy versions have no local segment, so the whole string is public.
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        # Local version segments are a PEP 440 concept; never present here.
        return None

    @property
    def is_prerelease(self):
        # Pre-release status cannot be determined for legacy versions.
        return False

    @property
    def is_postrelease(self):
        return False


# Splits a legacy version string into runs of digits, runs of letters,
# dots and dashes (everything else is dropped by re.split).
_legacy_version_component_re = re.compile(
    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)

# Normalizes spellings so that equivalent legacy tags compare equal;
# "@" sorts before any alphanumeric part, pushing dev releases first.
_legacy_version_replacement_map = {
    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}


def _parse_version_parts(s):
    # Yields normalized, directly-comparable string parts for a legacy
    # version: numerics zero-padded to 8 chars, alpha parts "*"-prefixed
    # so that pre-release tags sort before the trailing "*final" marker.
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)

        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"


def _legacy_cmpkey(version):
    # Builds the sort key for a LegacyVersion: (epoch, tuple_of_parts).
    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)
    parts = tuple(parts)

    return epoch, parts


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""


class Version(_BaseVersion):
    """A PEP 440 conformant version string.

    Raises :class:`InvalidVersion` if the string does not match
    ``VERSION_PATTERN``.  Comparison follows the PEP 440 ordering rules
    (see ``_cmpkey``); ``str()`` renders the normalized form.
    """

    # Anchored (with surrounding whitespace tolerated) and case-insensitive;
    # VERSION_PATTERN itself is deliberately unanchored for reuse.
    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        # Render the normalized (PEP 440 canonical) form of the version.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # The public version is everything before the "+local" segment.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release segment only: no pre/post/dev/local parts.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Both dev releases and pre-releases count as pre-release per PEP 440.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)


def _parse_letter_version(letter, number):
    # Normalizes a (letter, number) pre/post/dev segment into a canonical
    # ("letter", int) pair, or None when neither component is present.
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"

        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)


# NOTE(review): "seperators" is a misspelling of "separators" kept as-is
# here to leave the vendored code token-identical to upstream.
_local_version_seperators = re.compile(r"[\._-]")


def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_seperators.split(local)
        )


def _cmpkey(epoch, release, pre, post, dev, local):
    # Builds the sort key for a Version.  Missing segments are replaced by
    # +/-Infinity sentinels (from ._structures) so that tuple comparison
    # implements the PEP 440 ordering rules directly.

    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
mit
DoubleNegativeVisualEffects/cortex
test/IECoreHoudini/FromHoudiniPointsConverter.py
4
39834
########################################################################## # # Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), # its affiliates and/or its licensors. # # Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## import hou import IECore import IECoreHoudini import unittest import os class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) : def createBox( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) box = geo.createNode( "box" ) return box def createTorus( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) torus = geo.createNode( "torus" ) return torus def createPoints( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) box = geo.createNode( "box" ) facet = geo.createNode( "facet" ) facet.parm("postnml").set(True) points = geo.createNode( "scatter" ) facet.setInput( 0, box ) points.setInput( 0, facet ) return points # creates a converter def testCreateConverter( self ) : box = self.createBox() converter = IECoreHoudini.FromHoudiniPointsConverter( box ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) return converter # creates a converter def testFactory( self ) : box = self.createBox() converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.PointsPrimitive ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter ) self.assertEqual( converter, None ) self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() ) converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.PointsPrimitive ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) 
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.PointsPrimitive ] ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) # performs geometry conversion def testDoConversion( self ) : converter = self.testCreateConverter() result = converter.convert() self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) ) def testConvertFromHOMGeo( self ) : geo = self.createPoints().geometry() converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo ) self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) result = converter.convert() self.failUnless( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) ) converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.PointsPrimitive ) self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) # convert a mesh def testConvertMesh( self ) : torus = self.createTorus() converter = IECoreHoudini.FromHoudiniPointsConverter( torus ) result = converter.convert() self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() ) bbox = result.bound() self.assertEqual( bbox.min.x, -1.5 ) self.assertEqual( bbox.max.x, 1.5 ) self.assertEqual( result.numPoints, 100 ) for i in range( result.numPoints ) : self.assert_( result["P"].data[i].x >= bbox.min.x ) self.assert_( result["P"].data[i].x <= bbox.max.x ) # test prim/vertex attributes def testConvertPrimVertAttributes( self ) : torus = self.createTorus() geo = torus.parent() # add vertex normals facet = geo.createNode( "facet", node_name = "add_point_normals" ) facet.parm("postnml").set(True) facet.setInput( 0, torus ) # add a primitive colour attributes primcol = geo.createNode( "primitive", node_name = "prim_colour" ) primcol.parm("doclr").set(1) primcol.parm("diffr").setExpression("rand($PR)") 
primcol.parm("diffg").setExpression("rand($PR+1)") primcol.parm("diffb").setExpression("rand($PR+2)") primcol.setInput( 0, facet ) # add a load of different vertex attributes vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True ) vert_f1.parm("name").set("vert_f1") vert_f1.parm("class").set(3) vert_f1.parm("value1").setExpression("$VTX*0.1") vert_f1.setInput( 0, primcol ) vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True ) vert_f2.parm("name").set("vert_f2") vert_f2.parm("class").set(3) vert_f2.parm("size").set(2) vert_f2.parm("value1").setExpression("$VTX*0.1") vert_f2.parm("value2").setExpression("$VTX*0.1") vert_f2.setInput( 0, vert_f1 ) vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True ) vert_f3.parm("name").set("vert_f3") vert_f3.parm("class").set(3) vert_f3.parm("size").set(3) vert_f3.parm("value1").setExpression("$VTX*0.1") vert_f3.parm("value2").setExpression("$VTX*0.1") vert_f3.parm("value3").setExpression("$VTX*0.1") vert_f3.setInput( 0, vert_f2 ) vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True ) vert_i1.parm("name").set("vert_i1") vert_i1.parm("class").set(3) vert_i1.parm("type").set(1) vert_i1.parm("value1").setExpression("$VTX*0.1") vert_i1.setInput( 0, vert_f3 ) vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True ) vert_i2.parm("name").set("vert_i2") vert_i2.parm("class").set(3) vert_i2.parm("type").set(1) vert_i2.parm("size").set(2) vert_i2.parm("value1").setExpression("$VTX*0.1") vert_i2.parm("value2").setExpression("$VTX*0.1") vert_i2.setInput( 0, vert_i1 ) vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True ) vert_i3.parm("name").set("vert_i3") vert_i3.parm("class").set(3) vert_i3.parm("type").set(1) vert_i3.parm("size").set(3) vert_i3.parm("value1").setExpression("$VTX*0.1") vert_i3.parm("value2").setExpression("$VTX*0.1") 
vert_i3.parm("value3").setExpression("$VTX*0.1") vert_i3.setInput( 0, vert_i2 ) vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True ) vert_v3f.parm("name").set("vert_v3f") vert_v3f.parm("class").set(3) vert_v3f.parm("type").set(2) vert_v3f.parm("value1").setExpression("$VTX*0.1") vert_v3f.parm("value2").setExpression("$VTX*0.1") vert_v3f.parm("value3").setExpression("$VTX*0.1") vert_v3f.setInput( 0, vert_i3 ) detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True ) detail_i3.parm("name").set("detail_i3") detail_i3.parm("class").set(0) detail_i3.parm("type").set(1) detail_i3.parm("size").set(3) detail_i3.parm("value1").set(123) detail_i3.parm("value2").set(456.789) # can we catch it out with a float? detail_i3.parm("value3").set(789) detail_i3.setInput( 0, vert_v3f ) out = geo.createNode( "null", node_name="OUT" ) out.setInput( 0, detail_i3 ) # convert it all converter = IECoreHoudini.FromHoudiniPointsConverter( out ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) result = converter.convert() self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) ) bbox = result.bound() self.assertEqual( bbox.min.x, -1.5 ) self.assertEqual( bbox.max.x, 1.5 ) self.assertEqual( result.numPoints, 100 ) for i in range( result.numPoints ) : self.assert_( result["P"].data[i].x >= bbox.min.x ) self.assert_( result["P"].data[i].x <= bbox.max.x ) # test point attributes self.assert_( "P" in result ) self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData ) self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assert_( "N" in result ) self.assertEqual( result['N'].data.typeId(), 
IECore.TypeId.V3fVectorData ) self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal ) # test detail attributes self.assert_( "detail_i3" in result ) self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData ) self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( result['detail_i3'].data.value.x, 123 ) self.assertEqual( result['detail_i3'].data.value.y, 456 ) self.assertEqual( result['detail_i3'].data.value.z, 789 ) # test primitive attributes self.assert_( "Cd" not in result ) # test vertex attributes attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ] for a in attrs : self.assert_( a not in result ) self.assert_( result.arePrimitiveVariablesValid() ) # test prim/vertex attributes on a single primitive (mesh) def testConvertMeshPrimVertAttributes( self ) : torus = self.createTorus() torus.parm( "type" ).set( 1 ) geo = torus.parent() # add vertex normals facet = geo.createNode( "facet", node_name = "add_point_normals" ) facet.parm("postnml").set(True) facet.setInput( 0, torus ) # add a primitive colour attributes primcol = geo.createNode( "primitive", node_name = "prim_colour" ) primcol.parm("doclr").set(1) primcol.parm("diffr").setExpression("rand($PR)") primcol.parm("diffg").setExpression("rand($PR+1)") primcol.parm("diffb").setExpression("rand($PR+2)") primcol.setInput( 0, facet ) # add a load of different vertex attributes vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True ) vert_f1.parm("name").set("vert_f1") vert_f1.parm("class").set(3) vert_f1.parm("value1").setExpression("$VTX*0.1") vert_f1.setInput( 0, primcol ) vert_f2 = geo.createNode( "attribcreate", 
node_name = "vert_f2", exact_type_name=True ) vert_f2.parm("name").set("vert_f2") vert_f2.parm("class").set(3) vert_f2.parm("size").set(2) vert_f2.parm("value1").setExpression("$VTX*0.1") vert_f2.parm("value2").setExpression("$VTX*0.1") vert_f2.setInput( 0, vert_f1 ) vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True ) vert_f3.parm("name").set("vert_f3") vert_f3.parm("class").set(3) vert_f3.parm("size").set(3) vert_f3.parm("value1").setExpression("$VTX*0.1") vert_f3.parm("value2").setExpression("$VTX*0.1") vert_f3.parm("value3").setExpression("$VTX*0.1") vert_f3.setInput( 0, vert_f2 ) vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True ) vert_i1.parm("name").set("vert_i1") vert_i1.parm("class").set(3) vert_i1.parm("type").set(1) vert_i1.parm("value1").setExpression("$VTX*0.1") vert_i1.setInput( 0, vert_f3 ) vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True ) vert_i2.parm("name").set("vert_i2") vert_i2.parm("class").set(3) vert_i2.parm("type").set(1) vert_i2.parm("size").set(2) vert_i2.parm("value1").setExpression("$VTX*0.1") vert_i2.parm("value2").setExpression("$VTX*0.1") vert_i2.setInput( 0, vert_i1 ) vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True ) vert_i3.parm("name").set("vert_i3") vert_i3.parm("class").set(3) vert_i3.parm("type").set(1) vert_i3.parm("size").set(3) vert_i3.parm("value1").setExpression("$VTX*0.1") vert_i3.parm("value2").setExpression("$VTX*0.1") vert_i3.parm("value3").setExpression("$VTX*0.1") vert_i3.setInput( 0, vert_i2 ) vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True ) vert_v3f.parm("name").set("vert_v3f") vert_v3f.parm("class").set(3) vert_v3f.parm("type").set(2) vert_v3f.parm("value1").setExpression("$VTX*0.1") vert_v3f.parm("value2").setExpression("$VTX*0.1") vert_v3f.parm("value3").setExpression("$VTX*0.1") vert_v3f.setInput( 0, vert_i3 ) vertString = 
geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True ) vertString.parm("name").set("vertString") vertString.parm("class").set(3) vertString.parm("type").set(3) vertString.parm("string").set("string $VTX!") vertString.setInput( 0, vert_v3f ) detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True ) detail_i3.parm("name").set("detail_i3") detail_i3.parm("class").set(0) detail_i3.parm("type").set(1) detail_i3.parm("size").set(3) detail_i3.parm("value1").set(123) detail_i3.parm("value2").set(456.789) # can we catch it out with a float? detail_i3.parm("value3").set(789) detail_i3.setInput( 0, vertString ) out = geo.createNode( "null", node_name="OUT" ) out.setInput( 0, detail_i3 ) # convert it all converter = IECoreHoudini.FromHoudiniPointsConverter( out ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) result = converter.convert() self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) ) bbox = result.bound() self.assertEqual( bbox.min.x, -1.5 ) self.assertEqual( bbox.max.x, 1.5 ) self.assertEqual( result.numPoints, 100 ) for i in range( result.numPoints ) : self.assert_( result["P"].data[i].x >= bbox.min.x ) self.assert_( result["P"].data[i].x <= bbox.max.x ) # test point attributes self.assert_( "P" in result ) self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData ) self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assert_( "N" in result ) self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData ) self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( result['N'].data.size(), 
result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal ) # test detail attributes self.assert_( "detail_i3" in result ) self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData ) self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( result['detail_i3'].data.value.x, 123 ) self.assertEqual( result['detail_i3'].data.value.y, 456 ) self.assertEqual( result['detail_i3'].data.value.z, 789 ) # test primitive attributes self.assert_( "Cs" in result ) self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData ) self.assertEqual( result["Cs"].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform ) self.assertEqual( result["Cs"].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) : for j in range( 0, 3 ) : self.assert_( result["Cs"].data[i][j] >= 0.0 ) self.assert_( result["Cs"].data[i][j] <= 1.0 ) # test vertex attributes attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f", "vertStringIndices" ] for a in attrs : self.assert_( a in result ) self.assertEqual( result[a].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( result[a].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() ) self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() ) self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() ) for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) : for j in range( 0, 3 ) : self.assert_( result["vert_f3"].data[i][j] >= 0.0 ) self.assert_( 
result["vert_f3"].data[i][j] < 400.1 ) self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() ) self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() ) self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() ) for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) : for j in range( 0, 3 ) : self.assert_( result["vert_i3"].data[i][j] < 10 ) self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() ) self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData ) self.assertEqual( result["vertString"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( result["vertStringIndices"].data.typeId(), IECore.TypeId.IntVectorData ) for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) : self.assertEqual( result["vertString"].data[i], "string %d!" % i ) self.assertEqual( result["vertStringIndices"].data[i], i ) self.assert_( result.arePrimitiveVariablesValid() ) # convert some points def testConvertPoints( self ) : points = self.createPoints() converter = IECoreHoudini.FromHoudiniPointsConverter( points ) result = converter.convert() self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() ) self.assertEqual( points.parm('npts').eval(), result.numPoints ) self.assert_( "P" in result.keys() ) self.assert_( "N" in result.keys() ) self.assert_( result.arePrimitiveVariablesValid() ) # simple attribute conversion def testSetupAttributes( self ) : points = self.createPoints() geo = points.parent() attr = geo.createNode( "attribcreate", exact_type_name=True ) attr.setInput( 0, points ) attr.parm("name").set( "test_attribute" ) attr.parm("type").set(0) # float attr.parm("size").set(1) # 1 element attr.parm("value1").set(123.456) attr.parm("value2").set(654.321) converter = IECoreHoudini.FromHoudiniPointsConverter( attr ) 
result = converter.convert() self.assert_( "test_attribute" in result.keys() ) self.assertEqual( result["test_attribute"].data.size(), points.parm('npts').eval() ) self.assert_( result.arePrimitiveVariablesValid() ) return attr # testing point attributes and types def testPointAttributes( self ) : attr = self.testSetupAttributes() result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData ) self.assert_( result["test_attribute"].data[0] > 123.0 ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # integer result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData ) self.assertEqual( result["test_attribute"].data[0], 123 ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(0) # float attr.parm("size").set(2) # 2 elementS attr.parm("value2").set(456.789) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData ) self.assertEqual( result["test_attribute"].data[0], IECore.V2f( 123.456, 456.789 ) ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # int result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData ) 
self.assertEqual( result["test_attribute"].data[0], IECore.V2i( 123, 456 ) ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(0) # float attr.parm("size").set(3) # 3 elements attr.parm("value3").set(999.999) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData ) self.assertEqual( result["test_attribute"].data[0],IECore.V3f( 123.456, 456.789, 999.999 ) ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # int result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData ) self.assertEqual( result["test_attribute"].data[0], IECore.V3i( 123, 456, 999 ) ) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set( 3 ) # string attr.parm( "string" ).set( "string $PT!" ) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData ) self.assertEqual( result["test_attribute"].data[10], "string 10!" 
) self.assertEqual( result["test_attribute"].data.size(), 5000 ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( result["test_attributeIndices"].data.typeId(), IECore.TypeId.IntVectorData ) self.assertEqual( result["test_attributeIndices"].data[10], 10 ) self.assertEqual( result["test_attributeIndices"].data.size(), 5000 ) self.assertEqual( result["test_attributeIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assert_( result.arePrimitiveVariablesValid() ) # testing detail attributes and types def testDetailAttributes( self ) : attr = self.testSetupAttributes() attr.parm("class").set(0) # detail attribute result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() attr.parm("value1").set(123.456) self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData ) self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # integer result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData ) self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(0) # float attr.parm("size").set(2) # 2 elementS attr.parm("value2").set(456.789) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData ) self.assertEqual( result["test_attribute"].data.value, IECore.V2f( 123.456, 456.789 ) ) self.assertEqual( result["test_attribute"].interpolation, 
IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # int result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData ) self.assertEqual( result["test_attribute"].data.value, IECore.V2i( 123, 456 ) ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(0) # float attr.parm("size").set(3) # 3 elements attr.parm("value3").set(999.999) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData ) self.assertEqual( result["test_attribute"].data.value, IECore.V3f( 123.456, 456.789, 999.999 ) ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set(1) # int result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData ) self.assertEqual( result["test_attribute"].data.value, IECore.V3i( 123, 456, 999 ) ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) attr.parm("type").set( 3 ) # string attr.parm( "string" ).set( "string!" ) result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert() self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData ) self.assertEqual( result["test_attribute"].data.value, "string!" ) self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assert_( result.arePrimitiveVariablesValid() ) # testing that float[4] doesn't work! 
def testFloat4attr( self ) : # we can't deal with float 4's right now attr = self.testSetupAttributes() attr.parm("name").set( "test_attribute" ) attr.parm("size").set(4) # 4 elements per point-attribute converter = IECoreHoudini.FromHoudiniPointsConverter( attr ) result = converter.convert() self.assert_( "test_attribute" not in result.keys() ) # invalid due to being float[4] self.assert_( result.arePrimitiveVariablesValid() ) # testing conversion of animating geometry def testAnimatingGeometry( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) torus = geo.createNode( "torus" ) facet = geo.createNode( "facet" ) facet.parm("postnml").set(True) mountain = geo.createNode( "mountain" ) mountain.parm("offset1").setExpression( "$FF" ) points = geo.createNode( "scatter" ) facet.setInput( 0, torus ) mountain.setInput( 0, facet ) points.setInput( 0, mountain ) converter = IECoreHoudini.FromHoudiniPointsConverter( points ) hou.setFrame(1) points_1 = converter.convert() hou.setFrame(2) converter = IECoreHoudini.FromHoudiniPointsConverter( points ) points_2 = converter.convert() self.assertNotEqual( points_1["P"].data, points_2["P"].data ) # testing we can handle an object being deleted def testObjectWasDeleted( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) torus = geo.createNode( "torus" ) converter = IECoreHoudini.FromHoudiniPointsConverter( torus ) g1 = converter.convert() torus.destroy() g2 = converter.convert() self.assertEqual( g2, g1 ) self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPointsConverter, torus ) ) # testing we can handle an object being deleted def testObjectWasDeletedFactory( self ) : obj = hou.node("/obj") geo = obj.createNode("geo", run_init_scripts=False) torus = geo.createNode( "torus" ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torus ) g1 = converter.convert() torus.destroy() g2 = converter.convert() self.assertEqual( g2, g1 ) 
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torus ) ) # testing converting a Houdini particle primitive with detail and point attribs def testParticlePrimitive( self ) : obj = hou.node("/obj") geo = obj.createNode( "geo", run_init_scripts=False ) popnet = geo.createNode( "popnet" ) location = popnet.createNode( "location" ) detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True ) detailAttr.parm("name").set( "float3detail" ) detailAttr.parm("class").set( 0 ) # detail detailAttr.parm("type").set( 0 ) # float detailAttr.parm("size").set( 3 ) # 3 elements detailAttr.parm("value1").set( 1 ) detailAttr.parm("value2").set( 2 ) detailAttr.parm("value3").set( 3 ) pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True ) pointAttr.parm("name").set( "float3point" ) pointAttr.parm("class").set( 2 ) # point pointAttr.parm("type").set( 0 ) # float pointAttr.parm("size").set( 3 ) # 3 elements pointAttr.parm("value1").set( 1 ) pointAttr.parm("value2").set( 2 ) pointAttr.parm("value3").set( 3 ) hou.setFrame( 5 ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( pointAttr ) self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) points = converter.convert() self.assertEqual( type(points), IECore.PointsPrimitive ) self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 21 ) self.assertEqual( points["float3detail"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( type(points["float3detail"].data), IECore.V3fData ) self.assert_( points["float3detail"].data.value.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) ) self.assertEqual( type(points["float3point"].data), IECore.V3fVectorData ) self.assertEqual( points["float3point"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) for p in points["float3point"].data : self.assert_( p.equalWithRelError( 
IECore.V3f( 1, 2, 3 ), 1e-10 ) ) self.assert_( points.arePrimitiveVariablesValid() ) add = pointAttr.createOutputNode( "add" ) add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points converter = IECoreHoudini.FromHoudiniPointsConverter( add ) points2 = converter.convert() del points['generator'] del points['generatorIndices'] del points['born'] del points['source'] self.assertEqual( points2, points ) def testMultipleParticlePrimitives( self ) : obj = hou.node("/obj") geo = obj.createNode( "geo", run_init_scripts=False ) popnet = geo.createNode( "popnet" ) fireworks = popnet.createNode( "fireworks" ) hou.setFrame( 15 ) converter = IECoreHoudini.FromHoudiniPointsConverter( popnet ) points = converter.convert() self.assertEqual( type(points), IECore.PointsPrimitive ) self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 24 ) self.assertEqual( points["accel"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) self.assertEqual( type(points["accel"].data), IECore.V3fVectorData ) self.assertEqual( points["accel"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector ) self.assertEqual( points["nextid"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant ) self.assertEqual( points["nextid"].data, IECore.IntData( 25 ) ) self.assertTrue( points.arePrimitiveVariablesValid() ) add = popnet.createOutputNode( "add" ) add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points converter = IECoreHoudini.FromHoudiniPointsConverter( add ) points2 = converter.convert() # showing that prim attribs don't get converted because the interpolation size doesn't match self.assertEqual( points2, points ) def testName( self ) : points = self.createPoints() particles = points.createOutputNode( "add" ) particles.parm( "addparticlesystem" ).set( True ) name = particles.createOutputNode( "name" ) name.parm( "name1" ).set( "points" ) box = points.parent().createNode( "box" ) name2 = box.createOutputNode( "name" 
) name2.parm( "name1" ).set( "box" ) merge = name.createOutputNode( "merge" ) merge.setInput( 1, name2 ) converter = IECoreHoudini.FromHoudiniPointsConverter( merge ) result = converter.convert() # names are not stored on the object at all self.assertEqual( result.blindData(), IECore.CompoundData() ) self.assertFalse( "name" in result ) self.assertFalse( "nameIndices" in result ) # both shapes were converted as one PointsPrimitive self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5008 ) self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 ) self.assertTrue( result.arePrimitiveVariablesValid() ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "points" ) self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) result = converter.convert() # names are not stored on the object at all self.assertEqual( result.blindData(), IECore.CompoundData() ) self.assertFalse( "name" in result ) self.assertFalse( "nameIndices" in result ) # only the named points were converted self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5000 ) self.assertTrue( result.arePrimitiveVariablesValid() ) converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box", IECore.TypeId.PointsPrimitive ) self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) ) result = converter.convert() # names are not stored on the object at all self.assertEqual( result.blindData(), IECore.CompoundData() ) self.assertFalse( "name" in result ) self.assertFalse( "nameIndices" in result ) # only the named points were converted self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 8 ) self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 ) self.assertTrue( result.arePrimitiveVariablesValid() ) def 
testAttributeFilter( self ) : points = self.createPoints() particles = points.createOutputNode( "add" ) particles.parm( "addparticlesystem" ).set( True ) # add vertex normals facet = particles.createOutputNode( "facet", node_name = "add_point_normals" ) facet.parm("postnml").set(True) # add a primitive colour attributes primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" ) primcol.parm("doclr").set(1) primcol.parm("diffr").setExpression("rand($PR)") primcol.parm("diffg").setExpression("rand($PR+1)") primcol.parm("diffb").setExpression("rand($PR+2)") detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True ) detail.parm("name").set("detailAttr") detail.parm("class").set(0) detail.parm("type").set(1) detail.parm("size").set(3) detail.parm("value1").set(123) detail.parm("value2").set(456.789) # can we catch it out with a float? detail.parm("value3").set(789) converter = IECoreHoudini.FromHoudiniPointsConverter( detail ) self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "varmap" ] ) converter.parameters()["attributeFilter"].setTypedValue( "P" ) self.assertEqual( sorted(converter.convert().keys()), [ "P" ] ) converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" ) self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr" ] ) # P must be converted converter.parameters()["attributeFilter"].setTypedValue( "* ^P" ) self.assertTrue( "P" in converter.convert().keys() ) def testStandardAttributeConversion( self ) : points = self.createPoints() color = points.createOutputNode( "color" ) color.parm( "colortype" ).set( 2 ) rest = color.createOutputNode( "rest" ) scale = rest.createOutputNode( "attribcreate" ) scale.parm( "name1" ).set( "pscale" ) scale.parm( "value1v1" ).setExpression( "$PT" ) converter = IECoreHoudini.FromHoudiniPointsConverter( scale ) result = converter.convert() self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", 
"varmap", "width" ] ) self.assertTrue( result.arePrimitiveVariablesValid() ) self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal ) converter["convertStandardAttributes"].setTypedValue( False ) result = converter.convert() self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest", "varmap" ] ) self.assertTrue( result.arePrimitiveVariablesValid() ) self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point ) self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal ) if __name__ == "__main__": unittest.main()
bsd-3-clause
jeenalee/servo
tests/dromaeo/run_dromaeo.py
111
2507
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import subprocess import sys import BaseHTTPServer import SimpleHTTPServer import urlparse import json # Port to run the HTTP server on for Dromaeo. TEST_SERVER_PORT = 8192 # Run servo and print / parse the results for a specific Dromaeo module. def run_servo(servo_exe, tests): url = "http://localhost:{0}/dromaeo/web/index.html?{1}&automated&post_json".format(TEST_SERVER_PORT, tests) args = [servo_exe, url, "-z", "-f"] return subprocess.Popen(args) # Print usage if command line args are incorrect def print_usage(): print("USAGE: {0} tests servo_binary dromaeo_base_dir".format(sys.argv[0])) # Handle the POST at the end class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_POST(self): self.send_response(200) self.end_headers() self.wfile.write("<HTML>POST OK.<BR><BR>") length = int(self.headers.getheader('content-length')) parameters = urlparse.parse_qs(self.rfile.read(length)) self.server.got_post = True self.server.post_data = parameters['data'] def log_message(self, format, *args): return if __name__ == '__main__': if len(sys.argv) == 4: tests = sys.argv[1] servo_exe = sys.argv[2] base_dir = sys.argv[3] os.chdir(base_dir) # Ensure servo binary can be found if not os.path.isfile(servo_exe): print("Unable to find {0}. 
This script expects an existing build of Servo.".format(servo_exe)) sys.exit(1) # Start the test server server = BaseHTTPServer.HTTPServer(('', TEST_SERVER_PORT), RequestHandler) print("Testing Dromaeo on Servo!") proc = run_servo(servo_exe, tests) server.got_post = False while not server.got_post: server.handle_request() data = json.loads(server.post_data[0]) n = 0 l = 0 for test in data: n = max(n, len(data[test])) l = max(l, len(test)) print("\n Test{0} | Time".format(" " * (l - len("Test")))) print("-{0}-|-{1}-".format("-" * l, "-" * n)) for test in data: print(" {0}{1} | {2}".format(test, " " * (l - len(test)), data[test])) proc.kill() else: print_usage()
mpl-2.0
fidomason/kbengine
kbe/src/lib/python/Lib/test/test_doctest.py
72
94252
""" Test script for doctest. """ from test import support import doctest import os import sys # NOTE: There are some additional tests relating to interaction with # zipimport in the test_zipimport_support test module. ###################################################################### ## Sample Objects (used by test cases) ###################################################################### def sample_func(v): """ Blah blah >>> print(sample_func(22)) 44 Yee ha! """ return v+v class SampleClass: """ >>> print(1) 1 >>> # comments get ignored. so are empty PS1 and PS2 prompts: >>> ... Multiline example: >>> sc = SampleClass(3) >>> for i in range(10): ... sc = sc.double() ... print(' ', sc.get(), sep='', end='') 6 12 24 48 96 192 384 768 1536 3072 """ def __init__(self, val): """ >>> print(SampleClass(12).get()) 12 """ self.val = val def double(self): """ >>> print(SampleClass(12).double().get()) 24 """ return SampleClass(self.val + self.val) def get(self): """ >>> print(SampleClass(-5).get()) -5 """ return self.val def a_staticmethod(v): """ >>> print(SampleClass.a_staticmethod(10)) 11 """ return v+1 a_staticmethod = staticmethod(a_staticmethod) def a_classmethod(cls, v): """ >>> print(SampleClass.a_classmethod(10)) 12 >>> print(SampleClass(0).a_classmethod(10)) 12 """ return v+2 a_classmethod = classmethod(a_classmethod) a_property = property(get, doc=""" >>> print(SampleClass(22).a_property) 22 """) class NestedClass: """ >>> x = SampleClass.NestedClass(5) >>> y = x.square() >>> print(y.get()) 25 """ def __init__(self, val=0): """ >>> print(SampleClass.NestedClass().get()) 0 """ self.val = val def square(self): return SampleClass.NestedClass(self.val*self.val) def get(self): return self.val class SampleNewStyleClass(object): r""" >>> print('1\n2\n3') 1 2 3 """ def __init__(self, val): """ >>> print(SampleNewStyleClass(12).get()) 12 """ self.val = val def double(self): """ >>> print(SampleNewStyleClass(12).double().get()) 24 """ return 
SampleNewStyleClass(self.val + self.val) def get(self): """ >>> print(SampleNewStyleClass(-5).get()) -5 """ return self.val ###################################################################### ## Fake stdin (for testing interactive debugging) ###################################################################### class _FakeInput: """ A fake input stream for pdb's interactive debugger. Whenever a line is read, print it (to simulate the user typing it), and then return it. The set of lines to return is specified in the constructor; they should not have trailing newlines. """ def __init__(self, lines): self.lines = lines def readline(self): line = self.lines.pop(0) print(line) return line+'\n' ###################################################################### ## Test Cases ###################################################################### def test_Example(): r""" Unit tests for the `Example` class. Example is a simple container class that holds: - `source`: A source string. - `want`: An expected output string. - `exc_msg`: An expected exception message string (or None if no exception is expected). - `lineno`: A line number (within the docstring). - `indent`: The example's indentation in the input string. - `options`: An option dictionary, mapping option flags to True or False. These attributes are set by the constructor. `source` and `want` are required; the other attributes all have default values: >>> example = doctest.Example('print(1)', '1\n') >>> (example.source, example.want, example.exc_msg, ... example.lineno, example.indent, example.options) ('print(1)\n', '1\n', None, 0, 0, {}) The first three attributes (`source`, `want`, and `exc_msg`) may be specified positionally; the remaining arguments should be specified as keyword arguments: >>> exc_msg = 'IndexError: pop from an empty list' >>> example = doctest.Example('[].pop()', '', exc_msg, ... lineno=5, indent=4, ... 
options={doctest.ELLIPSIS: True}) >>> (example.source, example.want, example.exc_msg, ... example.lineno, example.indent, example.options) ('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True}) The constructor normalizes the `source` string to end in a newline: Source spans a single line: no terminating newline. >>> e = doctest.Example('print(1)', '1\n') >>> e.source, e.want ('print(1)\n', '1\n') >>> e = doctest.Example('print(1)\n', '1\n') >>> e.source, e.want ('print(1)\n', '1\n') Source spans multiple lines: require terminating newline. >>> e = doctest.Example('print(1);\nprint(2)\n', '1\n2\n') >>> e.source, e.want ('print(1);\nprint(2)\n', '1\n2\n') >>> e = doctest.Example('print(1);\nprint(2)', '1\n2\n') >>> e.source, e.want ('print(1);\nprint(2)\n', '1\n2\n') Empty source string (which should never appear in real examples) >>> e = doctest.Example('', '') >>> e.source, e.want ('\n', '') The constructor normalizes the `want` string to end in a newline, unless it's the empty string: >>> e = doctest.Example('print(1)', '1\n') >>> e.source, e.want ('print(1)\n', '1\n') >>> e = doctest.Example('print(1)', '1') >>> e.source, e.want ('print(1)\n', '1\n') >>> e = doctest.Example('print', '') >>> e.source, e.want ('print\n', '') The constructor normalizes the `exc_msg` string to end in a newline, unless it's `None`: Message spans one line >>> exc_msg = 'IndexError: pop from an empty list' >>> e = doctest.Example('[].pop()', '', exc_msg) >>> e.exc_msg 'IndexError: pop from an empty list\n' >>> exc_msg = 'IndexError: pop from an empty list\n' >>> e = doctest.Example('[].pop()', '', exc_msg) >>> e.exc_msg 'IndexError: pop from an empty list\n' Message spans multiple lines >>> exc_msg = 'ValueError: 1\n 2' >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) >>> e.exc_msg 'ValueError: 1\n 2\n' >>> exc_msg = 'ValueError: 1\n 2\n' >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) >>> e.exc_msg 'ValueError: 1\n 2\n' Empty (but 
non-None) exception message (which should never appear in real examples) >>> exc_msg = '' >>> e = doctest.Example('raise X()', '', exc_msg) >>> e.exc_msg '\n' Compare `Example`: >>> example = doctest.Example('print 1', '1\n') >>> same_example = doctest.Example('print 1', '1\n') >>> other_example = doctest.Example('print 42', '42\n') >>> example == same_example True >>> example != same_example False >>> hash(example) == hash(same_example) True >>> example == other_example False >>> example != other_example True """ def test_DocTest(): r""" Unit tests for the `DocTest` class. DocTest is a collection of examples, extracted from a docstring, along with information about where the docstring comes from (a name, filename, and line number). The docstring is parsed by the `DocTest` constructor: >>> docstring = ''' ... >>> print(12) ... 12 ... ... Non-example text. ... ... >>> print('another\example') ... another ... example ... ''' >>> globs = {} # globals to run the test in. >>> parser = doctest.DocTestParser() >>> test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_file', 20) >>> print(test) <DocTest some_test from some_file:20 (2 examples)> >>> len(test.examples) 2 >>> e1, e2 = test.examples >>> (e1.source, e1.want, e1.lineno) ('print(12)\n', '12\n', 1) >>> (e2.source, e2.want, e2.lineno) ("print('another\\example')\n", 'another\nexample\n', 6) Source information (name, filename, and line number) is available as attributes on the doctest object: >>> (test.name, test.filename, test.lineno) ('some_test', 'some_file', 20) The line number of an example within its containing file is found by adding the line number of the example and the line number of its containing test: >>> test.lineno + e1.lineno 21 >>> test.lineno + e2.lineno 26 If the docstring contains inconsistant leading whitespace in the expected output of an example, then `DocTest` will raise a ValueError: >>> docstring = r''' ... >>> print('bad\nindentation') ... bad ... indentation ... 
''' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation' If the docstring contains inconsistent leading whitespace on continuation lines, then `DocTest` will raise a ValueError: >>> docstring = r''' ... >>> print(('bad indentation', ... ... 2)) ... ('bad', 'indentation') ... ''' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2))' If there's no blank space after a PS1 prompt ('>>>'), then `DocTest` will raise a ValueError: >>> docstring = '>>>print(1)\n1' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print(1)' If there's no blank space after a PS2 prompt ('...'), then `DocTest` will raise a ValueError: >>> docstring = '>>> if 1:\n...print(1)\n1' >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) Traceback (most recent call last): ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print(1)' Compare `DocTest`: >>> docstring = ''' ... >>> print 12 ... 12 ... ''' >>> test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_test', 20) >>> same_test = parser.get_doctest(docstring, globs, 'some_test', ... 'some_test', 20) >>> test == same_test True >>> test != same_test False >>> hash(test) == hash(same_test) True >>> docstring = ''' ... >>> print 42 ... 42 ... ''' >>> other_test = parser.get_doctest(docstring, globs, 'other_test', ... 
'other_file', 10) >>> test == other_test False >>> test != other_test True Compare `DocTestCase`: >>> DocTestCase = doctest.DocTestCase >>> test_case = DocTestCase(test) >>> same_test_case = DocTestCase(same_test) >>> other_test_case = DocTestCase(other_test) >>> test_case == same_test_case True >>> test_case != same_test_case False >>> hash(test_case) == hash(same_test_case) True >>> test == other_test_case False >>> test != other_test_case True """ class test_DocTestFinder: def basics(): r""" Unit tests for the `DocTestFinder` class. DocTestFinder is used to extract DocTests from an object's docstring and the docstrings of its contained objects. It can be used with modules, functions, classes, methods, staticmethods, classmethods, and properties. Finding Tests in Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~ For a function whose docstring contains examples, DocTestFinder.find() will return a single test (for that function's docstring): >>> finder = doctest.DocTestFinder() We'll simulate a __file__ attr that ends in pyc: >>> import test.test_doctest >>> old = test.test_doctest.__file__ >>> test.test_doctest.__file__ = 'test_doctest.pyc' >>> tests = finder.find(sample_func) >>> print(tests) # doctest: +ELLIPSIS [<DocTest sample_func from ...:18 (1 example)>] The exact name depends on how test_doctest was invoked, so allow for leading path components. >>> tests[0].filename # doctest: +ELLIPSIS '...test_doctest.py' >>> test.test_doctest.__file__ = old >>> e = tests[0].examples[0] >>> (e.source, e.want, e.lineno) ('print(sample_func(22))\n', '44\n', 3) By default, tests are created for objects with no docstring: >>> def no_docstring(v): ... pass >>> finder.find(no_docstring) [] However, the optional argument `exclude_empty` to the DocTestFinder constructor can be used to exclude tests for objects with empty docstrings: >>> def no_docstring(v): ... 
pass >>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True) >>> excl_empty_finder.find(no_docstring) [] If the function has a docstring with no examples, then a test with no examples is returned. (This lets `DocTestRunner` collect statistics about which functions have no tests -- but is that useful? And should an empty test also be created when there's no docstring?) >>> def no_examples(v): ... ''' no doctest examples ''' >>> finder.find(no_examples) # doctest: +ELLIPSIS [<DocTest no_examples from ...:1 (no examples)>] Finding Tests in Classes ~~~~~~~~~~~~~~~~~~~~~~~~ For a class, DocTestFinder will create a test for the class's docstring, and will recursively explore its contents, including methods, classmethods, staticmethods, properties, and nested classes. >>> finder = doctest.DocTestFinder() >>> tests = finder.find(SampleClass) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get New-style classes are also supported: >>> tests = finder.find(SampleNewStyleClass) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 1 SampleNewStyleClass 1 SampleNewStyleClass.__init__ 1 SampleNewStyleClass.double 1 SampleNewStyleClass.get Finding Tests in Modules ~~~~~~~~~~~~~~~~~~~~~~~~ For a module, DocTestFinder will create a test for the class's docstring, and will recursively explore its contents, including functions, classes, and the `__test__` dictionary, if it exists: >>> # A module >>> import types >>> m = types.ModuleType('some_module') >>> def triple(val): ... ''' ... >>> print(triple(11)) ... 33 ... ''' ... return val*3 >>> m.__dict__.update({ ... 'sample_func': sample_func, ... 'SampleClass': SampleClass, ... '__doc__': ''' ... Module docstring. ... >>> print('module') ... module ... ''', ... '__test__': { ... 
'd': '>>> print(6)\n6\n>>> print(7)\n7\n', ... 'c': triple}}) >>> finder = doctest.DocTestFinder() >>> # Use module=test.test_doctest, to prevent doctest from >>> # ignoring the objects since they weren't defined in m. >>> import test.test_doctest >>> tests = finder.find(m, module=test.test_doctest) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 1 some_module 3 some_module.SampleClass 3 some_module.SampleClass.NestedClass 1 some_module.SampleClass.NestedClass.__init__ 1 some_module.SampleClass.__init__ 2 some_module.SampleClass.a_classmethod 1 some_module.SampleClass.a_property 1 some_module.SampleClass.a_staticmethod 1 some_module.SampleClass.double 1 some_module.SampleClass.get 1 some_module.__test__.c 2 some_module.__test__.d 1 some_module.sample_func Duplicate Removal ~~~~~~~~~~~~~~~~~ If a single object is listed twice (under different names), then tests will only be generated for it once: >>> from test import doctest_aliases >>> assert doctest_aliases.TwoNames.f >>> assert doctest_aliases.TwoNames.g >>> tests = excl_empty_finder.find(doctest_aliases) >>> print(len(tests)) 2 >>> print(tests[0].name) test.doctest_aliases.TwoNames TwoNames.f and TwoNames.g are bound to the same object. We can't guess which will be found in doctest's traversal of TwoNames.__dict__ first, so we have to allow for either. >>> tests[1].name.split('.')[-1] in ['f', 'g'] True Empty Tests ~~~~~~~~~~~ By default, an object with no doctests doesn't create any tests: >>> tests = doctest.DocTestFinder().find(SampleClass) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get By default, that excluded objects with no doctests. exclude_empty=False tells it to include (empty) tests for objects with no doctests. 
This feature is really to support backward compatibility in what doctest.master.summarize() displays. >>> tests = doctest.DocTestFinder(exclude_empty=False).find(SampleClass) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 3 SampleClass 3 SampleClass.NestedClass 1 SampleClass.NestedClass.__init__ 0 SampleClass.NestedClass.get 0 SampleClass.NestedClass.square 1 SampleClass.__init__ 2 SampleClass.a_classmethod 1 SampleClass.a_property 1 SampleClass.a_staticmethod 1 SampleClass.double 1 SampleClass.get Turning off Recursion ~~~~~~~~~~~~~~~~~~~~~ DocTestFinder can be told not to look for tests in contained objects using the `recurse` flag: >>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass) >>> for t in tests: ... print('%2s %s' % (len(t.examples), t.name)) 3 SampleClass Line numbers ~~~~~~~~~~~~ DocTestFinder finds the line number of each example: >>> def f(x): ... ''' ... >>> x = 12 ... ... some text ... ... >>> # examples are not created for comments & bare prompts. ... >>> ... ... ... ... >>> for x in range(10): ... ... print(x, end=' ') ... 0 1 2 3 4 5 6 7 8 9 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> [e.lineno for e in test.examples] [1, 9, 12] """ if int.__doc__: # simple check for --without-doc-strings, skip if lacking def non_Python_modules(): r""" Finding Doctests in Modules Not Written in Python ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DocTestFinder can also find doctests in most modules not written in Python. We'll use builtins as an example, since it almost certainly isn't written in plain ol' Python and is guaranteed to be available. >>> import builtins >>> tests = doctest.DocTestFinder().find(builtins) >>> 790 < len(tests) < 800 # approximate number of objects with docstrings True >>> real_tests = [t for t in tests if len(t.examples) > 0] >>> len(real_tests) # objects that actually have doctests 8 >>> for t in real_tests: ... print('{} {}'.format(len(t.examples), t.name)) ... 
1 builtins.bin 3 builtins.float.as_integer_ratio 2 builtins.float.fromhex 2 builtins.float.hex 1 builtins.hex 1 builtins.int 2 builtins.int.bit_length 1 builtins.oct Note here that 'bin', 'oct', and 'hex' are functions; 'float.as_integer_ratio', 'float.hex', and 'int.bit_length' are methods; 'float.fromhex' is a classmethod, and 'int' is a type. """ def test_DocTestParser(): r""" Unit tests for the `DocTestParser` class. DocTestParser is used to parse docstrings containing doctest examples. The `parse` method divides a docstring into examples and intervening text: >>> s = ''' ... >>> x, y = 2, 3 # no output expected ... >>> if 1: ... ... print(x) ... ... print(y) ... 2 ... 3 ... ... Some text. ... >>> x+y ... 5 ... ''' >>> parser = doctest.DocTestParser() >>> for piece in parser.parse(s): ... if isinstance(piece, doctest.Example): ... print('Example:', (piece.source, piece.want, piece.lineno)) ... else: ... print(' Text:', repr(piece)) Text: '\n' Example: ('x, y = 2, 3 # no output expected\n', '', 1) Text: '' Example: ('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2) Text: '\nSome text.\n' Example: ('x+y\n', '5\n', 9) Text: '' The `get_examples` method returns just the examples: >>> for piece in parser.get_examples(s): ... print((piece.source, piece.want, piece.lineno)) ('x, y = 2, 3 # no output expected\n', '', 1) ('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2) ('x+y\n', '5\n', 9) The `get_doctest` method creates a Test from the examples, along with the given arguments: >>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5) >>> (test.name, test.filename, test.lineno) ('name', 'filename', 5) >>> for piece in test.examples: ... print((piece.source, piece.want, piece.lineno)) ('x, y = 2, 3 # no output expected\n', '', 1) ('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2) ('x+y\n', '5\n', 9) """ class test_DocTestRunner: def basics(): r""" Unit tests for the `DocTestRunner` class. DocTestRunner is used to run DocTest test cases, and to accumulate statistics. 
Here's a simple DocTest case we can use: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print(x) ... 12 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] The main DocTestRunner interface is the `run` method, which runs a given DocTest case in a given namespace (globs). It returns a tuple `(f,t)`, where `f` is the number of failed tests and `t` is the number of tried tests. >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=3) If any example produces incorrect output, then the test runner reports the failure and proceeds to the next example: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print(x) ... 14 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=True).run(test) ... # doctest: +ELLIPSIS Trying: x = 12 Expecting nothing ok Trying: print(x) Expecting: 14 ********************************************************************** File ..., line 4, in f Failed example: print(x) Expected: 14 Got: 12 Trying: x//2 Expecting: 6 ok TestResults(failed=1, attempted=3) """ def verbose_flag(): r""" The `verbose` flag makes the test runner generate more detailed output: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print(x) ... 12 ... >>> x//2 ... 6 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=True).run(test) Trying: x = 12 Expecting nothing ok Trying: print(x) Expecting: 12 ok Trying: x//2 Expecting: 6 ok TestResults(failed=0, attempted=3) If the `verbose` flag is unspecified, then the output will be verbose iff `-v` appears in sys.argv: >>> # Save the real sys.argv list. >>> old_argv = sys.argv >>> # If -v does not appear in sys.argv, then output isn't verbose. >>> sys.argv = ['test'] >>> doctest.DocTestRunner().run(test) TestResults(failed=0, attempted=3) >>> # If -v does appear in sys.argv, then output is verbose. 
>>> sys.argv = ['test', '-v'] >>> doctest.DocTestRunner().run(test) Trying: x = 12 Expecting nothing ok Trying: print(x) Expecting: 12 ok Trying: x//2 Expecting: 6 ok TestResults(failed=0, attempted=3) >>> # Restore sys.argv >>> sys.argv = old_argv In the remaining examples, the test runner's verbosity will be explicitly set, to ensure that the test behavior is consistent. """ def exceptions(): r""" Tests of `DocTestRunner`'s exception handling. An expected exception is specified with a traceback message. The lines between the first line and the type/value may be omitted or replaced with any other string: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print(x//0) ... Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) An example may not generate output before it raises an exception; if it does, then the traceback message will not be recognized as signaling an expected exception, so the example will be reported as an unexpected exception: >>> def f(x): ... ''' ... >>> x = 12 ... >>> print('pre-exception output', x//0) ... pre-exception output ... Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 4, in f Failed example: print('pre-exception output', x//0) Exception raised: ... ZeroDivisionError: integer division or modulo by zero TestResults(failed=1, attempted=2) Exception messages may contain newlines: >>> def f(x): ... r''' ... >>> raise ValueError('multi\nline\nmessage') ... Traceback (most recent call last): ... ValueError: multi ... line ... message ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) If an exception is expected, but an exception with the wrong type or message is raised, then it is reported as a failure: >>> def f(x): ... r''' ... >>> raise ValueError('message') ... Traceback (most recent call last): ... ValueError: wrong message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: raise ValueError('message') Expected: Traceback (most recent call last): ValueError: wrong message Got: Traceback (most recent call last): ... ValueError: message TestResults(failed=1, attempted=1) However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the detail: >>> def f(x): ... r''' ... >>> raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... ValueError: wrong message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) IGNORE_EXCEPTION_DETAIL also ignores difference in exception formatting between Python versions. For example, in Python 2.x, the module path of the exception is not in the output, but this will fail under Python 3: >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException('message') ... Traceback (most recent call last): ... HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 4, in f Failed example: raise HTTPException('message') Expected: Traceback (most recent call last): HTTPException: message Got: Traceback (most recent call last): ... 
http.client.HTTPException: message TestResults(failed=1, attempted=2) But in Python 3 the module path is included, and therefore a test must look like the following test to succeed in Python 3. But that test will fail under Python 2. >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException('message') ... Traceback (most recent call last): ... http.client.HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) However, with IGNORE_EXCEPTION_DETAIL, the module name of the exception (or its unexpected absence) will be ignored: >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) The module path will be completely ignored, so two different module paths will still pass if IGNORE_EXCEPTION_DETAIL is given. This is intentional, so it can be used when exceptions have changed module. >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.HTTPException: message ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type: >>> def f(x): ... r''' ... >>> raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... TypeError: wrong type ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL Expected: Traceback (most recent call last): TypeError: wrong type Got: Traceback (most recent call last): ... ValueError: message TestResults(failed=1, attempted=1) If the exception does not have a message, you can still use IGNORE_EXCEPTION_DETAIL to normalize the modules between Python 2 and 3: >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException() #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.HTTPException ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) Note that a trailing colon doesn't matter either: >>> def f(x): ... r''' ... >>> from http.client import HTTPException ... >>> raise HTTPException() #doctest: +IGNORE_EXCEPTION_DETAIL ... Traceback (most recent call last): ... foo.bar.HTTPException: ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) If an exception is raised but not expected, then it is reported as an unexpected exception: >>> def f(x): ... r''' ... >>> 1//0 ... 0 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: 1//0 Exception raised: Traceback (most recent call last): ... ZeroDivisionError: integer division or modulo by zero TestResults(failed=1, attempted=1) """ def displayhook(): r""" Test that changing sys.displayhook doesn't matter for doctest. >>> import sys >>> orig_displayhook = sys.displayhook >>> def my_displayhook(x): ... print('hi!') >>> sys.displayhook = my_displayhook >>> def f(): ... ''' ... 
>>> 3 ... 3 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> r = doctest.DocTestRunner(verbose=False).run(test) >>> post_displayhook = sys.displayhook We need to restore sys.displayhook now, so that we'll be able to test results. >>> sys.displayhook = orig_displayhook Ok, now we can check that everything is ok. >>> r TestResults(failed=0, attempted=1) >>> post_displayhook is my_displayhook True """ def optionflags(): r""" Tests of `DocTestRunner`'s option flag handling. Several option flags can be used to customize the behavior of the test runner. These are defined as module constants in doctest, and passed to the DocTestRunner constructor (multiple constants should be ORed together). The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False and 1/0: >>> def f(x): ... '>>> True\n1\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1 >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: True Expected: 1 Got: True TestResults(failed=1, attempted=1) The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines and the '<BLANKLINE>' marker: >>> def f(x): ... '>>> print("a\\n\\nb")\na\n<BLANKLINE>\nb\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.DONT_ACCEPT_BLANKLINE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print("a\n\nb") Expected: a <BLANKLINE> b Got: a <BLANKLINE> b TestResults(failed=1, attempted=1) The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be treated as equal: >>> def f(x): ... '>>> print(1, 2, 3)\n 1 2\n 3' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(1, 2, 3) Expected: 1 2 3 Got: 1 2 3 TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.NORMALIZE_WHITESPACE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) TestResults(failed=0, attempted=1) An example from the docs: >>> print(list(range(20))) #doctest: +NORMALIZE_WHITESPACE [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] The ELLIPSIS flag causes ellipsis marker ("...") in the expected output to match any substring in the actual output: >>> def f(x): ... '>>> print(list(range(15)))\n[0, 1, 2, ..., 14]\n' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(15))) Expected: [0, 1, 2, ..., 14] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.ELLIPSIS >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) TestResults(failed=0, attempted=1) ... also matches nothing: >>> if 1: ... for i in range(100): ... print(i**2, end=' ') #doctest: +ELLIPSIS ... print('!') 0 1...4...9 16 ... 
36 49 64 ... 9801 ! ... can be surprising; e.g., this test passes: >>> if 1: #doctest: +ELLIPSIS ... for i in range(20): ... print(i, end=' ') ... print(20) 0 1 2 ...1...2...0 Examples from the docs: >>> print(list(range(20))) # doctest:+ELLIPSIS [0, 1, ..., 18, 19] >>> print(list(range(20))) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE [0, 1, ..., 18, 19] The SKIP flag causes an example to be skipped entirely. I.e., the example is not run. It can be useful in contexts where doctest examples serve as both documentation and test cases, and an example should be included for documentation purposes, but should not be checked (e.g., because its output is random, or depends on resources which would be unavailable.) The SKIP flag can also be used for 'commenting out' broken examples. >>> import unavailable_resource # doctest: +SKIP >>> unavailable_resource.do_something() # doctest: +SKIP >>> unavailable_resource.blow_up() # doctest: +SKIP Traceback (most recent call last): ... UncheckedBlowUpError: Nobody checks me. >>> import random >>> print(random.random()) # doctest: +SKIP 0.721216923889 The REPORT_UDIFF flag causes failures that involve multi-line expected and actual outputs to be displayed using a unified diff: >>> def f(x): ... r''' ... >>> print('\n'.join('abcdefg')) ... a ... B ... c ... d ... f ... g ... h ... ''' >>> # Without the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print('\n'.join('abcdefg')) Expected: a B c d f g h Got: a b c d e f g TestResults(failed=1, attempted=1) >>> # With the flag: >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_UDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print('\n'.join('abcdefg')) Differences (unified diff with -expected +actual): @@ -1,7 +1,7 @@ a -B +b c d +e f g -h TestResults(failed=1, attempted=1) The REPORT_CDIFF flag causes failures that involve multi-line expected and actual outputs to be displayed using a context diff: >>> # Reuse f() from the REPORT_UDIFF example, above. >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_CDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print('\n'.join('abcdefg')) Differences (context diff with expected followed by actual): *************** *** 1,7 **** a ! B c d f g - h --- 1,7 ---- a ! b c d + e f g TestResults(failed=1, attempted=1) The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm used by the popular ndiff.py utility. This does intraline difference marking, as well as interline differences. >>> def f(x): ... r''' ... >>> print("a b c d e f g h i j k l m") ... a b c d e f g h i j k 1 m ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_NDIFF >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 3, in f Failed example: print("a b c d e f g h i j k l m") Differences (ndiff with -expected +actual): - a b c d e f g h i j k 1 m ? ^ + a b c d e f g h i j k l m ? + ++ ^ TestResults(failed=1, attempted=1) The REPORT_ONLY_FIRST_FAILURE suppresses result output after the first failing example: >>> def f(x): ... r''' ... >>> print(1) # first success ... 1 ... >>> print(2) # first failure ... 200 ... >>> print(3) # second failure ... 300 ... >>> print(4) # second success ... 4 ... 
>>> print(5) # third failure ... 500 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: print(2) # first failure Expected: 200 Got: 2 TestResults(failed=3, attempted=5) However, output from `report_start` is not suppressed: >>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test) ... # doctest: +ELLIPSIS Trying: print(1) # first success Expecting: 1 ok Trying: print(2) # first failure Expecting: 200 ********************************************************************** File ..., line 5, in f Failed example: print(2) # first failure Expected: 200 Got: 2 TestResults(failed=3, attempted=5) The FAIL_FAST flag causes the runner to exit after the first failing example, so subsequent examples are not even attempted: >>> flags = doctest.FAIL_FAST >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: print(2) # first failure Expected: 200 Got: 2 TestResults(failed=1, attempted=2) Specifying both FAIL_FAST and REPORT_ONLY_FIRST_FAILURE is equivalent to FAIL_FAST only: >>> flags = doctest.FAIL_FAST | doctest.REPORT_ONLY_FIRST_FAILURE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: print(2) # first failure Expected: 200 Got: 2 TestResults(failed=1, attempted=2) For the purposes of both REPORT_ONLY_FIRST_FAILURE and FAIL_FAST, unexpected exceptions count as failures: >>> def f(x): ... r''' ... >>> print(1) # first success ... 1 ... >>> raise ValueError(2) # first failure ... 200 ... 
>>> print(3) # second failure ... 300 ... >>> print(4) # second success ... 4 ... >>> print(5) # third failure ... 500 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: raise ValueError(2) # first failure Exception raised: ... ValueError: 2 TestResults(failed=3, attempted=5) >>> flags = doctest.FAIL_FAST >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 5, in f Failed example: raise ValueError(2) # first failure Exception raised: ... ValueError: 2 TestResults(failed=1, attempted=2) New option flags can also be registered, via register_optionflag(). Here we reach into doctest's internals a bit. >>> unlikely = "UNLIKELY_OPTION_NAME" >>> unlikely in doctest.OPTIONFLAGS_BY_NAME False >>> new_flag_value = doctest.register_optionflag(unlikely) >>> unlikely in doctest.OPTIONFLAGS_BY_NAME True Before 2.4.4/2.5, registering a name more than once erroneously created more than one flag value. Here we verify that's fixed: >>> redundant_flag_value = doctest.register_optionflag(unlikely) >>> redundant_flag_value == new_flag_value True Clean up. >>> del doctest.OPTIONFLAGS_BY_NAME[unlikely] """ def option_directives(): r""" Tests of `DocTestRunner`'s option directive mechanism. Option directives can be used to turn option flags on or off for a single example. To turn an option on for an example, follow that example with a comment of the form ``# doctest: +OPTION``: >>> def f(x): r''' ... >>> print(list(range(10))) # should fail: no ellipsis ... [0, 1, ..., 9] ... ... >>> print(list(range(10))) # doctest: +ELLIPSIS ... [0, 1, ..., 9] ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(10))) # should fail: no ellipsis Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=1, attempted=2) To turn an option off for an example, follow that example with a comment of the form ``# doctest: -OPTION``: >>> def f(x): r''' ... >>> print(list(range(10))) ... [0, 1, ..., 9] ... ... >>> # should fail: no ellipsis ... >>> print(list(range(10))) # doctest: -ELLIPSIS ... [0, 1, ..., 9] ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False, ... optionflags=doctest.ELLIPSIS).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 6, in f Failed example: print(list(range(10))) # doctest: -ELLIPSIS Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=1, attempted=2) Option directives affect only the example that they appear with; they do not change the options for surrounding examples: >>> def f(x): r''' ... >>> print(list(range(10))) # Should fail: no ellipsis ... [0, 1, ..., 9] ... ... >>> print(list(range(10))) # doctest: +ELLIPSIS ... [0, 1, ..., 9] ... ... >>> print(list(range(10))) # Should fail: no ellipsis ... [0, 1, ..., 9] ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... 
# doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(10))) # Should fail: no ellipsis Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ********************************************************************** File ..., line 8, in f Failed example: print(list(range(10))) # Should fail: no ellipsis Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=2, attempted=3) Multiple options may be modified by a single option directive. They may be separated by whitespace, commas, or both: >>> def f(x): r''' ... >>> print(list(range(10))) # Should fail ... [0, 1, ..., 9] ... >>> print(list(range(10))) # Should succeed ... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE ... [0, 1, ..., 9] ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(10))) # Should fail Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=1, attempted=2) >>> def f(x): r''' ... >>> print(list(range(10))) # Should fail ... [0, 1, ..., 9] ... >>> print(list(range(10))) # Should succeed ... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE ... [0, 1, ..., 9] ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(10))) # Should fail Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=1, attempted=2) >>> def f(x): r''' ... >>> print(list(range(10))) # Should fail ... [0, 1, ..., 9] ... >>> print(list(range(10))) # Should succeed ... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE ... [0, 1, ..., 9] ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) ... # doctest: +ELLIPSIS ********************************************************************** File ..., line 2, in f Failed example: print(list(range(10))) # Should fail Expected: [0, 1, ..., 9] Got: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TestResults(failed=1, attempted=2) The option directive may be put on the line following the source, as long as a continuation prompt is used: >>> def f(x): r''' ... >>> print(list(range(10))) ... ... # doctest: +ELLIPSIS ... [0, 1, ..., 9] ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) For examples with multi-line source, the option directive may appear at the end of any line: >>> def f(x): r''' ... >>> for x in range(10): # doctest: +ELLIPSIS ... ... print(' ', x, end='', sep='') ... 0 1 2 ... 9 ... ... >>> for x in range(10): ... ... print(' ', x, end='', sep='') # doctest: +ELLIPSIS ... 0 1 2 ... 9 ... ''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=2) If more than one line of an example with multi-line source has an option directive, then they are combined: >>> def f(x): r''' ... Should fail (option directive not on the last line): ... >>> for x in range(10): # doctest: +ELLIPSIS ... ... print(x, end=' ') # doctest: +NORMALIZE_WHITESPACE ... 0 1 2...9 ... 
''' >>> test = doctest.DocTestFinder().find(f)[0] >>> doctest.DocTestRunner(verbose=False).run(test) TestResults(failed=0, attempted=1) It is an error to have a comment of the form ``# doctest:`` that is *not* followed by words of the form ``+OPTION`` or ``-OPTION``, where ``OPTION`` is an option that has been registered with `register_option`: >>> # Error: Option not registered >>> s = '>>> print(12) #doctest: +BADOPTION' >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) Traceback (most recent call last): ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION' >>> # Error: No + or - prefix >>> s = '>>> print(12) #doctest: ELLIPSIS' >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) Traceback (most recent call last): ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS' It is an error to use an option directive on a line that contains no source: >>> s = '>>> # doctest: +ELLIPSIS' >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) Traceback (most recent call last): ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS' """ def test_testsource(): r""" Unit tests for `testsource()`. The testsource() function takes a module and a name, finds the (first) test with that name in that module, and converts it to a script. The example code is converted to regular Python code. The surrounding words and expected output are converted to comments: >>> import test.test_doctest >>> name = 'test.test_doctest.sample_func' >>> print(doctest.testsource(test.test_doctest, name)) # Blah blah # print(sample_func(22)) # Expected: ## 44 # # Yee ha! 
<BLANKLINE> >>> name = 'test.test_doctest.SampleNewStyleClass' >>> print(doctest.testsource(test.test_doctest, name)) print('1\n2\n3') # Expected: ## 1 ## 2 ## 3 <BLANKLINE> >>> name = 'test.test_doctest.SampleClass.a_classmethod' >>> print(doctest.testsource(test.test_doctest, name)) print(SampleClass.a_classmethod(10)) # Expected: ## 12 print(SampleClass(0).a_classmethod(10)) # Expected: ## 12 <BLANKLINE> """ def test_debug(): r""" Create a docstring that we want to debug: >>> s = ''' ... >>> x = 12 ... >>> print(x) ... 12 ... ''' Create some fake stdin input, to feed to the debugger: >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput(['next', 'print(x)', 'continue']) Run the debugger on the docstring, and then restore sys.stdin. >>> try: doctest.debug_src(s) ... finally: sys.stdin = real_stdin > <string>(1)<module>() (Pdb) next 12 --Return-- > <string>(1)<module>()->None (Pdb) print(x) 12 (Pdb) continue """ if not hasattr(sys, 'gettrace') or not sys.gettrace(): def test_pdb_set_trace(): """Using pdb.set_trace from a doctest. You can use pdb.set_trace from a doctest. To do so, you must retrieve the set_trace function from the pdb module at the time you use it. The doctest module changes sys.stdout so that it can capture program output. It also temporarily replaces pdb.set_trace with a version that restores stdout. This is necessary for you to see debugger output. >>> doc = ''' ... >>> x = 42 ... >>> raise Exception('clé') ... Traceback (most recent call last): ... Exception: clé ... >>> import pdb; pdb.set_trace() ... ''' >>> parser = doctest.DocTestParser() >>> test = parser.get_doctest(doc, {}, "foo-bar@baz", "foo-bar@baz.py", 0) >>> runner = doctest.DocTestRunner(verbose=False) To demonstrate this, we'll create a fake standard input that captures our debugger input: >>> import tempfile >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'print(x)', # print data defined by the example ... 'continue', # stop debugging ... 
'']) >>> try: runner.run(test) ... finally: sys.stdin = real_stdin --Return-- > <doctest foo-bar@baz[2]>(1)<module>()->None -> import pdb; pdb.set_trace() (Pdb) print(x) 42 (Pdb) continue TestResults(failed=0, attempted=3) You can also put pdb.set_trace in a function called from a test: >>> def calls_set_trace(): ... y=2 ... import pdb; pdb.set_trace() >>> doc = ''' ... >>> x=1 ... >>> calls_set_trace() ... ''' >>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0) >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'print(y)', # print data defined in the function ... 'up', # out of function ... 'print(x)', # print data defined by the example ... 'continue', # stop debugging ... '']) >>> try: ... runner.run(test) ... finally: ... sys.stdin = real_stdin --Return-- > <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None -> import pdb; pdb.set_trace() (Pdb) print(y) 2 (Pdb) up > <doctest foo-bar@baz[1]>(1)<module>() -> calls_set_trace() (Pdb) print(x) 1 (Pdb) continue TestResults(failed=0, attempted=2) During interactive debugging, source code is shown, even for doctest examples: >>> doc = ''' ... >>> def f(x): ... ... g(x*2) ... >>> def g(x): ... ... print(x+3) ... ... import pdb; pdb.set_trace() ... >>> f(3) ... ''' >>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0) >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'list', # list source from example 2 ... 'next', # return from g() ... 'list', # list source from example 1 ... 'next', # return from f() ... 'list', # list source from example 3 ... 'continue', # stop debugging ... '']) >>> try: runner.run(test) ... finally: sys.stdin = real_stdin ... 
# doctest: +NORMALIZE_WHITESPACE --Return-- > <doctest foo-bar@baz[1]>(3)g()->None -> import pdb; pdb.set_trace() (Pdb) list 1 def g(x): 2 print(x+3) 3 -> import pdb; pdb.set_trace() [EOF] (Pdb) next --Return-- > <doctest foo-bar@baz[0]>(2)f()->None -> g(x*2) (Pdb) list 1 def f(x): 2 -> g(x*2) [EOF] (Pdb) next --Return-- > <doctest foo-bar@baz[2]>(1)<module>()->None -> f(3) (Pdb) list 1 -> f(3) [EOF] (Pdb) continue ********************************************************************** File "foo-bar@baz.py", line 7, in foo-bar@baz Failed example: f(3) Expected nothing Got: 9 TestResults(failed=1, attempted=3) """ def test_pdb_set_trace_nested(): """This illustrates more-demanding use of set_trace with nested functions. >>> class C(object): ... def calls_set_trace(self): ... y = 1 ... import pdb; pdb.set_trace() ... self.f1() ... y = 2 ... def f1(self): ... x = 1 ... self.f2() ... x = 2 ... def f2(self): ... z = 1 ... z = 2 >>> calls_set_trace = C().calls_set_trace >>> doc = ''' ... >>> a = 1 ... >>> calls_set_trace() ... ''' >>> parser = doctest.DocTestParser() >>> runner = doctest.DocTestRunner(verbose=False) >>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0) >>> real_stdin = sys.stdin >>> sys.stdin = _FakeInput([ ... 'print(y)', # print data defined in the function ... 'step', 'step', 'step', 'step', 'step', 'step', 'print(z)', ... 'up', 'print(x)', ... 'up', 'print(y)', ... 'up', 'print(foo)', ... 'continue', # stop debugging ... '']) >>> try: ... runner.run(test) ... finally: ... sys.stdin = real_stdin ... 
# doctest: +REPORT_NDIFF > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace() -> self.f1() (Pdb) print(y) 1 (Pdb) step --Call-- > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1() -> def f1(self): (Pdb) step > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1() -> x = 1 (Pdb) step > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1() -> self.f2() (Pdb) step --Call-- > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2() -> def f2(self): (Pdb) step > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2() -> z = 1 (Pdb) step > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2() -> z = 2 (Pdb) print(z) 1 (Pdb) up > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1() -> self.f2() (Pdb) print(x) 1 (Pdb) up > <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace() -> self.f1() (Pdb) print(y) 1 (Pdb) up > <doctest foo-bar@baz[1]>(1)<module>() -> calls_set_trace() (Pdb) print(foo) *** NameError: name 'foo' is not defined (Pdb) continue TestResults(failed=0, attempted=2) """ def test_DocTestSuite(): """DocTestSuite creates a unittest test suite from a doctest. We create a Suite by providing a module. A module can be provided by passing a module object: >>> import unittest >>> import test.sample_doctest >>> suite = doctest.DocTestSuite(test.sample_doctest) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=4> We can also supply the module by name: >>> suite = doctest.DocTestSuite('test.sample_doctest') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=4> The module need not contain any doctest examples: >>> suite = doctest.DocTestSuite('test.sample_doctest_no_doctests') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=0 errors=0 failures=0> However, if DocTestSuite finds no docstrings, it raises an error: >>> try: ... 
doctest.DocTestSuite('test.sample_doctest_no_docstrings') ... except ValueError as e: ... error = e >>> print(error.args[1]) has no docstrings You can prevent this error by passing a DocTestFinder instance with the `exclude_empty` keyword argument set to False: >>> finder = doctest.DocTestFinder(exclude_empty=False) >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings', ... test_finder=finder) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=0 errors=0 failures=0> We can use the current module: >>> suite = test.sample_doctest.test_suite() >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=4> We can supply global variables. If we pass globs, they will be used instead of the module globals. Here we'll pass an empty globals, triggering an extra error: >>> suite = doctest.DocTestSuite('test.sample_doctest', globs={}) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=5> Alternatively, we can provide extra globals. Here we'll make an error go away by providing an extra global variable: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... extraglobs={'y': 1}) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=3> You can pass option flags. Here we'll cause an extra error by disabling the blank-line feature: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=5> You can supply setUp and tearDown functions: >>> def setUp(t): ... import test.test_doctest ... test.test_doctest.sillySetup = True >>> def tearDown(t): ... import test.test_doctest ... del test.test_doctest.sillySetup Here, we installed a silly variable that the test expects: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... 
setUp=setUp, tearDown=tearDown) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=3> But the tearDown restores sanity: >>> import test.test_doctest >>> test.test_doctest.sillySetup Traceback (most recent call last): ... AttributeError: 'module' object has no attribute 'sillySetup' The setUp and tearDown funtions are passed test objects. Here we'll use the setUp function to supply the missing variable y: >>> def setUp(test): ... test.globs['y'] = 1 >>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=9 errors=0 failures=3> Here, we didn't need to use a tearDown function because we modified the test globals, which are a copy of the sample_doctest module dictionary. The test globals are automatically cleared for us after a test. """ def test_DocFileSuite(): """We can test tests found in text files using a DocFileSuite. We create a suite by providing the names of one or more text files that include examples: >>> import unittest >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=2> The test files are looked for in the directory containing the calling module. A package keyword argument can be provided to specify a different relative location. >>> import unittest >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... package='test') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=2> Support for using a package's __loader__.get_data() is also provided. >>> import unittest, pkgutil, test >>> added_loader = False >>> if not hasattr(test, '__loader__'): ... test.__loader__ = pkgutil.get_loader(test) ... added_loader = True >>> try: ... suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 
'test_doctest4.txt', ... package='test') ... suite.run(unittest.TestResult()) ... finally: ... if added_loader: ... del test.__loader__ <unittest.result.TestResult run=3 errors=0 failures=2> '/' should be used as a path separator. It will be converted to a native separator at run time: >>> suite = doctest.DocFileSuite('../test/test_doctest.txt') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=1 errors=0 failures=1> If DocFileSuite is used from an interactive session, then files are resolved relative to the directory of sys.argv[0]: >>> import types, os.path, test.test_doctest >>> save_argv = sys.argv >>> sys.argv = [test.test_doctest.__file__] >>> suite = doctest.DocFileSuite('test_doctest.txt', ... package=types.ModuleType('__main__')) >>> sys.argv = save_argv By setting `module_relative=False`, os-specific paths may be used (including absolute paths and paths relative to the working directory): >>> # Get the absolute path of the test package. >>> test_doctest_path = os.path.abspath(test.test_doctest.__file__) >>> test_pkg_path = os.path.split(test_doctest_path)[0] >>> # Use it to find the absolute path of test_doctest.txt. >>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt') >>> suite = doctest.DocFileSuite(test_file, module_relative=False) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=1 errors=0 failures=1> It is an error to specify `package` when `module_relative=False`: >>> suite = doctest.DocFileSuite(test_file, module_relative=False, ... package='test') Traceback (most recent call last): ValueError: Package may only be specified for module-relative paths. You can specify initial global variables: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... globs={'favorite_color': 'blue'}) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=1> In this case, we supplied a missing favorite color. 
You can provide doctest options: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE, ... globs={'favorite_color': 'blue'}) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=2> And, you can provide setUp and tearDown functions: >>> def setUp(t): ... import test.test_doctest ... test.test_doctest.sillySetup = True >>> def tearDown(t): ... import test.test_doctest ... del test.test_doctest.sillySetup Here, we installed a silly variable that the test expects: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... setUp=setUp, tearDown=tearDown) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=1> But the tearDown restores sanity: >>> import test.test_doctest >>> test.test_doctest.sillySetup Traceback (most recent call last): ... AttributeError: 'module' object has no attribute 'sillySetup' The setUp and tearDown funtions are passed test objects. Here, we'll use a setUp function to set the favorite color in test_doctest.txt: >>> def setUp(test): ... test.globs['favorite_color'] = 'blue' >>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp) >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=1 errors=0 failures=0> Here, we didn't need to use a tearDown function because we modified the test globals. The test globals are automatically cleared for us after a test. Tests in a file run using `DocFileSuite` can also access the `__file__` global, which is set to the name of the file containing the tests: >>> suite = doctest.DocFileSuite('test_doctest3.txt') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=1 errors=0 failures=0> If the tests contain non-ASCII characters, we have to specify which encoding the file is encoded with. 
We do so by using the `encoding` parameter: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... 'test_doctest2.txt', ... 'test_doctest4.txt', ... encoding='utf-8') >>> suite.run(unittest.TestResult()) <unittest.result.TestResult run=3 errors=0 failures=2> """ def test_trailing_space_in_test(): """ Trailing spaces in expected output are significant: >>> x, y = 'foo', '' >>> print(x, y) foo \n """ def test_unittest_reportflags(): """Default unittest reporting flags can be set to control reporting Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see only the first failure of each test. First, we'll look at the output without the flag. The file test_doctest.txt file has two tests. They both fail if blank lines are disabled: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE) >>> import unittest >>> result = suite.run(unittest.TestResult()) >>> print(result.failures[0][1]) # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color ... Failed example: if 1: ... Note that we see both failures displayed. >>> old = doctest.set_unittest_reportflags( ... doctest.REPORT_ONLY_FIRST_FAILURE) Now, when we run the test: >>> result = suite.run(unittest.TestResult()) >>> print(result.failures[0][1]) # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color Exception raised: ... NameError: name 'favorite_color' is not defined <BLANKLINE> <BLANKLINE> We get only the first failure. If we give any reporting options when we set up the tests, however: >>> suite = doctest.DocFileSuite('test_doctest.txt', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF) Then the default eporting options are ignored: >>> result = suite.run(unittest.TestResult()) >>> print(result.failures[0][1]) # doctest: +ELLIPSIS Traceback ... Failed example: favorite_color ... 
Failed example: if 1: print('a') print() print('b') Differences (ndiff with -expected +actual): a - <BLANKLINE> + b <BLANKLINE> <BLANKLINE> Test runners can restore the formatting flags after they run: >>> ignored = doctest.set_unittest_reportflags(old) """ def test_testfile(): r""" Tests for the `testfile()` function. This function runs all the doctest examples in a given file. In its simple invokation, it is called with the name of a file, which is taken to be relative to the calling module. The return value is (#failures, #tests). We don't want `-v` in sys.argv for these tests. >>> save_argv = sys.argv >>> if '-v' in sys.argv: ... sys.argv = [arg for arg in save_argv if arg != '-v'] >>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS ********************************************************************** File "...", line 6, in test_doctest.txt Failed example: favorite_color Exception raised: ... NameError: name 'favorite_color' is not defined ********************************************************************** 1 items had failures: 1 of 2 in test_doctest.txt ***Test Failed*** 1 failures. TestResults(failed=1, attempted=2) >>> doctest.master = None # Reset master. (Note: we'll be clearing doctest.master after each call to `doctest.testfile`, to suppress warnings about multiple tests with the same name.) Globals may be specified with the `globs` and `extraglobs` parameters: >>> globs = {'favorite_color': 'blue'} >>> doctest.testfile('test_doctest.txt', globs=globs) TestResults(failed=0, attempted=2) >>> doctest.master = None # Reset master. >>> extraglobs = {'favorite_color': 'red'} >>> doctest.testfile('test_doctest.txt', globs=globs, ... 
extraglobs=extraglobs) # doctest: +ELLIPSIS ********************************************************************** File "...", line 6, in test_doctest.txt Failed example: favorite_color Expected: 'blue' Got: 'red' ********************************************************************** 1 items had failures: 1 of 2 in test_doctest.txt ***Test Failed*** 1 failures. TestResults(failed=1, attempted=2) >>> doctest.master = None # Reset master. The file may be made relative to a given module or package, using the optional `module_relative` parameter: >>> doctest.testfile('test_doctest.txt', globs=globs, ... module_relative='test') TestResults(failed=0, attempted=2) >>> doctest.master = None # Reset master. Verbosity can be increased with the optional `verbose` parameter: >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True) Trying: favorite_color Expecting: 'blue' ok Trying: if 1: print('a') print() print('b') Expecting: a <BLANKLINE> b ok 1 items passed all tests: 2 tests in test_doctest.txt 2 tests in 1 items. 2 passed and 0 failed. Test passed. TestResults(failed=0, attempted=2) >>> doctest.master = None # Reset master. The name of the test may be specified with the optional `name` parameter: >>> doctest.testfile('test_doctest.txt', name='newname') ... # doctest: +ELLIPSIS ********************************************************************** File "...", line 6, in newname ... TestResults(failed=1, attempted=2) >>> doctest.master = None # Reset master. The summary report may be suppressed with the optional `report` parameter: >>> doctest.testfile('test_doctest.txt', report=False) ... # doctest: +ELLIPSIS ********************************************************************** File "...", line 6, in test_doctest.txt Failed example: favorite_color Exception raised: ... NameError: name 'favorite_color' is not defined TestResults(failed=1, attempted=2) >>> doctest.master = None # Reset master. 
The optional keyword argument `raise_on_error` can be used to raise an exception on the first error (which may be useful for postmortem debugging): >>> doctest.testfile('test_doctest.txt', raise_on_error=True) ... # doctest: +ELLIPSIS Traceback (most recent call last): doctest.UnexpectedException: ... >>> doctest.master = None # Reset master. If the tests contain non-ASCII characters, the tests might fail, since it's unknown which encoding is used. The encoding can be specified using the optional keyword argument `encoding`: >>> doctest.testfile('test_doctest4.txt', encoding='latin-1') # doctest: +ELLIPSIS ********************************************************************** File "...", line 7, in test_doctest4.txt Failed example: '...' Expected: 'f\xf6\xf6' Got: 'f\xc3\xb6\xc3\xb6' ********************************************************************** ... ********************************************************************** 1 items had failures: 2 of 2 in test_doctest4.txt ***Test Failed*** 2 failures. TestResults(failed=2, attempted=2) >>> doctest.master = None # Reset master. >>> doctest.testfile('test_doctest4.txt', encoding='utf-8') TestResults(failed=0, attempted=2) >>> doctest.master = None # Reset master. Test the verbose output: >>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True) Trying: 'föö' Expecting: 'f\xf6\xf6' ok Trying: 'bąr' Expecting: 'b\u0105r' ok 1 items passed all tests: 2 tests in test_doctest4.txt 2 tests in 1 items. 2 passed and 0 failed. Test passed. TestResults(failed=0, attempted=2) >>> doctest.master = None # Reset master. >>> sys.argv = save_argv """ def test_testmod(): r""" Tests for the testmod function. More might be useful, but for now we're just testing the case raised by Issue 6195, where trying to doctest a C module would fail with a UnicodeDecodeError because doctest tried to read the "source" lines out of the binary module. 
>>> import unicodedata >>> doctest.testmod(unicodedata, verbose=False) TestResults(failed=0, attempted=0) """ try: os.fsencode("foo-bär@baz.py") except UnicodeEncodeError: # Skip the test: the filesystem encoding is unable to encode the filename pass else: def test_unicode(): """ Check doctest with a non-ascii filename: >>> doc = ''' ... >>> raise Exception('clé') ... ''' ... >>> parser = doctest.DocTestParser() >>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bär@baz.py", 0) >>> test <DocTest foo-bär@baz from foo-bär@baz.py:0 (1 example)> >>> runner = doctest.DocTestRunner(verbose=False) >>> runner.run(test) # doctest: +ELLIPSIS ********************************************************************** File "foo-bär@baz.py", line 2, in foo-bär@baz Failed example: raise Exception('clé') Exception raised: Traceback (most recent call last): File ... compileflags, 1), test.globs) File "<doctest foo-bär@baz[0]>", line 1, in <module> raise Exception('clé') Exception: clé TestResults(failed=1, attempted=1) """ def test_CLI(): r""" The doctest module can be used to run doctests against an arbitrary file. These tests test this CLI functionality. We'll use the support module's script_helpers for this, and write a test files to a temp dir to run the command against. Due to a current limitation in script_helpers, though, we need a little utility function to turn the returned output into something we can doctest against: >>> def normalize(s): ... return '\n'.join(s.decode().splitlines()) Note: we also pass TERM='' to all the assert_python calls to avoid a bug in the readline library that is triggered in these tests because we are running them in a new python process. See: http://lists.gnu.org/archive/html/bug-readline/2013-06/msg00000.html With those preliminaries out of the way, we'll start with a file with two simple tests and no errors. 
We'll run both the unadorned doctest command, and the verbose version, and then check the output: >>> from test import script_helper >>> with script_helper.temp_dir() as tmpdir: ... fn = os.path.join(tmpdir, 'myfile.doc') ... with open(fn, 'w') as f: ... _ = f.write('This is a very simple test file.\n') ... _ = f.write(' >>> 1 + 1\n') ... _ = f.write(' 2\n') ... _ = f.write(' >>> "a"\n') ... _ = f.write(" 'a'\n") ... _ = f.write('\n') ... _ = f.write('And that is it.\n') ... rc1, out1, err1 = script_helper.assert_python_ok( ... '-m', 'doctest', fn, TERM='') ... rc2, out2, err2 = script_helper.assert_python_ok( ... '-m', 'doctest', '-v', fn, TERM='') With no arguments and passing tests, we should get no output: >>> rc1, out1, err1 (0, b'', b'') With the verbose flag, we should see the test output, but no error output: >>> rc2, err2 (0, b'') >>> print(normalize(out2)) Trying: 1 + 1 Expecting: 2 ok Trying: "a" Expecting: 'a' ok 1 items passed all tests: 2 tests in myfile.doc 2 tests in 1 items. 2 passed and 0 failed. Test passed. Now we'll write a couple files, one with three tests, the other a python module with two tests, both of the files having "errors" in the tests that can be made non-errors by applying the appropriate doctest options to the run (ELLIPSIS in the first file, NORMALIZE_WHITESPACE in the second). This combination will allow to thoroughly test the -f and -o flags, as well as the doctest command's ability to process more than one file on the command line and, since the second file ends in '.py', its handling of python module files (as opposed to straight text files). >>> from test import script_helper >>> with script_helper.temp_dir() as tmpdir: ... fn = os.path.join(tmpdir, 'myfile.doc') ... with open(fn, 'w') as f: ... _ = f.write('This is another simple test file.\n') ... _ = f.write(' >>> 1 + 1\n') ... _ = f.write(' 2\n') ... _ = f.write(' >>> "abcdef"\n') ... _ = f.write(" 'a...f'\n") ... _ = f.write(' >>> "ajkml"\n') ... 
_ = f.write(" 'a...l'\n") ... _ = f.write('\n') ... _ = f.write('And that is it.\n') ... fn2 = os.path.join(tmpdir, 'myfile2.py') ... with open(fn2, 'w') as f: ... _ = f.write('def test_func():\n') ... _ = f.write(' \"\"\"\n') ... _ = f.write(' This is simple python test function.\n') ... _ = f.write(' >>> 1 + 1\n') ... _ = f.write(' 2\n') ... _ = f.write(' >>> "abc def"\n') ... _ = f.write(" 'abc def'\n") ... _ = f.write("\n") ... _ = f.write(' \"\"\"\n') ... import shutil ... rc1, out1, err1 = script_helper.assert_python_failure( ... '-m', 'doctest', fn, fn2, TERM='') ... rc2, out2, err2 = script_helper.assert_python_ok( ... '-m', 'doctest', '-o', 'ELLIPSIS', fn, TERM='') ... rc3, out3, err3 = script_helper.assert_python_ok( ... '-m', 'doctest', '-o', 'ELLIPSIS', ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='') ... rc4, out4, err4 = script_helper.assert_python_failure( ... '-m', 'doctest', '-f', fn, fn2, TERM='') ... rc5, out5, err5 = script_helper.assert_python_ok( ... '-m', 'doctest', '-v', '-o', 'ELLIPSIS', ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='') Our first test run will show the errors from the first file (doctest stops if a file has errors). Note that doctest test-run error output appears on stdout, not stderr: >>> rc1, err1 (1, b'') >>> print(normalize(out1)) # doctest: +ELLIPSIS ********************************************************************** File "...myfile.doc", line 4, in myfile.doc Failed example: "abcdef" Expected: 'a...f' Got: 'abcdef' ********************************************************************** File "...myfile.doc", line 6, in myfile.doc Failed example: "ajkml" Expected: 'a...l' Got: 'ajkml' ********************************************************************** 1 items had failures: 2 of 3 in myfile.doc ***Test Failed*** 2 failures. 
With -o ELLIPSIS specified, the second run, against just the first file, should produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither should the third, which ran against both files: >>> rc2, out2, err2 (0, b'', b'') >>> rc3, out3, err3 (0, b'', b'') The fourth run uses FAIL_FAST, so we should see only one error: >>> rc4, err4 (1, b'') >>> print(normalize(out4)) # doctest: +ELLIPSIS ********************************************************************** File "...myfile.doc", line 4, in myfile.doc Failed example: "abcdef" Expected: 'a...f' Got: 'abcdef' ********************************************************************** 1 items had failures: 1 of 2 in myfile.doc ***Test Failed*** 1 failures. The fifth test uses verbose with the two options, so we should get verbose success output for the tests in both files: >>> rc5, err5 (0, b'') >>> print(normalize(out5)) Trying: 1 + 1 Expecting: 2 ok Trying: "abcdef" Expecting: 'a...f' ok Trying: "ajkml" Expecting: 'a...l' ok 1 items passed all tests: 3 tests in myfile.doc 3 tests in 1 items. 3 passed and 0 failed. Test passed. Trying: 1 + 1 Expecting: 2 ok Trying: "abc def" Expecting: 'abc def' ok 1 items had no tests: myfile2 1 items passed all tests: 2 tests in myfile2.test_func 2 tests in 2 items. 2 passed and 0 failed. Test passed. We should also check some typical error cases. Invalid file name: >>> rc, out, err = script_helper.assert_python_failure( ... '-m', 'doctest', 'nosuchfile', TERM='') >>> rc, out (1, b'') >>> print(normalize(err)) # doctest: +ELLIPSIS Traceback (most recent call last): ... FileNotFoundError: [Errno ...] No such file or directory: 'nosuchfile' Invalid doctest option: >>> rc, out, err = script_helper.assert_python_failure( ... '-m', 'doctest', '-o', 'nosuchoption', TERM='') >>> rc, out (2, b'') >>> print(normalize(err)) # doctest: +ELLIPSIS usage...invalid...nosuchoption... 
""" ###################################################################### ## Main ###################################################################### def test_main(): # Check the doctest cases in doctest itself: support.run_doctest(doctest, verbosity=True) # Check the doctest cases defined here: from test import test_doctest support.run_doctest(test_doctest, verbosity=True) import sys, re, io def test_coverage(coverdir): trace = support.import_module('trace') tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,], trace=0, count=1) tracer.run('test_main()') r = tracer.results() print('Writing coverage results...') r.write_results(show_missing=True, summary=True, coverdir=coverdir) if __name__ == '__main__': if '-c' in sys.argv: test_coverage('/tmp/doctest.cover') else: test_main()
lgpl-3.0
meghana1995/sympy
sympy/core/tests/test_diff.py
115
2793
"""Tests for differentiation (Expr.diff / Derivative) of core sympy objects."""
from sympy import Symbol, Rational, cos, sin, tan, cot, exp, log, Function, \
    Derivative, Expr, symbols, pi, I, S
from sympy.utilities.pytest import raises


def test_diff():
    # Basic rules: constants differentiate to zero, zeroth derivative is
    # the expression itself, and differentiating w.r.t. an absent symbol
    # gives zero.
    x, y = symbols('x, y')
    assert Rational(1, 3).diff(x) is S.Zero
    assert I.diff(x) is S.Zero
    assert pi.diff(x) is S.Zero
    assert x.diff(x, 0) == x
    assert (x**2).diff(x, 2, x) == 0
    assert (x**2).diff(x, y, 0) == 2*x
    assert (x**2).diff(x, y) == 0
    # A non-symbol first argument to diff is rejected.
    raises(ValueError, lambda: x.diff(1, x))

    a = Symbol("a")
    b = Symbol("b")
    c = Symbol("c")
    p = Rational(5)
    e = a*b + b**p
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4
    assert e.diff(b).diff(a) == Rational(1)
    e = a*(b + c)
    assert e.diff(a) == b + c
    assert e.diff(b) == a
    assert e.diff(b).diff(a) == Rational(1)
    e = c**p
    # Differentiating c**5 more than five times yields zero.
    assert e.diff(c, 6) == Rational(0)
    assert e.diff(c, 5) == Rational(120)
    e = c**Rational(2)
    assert e.diff(c) == 2*c
    e = a*b*c
    assert e.diff(c) == a*b


def test_diff2():
    # Product-rule / expansion checks on trig-polynomial combinations.
    n3 = Rational(3)
    n2 = Rational(2)
    n6 = Rational(6)
    x, c = map(Symbol, 'xc')

    e = n3*(-n2 + x**n2)*cos(x) + x*(-n6 + x**n2)*sin(x)
    assert e == 3*(-2 + x**2)*cos(x) + x*(-6 + x**2)*sin(x)
    assert e.diff(x).expand() == x**3*cos(x)

    e = (x + 1)**3
    assert e.diff(x) == 3*(x + 1)**2
    e = x*(x + 1)**3
    assert e.diff(x) == (x + 1)**3 + 3*x*(x + 1)**2
    e = 2*exp(x*x)*x
    assert e.diff(x) == 2*exp(x**2) + 4*x**2*exp(x**2)


def test_diff3():
    # Chain rule through sin/tan/log; some results have several
    # equivalent forms, hence the `in [...]` membership checks.
    a, b, c = map(Symbol, 'abc')
    p = Rational(5)
    e = a*b + sin(b**p)
    assert e == a*b + sin(b**5)
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4*cos(b**5)
    e = tan(c)
    assert e == tan(c)
    assert e.diff(c) in [cos(c)**(-2), 1 + sin(c)**2/cos(c)**2, 1 + tan(c)**2]
    e = c*log(c) - c
    assert e == -c + c*log(c)
    assert e.diff(c) == log(c)
    e = log(sin(c))
    assert e == log(sin(c))
    assert e.diff(c) in [sin(c)**(-1)*cos(c), cot(c)]
    e = (Rational(2)**a/log(Rational(2)))
    assert e == 2**a*log(Rational(2))**(-1)
    assert e.diff(a) == 2**a


def test_diff_no_eval_derivative():
    # A custom Expr subclass without _eval_derivative must fall back to an
    # unevaluated Derivative (w.r.t. its own symbol) or zero (w.r.t. an
    # absent symbol).
    class My(Expr):
        def __new__(cls, x):
            return Expr.__new__(cls, x)

    x, y = symbols('x y')
    # My doesn't have its own _eval_derivative method
    assert My(x).diff(x).func is Derivative
    # it doesn't have y so it shouldn't need a method for this case
    assert My(x).diff(y) == 0


def test_speed():
    # this should return in 0.0s. If it takes forever, it's wrong.
    x = Symbol("x")
    assert x.diff(x, 10**8) == 0


def test_deriv_noncommutative():
    # Derivatives must preserve operand order for non-commutative symbols.
    A = Symbol("A", commutative=False)
    f = Function("f")
    x = Symbol("x")
    assert A*f(x)*A == f(x)*A**2
    assert A*f(x).diff(x)*A == f(x).diff(x) * A**2
bsd-3-clause
tedi3231/openerp
openerp/addons/decimal_precision/__openerp__.py
179
1721
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Decimal Precision Configuration', 'description': """ Configure the price accuracy you need for different kinds of usage: accounting, sales, purchases. ================================================================================================= The decimal precision is configured per company. """, 'author': 'OpenERP SA', 'version': '0.1', 'depends': ['base'], 'category' : 'Hidden/Dependency', 'data': [ 'decimal_precision_view.xml', 'security/ir.model.access.csv', ], 'demo': [], 'installable': True, 'images': ['images/1_decimal_accuracy_form.jpeg','images/1_decimal_accuracy_list.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
blueman-project/blueman
blueman/plugins/applet/Networking.py
2
2750
from gettext import gettext as _
from typing import Dict

from gi.repository import GLib

from blueman.main.Config import Config
from blueman.bluez.NetworkServer import NetworkServer
from blueman.main.DBusProxies import Mechanism
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.gui.CommonUi import ErrorDialog

import logging


class Networking(AppletPlugin):
    """Applet plugin managing local network services (NAP bridges).

    Registers/unregisters a BlueZ NAP network server on every known
    adapter according to the ``nap-enable`` gsettings key, and tracks
    per-adapter registration state in ``self._registered``.
    """
    __icon__ = "network-workgroup"
    __description__ = _("Manages local network services, like NAP bridges")
    __author__ = "Walmis"

    _signal = None

    def on_load(self) -> None:
        # Maps adapter object path -> True iff this plugin has registered a
        # NAP server on that adapter.
        self._registered: Dict[str, bool] = {}

        self.Config = Config("org.blueman.network")
        self.Config.connect("changed", self.on_config_changed)
        self.load_nap_settings()

    def on_manager_state_changed(self, state: bool) -> None:
        # (Re)apply the NAP setting once the BlueZ manager becomes available.
        if state:
            self.update_status()

    def load_nap_settings(self) -> None:
        """Ask the privileged Mechanism service to (re)apply network settings.

        Runs asynchronously; on failure a modal error dialog is shown.
        """
        logging.info("Loading NAP settings")

        def reply(_obj: Mechanism, _result: None, _user_data: None) -> None:
            pass

        def err(_obj: Mechanism, result: GLib.Error, _user_data: None) -> None:
            d = ErrorDialog("<b>Failed to apply network settings</b>",
                            "You might not be able to connect to the Bluetooth network via this machine",
                            result, margin_left=9)
            d.run()
            d.destroy()

        m = Mechanism()
        m.ReloadNetwork(result_handler=reply, error_handler=err)

    def on_unload(self) -> None:
        # Only unregister servers this plugin actually registered: entries
        # whose flag is False were never registered (or were already
        # unregistered by set_nap), so calling unregister() on them would
        # attempt to tear down a server that does not exist.
        for adapter_path, registered in self._registered.items():
            if registered:
                s = NetworkServer(obj_path=adapter_path)
                s.unregister("nap")
        self._registered = {}

        del self.Config

    def on_adapter_added(self, path: str) -> None:
        # `path` is unused here; update_status re-scans all adapters.
        self.update_status()

    def update_status(self) -> None:
        """Sync NAP registration with the current config value."""
        self.set_nap(self.Config["nap-enable"])

    def on_config_changed(self, config: Config, key: str) -> None:
        if key == "nap-enable":
            self.set_nap(config[key])

    def set_nap(self, on: bool) -> None:
        """Register (``on=True``) or unregister (``on=False``) a NAP server
        on every adapter known to the manager, updating ``self._registered``."""
        logging.info("set nap %s" % on)
        if self.parent.manager_state:
            for adapter in self.parent.Manager.get_adapters():
                object_path = adapter.get_object_path()
                registered = self._registered.setdefault(object_path, False)

                s = NetworkServer(obj_path=object_path)
                if on and not registered:
                    # "pan1" is the bridge interface name for the NAP server.
                    s.register("nap", "pan1")
                    self._registered[object_path] = True
                elif not on and registered:
                    s.unregister("nap")
                    self._registered[object_path] = False
gpl-3.0
camsong/node-gyp
gyp/pylib/gyp/generator/ninja.py
240
99242
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import copy import hashlib import json import multiprocessing import os.path import re import signal import subprocess import sys import gyp import gyp.common from gyp.common import OrderedSet import gyp.msvs_emulation import gyp.MSVSUtil as MSVSUtil import gyp.xcode_emulation from cStringIO import StringIO from gyp.common import GetEnvironFallback import gyp.ninja_syntax as ninja_syntax generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_PREFIX': 'lib', # Gyp expects the following variables to be expandable by the build # system to the appropriate locations. Ninja prefers paths to be # known at gyp time. To resolve this, introduce special # variables starting with $! and $| (which begin with a $ so gyp knows it # should be treated specially, but is otherwise an invalid # ninja/shell variable) that are passed to gyp here but expanded # before writing out into the target .ninja files; see # ExpandSpecial. # $! is used for variables that represent a path and that can only appear at # the start of a string, while $| is used for variables that can appear # anywhere in a string. 'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen', 'PRODUCT_DIR': '$!PRODUCT_DIR', 'CONFIGURATION_NAME': '$|CONFIGURATION_NAME', # Special variables that may be used by gyp 'rule' targets. # We generate definitions for these variables on the fly when processing a # rule. 'RULE_INPUT_ROOT': '${root}', 'RULE_INPUT_DIRNAME': '${dirname}', 'RULE_INPUT_PATH': '${source}', 'RULE_INPUT_EXT': '${ext}', 'RULE_INPUT_NAME': '${name}', } # Placates pylint. 
# Hooks read by gyp's generator machinery; this generator has no extras.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()


def StripPrefix(arg, prefix):
  # Return `arg` with a leading `prefix` removed, or unchanged if absent.
  if arg.startswith(prefix):
    return arg[len(prefix):]
  return arg


def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Rather than attempting to enumerate the bad shell characters, just
  # whitelist common OK ones and quote anything else.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg  # No quoting necessary.
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX shells: single-quote, escaping embedded single quotes as '"'"'.
  return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"


def Define(d, flavor):
  """Takes a preprocessor define and returns a -D parameter that's ninja- and
  shell-escaped."""
  if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocesor definitions for
    # some reason. Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)


def AddArch(output, arch):
  """Adds an arch string to an output path."""
  # e.g. ('foo.o', 'x64') -> 'foo.x64.o'
  output, extension = os.path.splitext(output)
  return '%s.%s%s' % (output, arch, extension)


class Target(object):
  """Target represents the paths used within a single gyp target.

  Conceptually, building a single target A is a series of steps:

  1) actions/rules/copies  generates source/resources/etc.
  2) compiles              generates .o files
  3) link                  generates a binary (library/executable)
  4) bundle                merges the above in a mac bundle

  (Any of these steps can be optional.)

  From a build ordering perspective, a dependent target B could just
  depend on the last output of this series of steps.  But some dependent
  commands sometimes need to reach inside the box.  For example, when
  linking B it needs to get the path to the static library generated by A.

  This object stores those paths.  To keep things simple, member
  variables only store concrete paths to single files, while methods
  compute derived values like "the last output of the target".
  """
  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # File representing whether any input dependencies necessary for
    # dependent actions have completed.
    self.preaction_stamp = None
    # File representing whether any input dependencies necessary for
    # dependent compiles have completed.
    self.precompile_stamp = None
    # File representing the completion of actions/rules/copies, if any.
    self.actions_stamp = None
    # Path to the output of the link step, if any.
    self.binary = None
    # Path to the file representing the completion of building the bundle,
    # if any.
    self.bundle = None
    # On Windows, incremental linking requires linking against all the .objs
    # that compose a .lib (rather than the .lib itself). That list is stored
    # here.
    self.component_objs = None
    # Windows only. The import .lib is the output of a build step, but
    # because dependents only link against the lib (not both the lib and the
    # dll) we keep track of the import library here.
    self.import_lib = None

  def Linkable(self):
    """Return true if this is a target that can be linked against."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Return true if the target should produce a restat rule based on a TOC
    file."""
    # For bundles, the .TOC should be produced for the binary, not for
    # FinalOutput(). But the naive approach would put the TOC file into the
    # bundle, so don't do this for bundles for now.
    if flavor == 'win' or self.bundle:
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Return the path, if any, that should be used as a dependency of
    any dependent action step."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Return the path, if any, that should be used as a dependency of
    any dependent compile step."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """Return the last output of the target, which depends on all prior
    steps."""
    return self.bundle or self.binary or self.actions_stamp


# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file.  Call these "gyp paths".  This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from.  We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory.  Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
#   into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
#   an output file; the result can be namespaced such that it is unique
#   to the input file name as well as the output target name.
class NinjaWriter(object): def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir, output_file, toplevel_build, output_file_name, flavor, toplevel_dir=None): """ base_dir: path from source root to directory containing this gyp file, by gyp semantics, all input paths are relative to this build_dir: path from source root to build output toplevel_dir: path to the toplevel directory """ self.hash_for_rules = hash_for_rules self.target_outputs = target_outputs self.base_dir = base_dir self.build_dir = build_dir self.ninja = ninja_syntax.Writer(output_file) self.toplevel_build = toplevel_build self.output_file_name = output_file_name self.flavor = flavor self.abs_build_dir = None if toplevel_dir is not None: self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir, build_dir)) self.obj_ext = '.obj' if flavor == 'win' else '.o' if flavor == 'win': # See docstring of msvs_emulation.GenerateEnvironmentFiles(). self.win_env = {} for arch in ('x86', 'x64'): self.win_env[arch] = 'environment.' + arch # Relative path from build output dir to base dir. build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir) self.build_to_base = os.path.join(build_to_top, base_dir) # Relative path from base dir to build dir. base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir) self.base_to_build = os.path.join(base_to_top, build_dir) def ExpandSpecial(self, path, product_dir=None): """Expand specials like $!PRODUCT_DIR in |path|. If |product_dir| is None, assumes the cwd is already the product dir. Otherwise, |product_dir| is the relative path to the product dir. 
""" PRODUCT_DIR = '$!PRODUCT_DIR' if PRODUCT_DIR in path: if product_dir: path = path.replace(PRODUCT_DIR, product_dir) else: path = path.replace(PRODUCT_DIR + '/', '') path = path.replace(PRODUCT_DIR + '\\', '') path = path.replace(PRODUCT_DIR, '.') INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR' if INTERMEDIATE_DIR in path: int_dir = self.GypPathToUniqueOutput('gen') # GypPathToUniqueOutput generates a path relative to the product dir, # so insert product_dir in front if it is provided. path = path.replace(INTERMEDIATE_DIR, os.path.join(product_dir or '', int_dir)) CONFIGURATION_NAME = '$|CONFIGURATION_NAME' path = path.replace(CONFIGURATION_NAME, self.config_name) return path def ExpandRuleVariables(self, path, root, dirname, source, ext, name): if self.flavor == 'win': path = self.msvs_settings.ConvertVSMacros( path, config=self.config_name) path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root) path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'], dirname) path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source) path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext) path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name) return path def GypPathToNinja(self, path, env=None): """Translate a gyp path to a ninja path, optionally expanding environment variable references in |path| with |env|. 
See the above discourse on path conversions.""" if env: if self.flavor == 'mac': path = gyp.xcode_emulation.ExpandEnvVars(path, env) elif self.flavor == 'win': path = gyp.msvs_emulation.ExpandMacros(path, env) if path.startswith('$!'): expanded = self.ExpandSpecial(path) if self.flavor == 'win': expanded = os.path.normpath(expanded) return expanded if '$|' in path: path = self.ExpandSpecial(path) assert '$' not in path, path return os.path.normpath(os.path.join(self.build_to_base, path)) def GypPathToUniqueOutput(self, path, qualified=True): """Translate a gyp path to a ninja path for writing output. If qualified is True, qualify the resulting filename with the name of the target. This is necessary when e.g. compiling the same path twice for two separate output targets. See the above discourse on path conversions.""" path = self.ExpandSpecial(path) assert not path.startswith('$'), path # Translate the path following this scheme: # Input: foo/bar.gyp, target targ, references baz/out.o # Output: obj/foo/baz/targ.out.o (if qualified) # obj/foo/baz/out.o (otherwise) # (and obj.host instead of obj for cross-compiles) # # Why this scheme and not some other one? # 1) for a given input, you can compute all derived outputs by matching # its path, even if the input is brought via a gyp file with '..'. # 2) simple files like libraries and stamps have a simple filename. obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset path_dir, path_basename = os.path.split(path) assert not os.path.isabs(path_dir), ( "'%s' can not be absolute path (see crbug.com/462153)." % path_dir) if qualified: path_basename = self.name + '.' + path_basename return os.path.normpath(os.path.join(obj, self.base_dir, path_dir, path_basename)) def WriteCollapsedDependencies(self, name, targets, order_only=None): """Given a list of targets, return a path for a single file representing the result of building all the targets or None. 
Uses a stamp file if necessary.""" assert targets == filter(None, targets), targets if len(targets) == 0: assert not order_only return None if len(targets) > 1 or order_only: stamp = self.GypPathToUniqueOutput(name + '.stamp') targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only) self.ninja.newline() return targets[0] def _SubninjaNameForArch(self, arch): output_file_base = os.path.splitext(self.output_file_name)[0] return '%s.%s.ninja' % (output_file_base, arch) def WriteSpec(self, spec, config_name, generator_flags): """The main entry point for NinjaWriter: write the build rules for a spec. Returns a Target object, which represents the output paths for this spec. Returns None if there are no outputs (e.g. a settings-only 'none' type target).""" self.config_name = config_name self.name = spec['target_name'] self.toolset = spec['toolset'] config = spec['configurations'][config_name] self.target = Target(spec['type']) self.is_standalone_static_library = bool( spec.get('standalone_static_library', 0)) # Track if this target contains any C++ files, to decide if gcc or g++ # should be used for linking. 
self.uses_cpp = False self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) self.xcode_settings = self.msvs_settings = None if self.flavor == 'mac': self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) if self.flavor == 'win': self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec, generator_flags) arch = self.msvs_settings.GetArch(config_name) self.ninja.variable('arch', self.win_env[arch]) self.ninja.variable('cc', '$cl_' + arch) self.ninja.variable('cxx', '$cl_' + arch) self.ninja.variable('cc_host', '$cl_' + arch) self.ninja.variable('cxx_host', '$cl_' + arch) self.ninja.variable('asm', '$ml_' + arch) if self.flavor == 'mac': self.archs = self.xcode_settings.GetActiveArchs(config_name) if len(self.archs) > 1: self.arch_subninjas = dict( (arch, ninja_syntax.Writer( OpenOutput(os.path.join(self.toplevel_build, self._SubninjaNameForArch(arch)), 'w'))) for arch in self.archs) # Compute predepends for all rules. # actions_depends is the dependencies this target depends on before running # any of its action/rule/copy steps. # compile_depends is the dependencies this target depends on before running # any of its compile steps. actions_depends = [] compile_depends = [] # TODO(evan): it is rather confusing which things are lists and which # are strings. Fix these. if 'dependencies' in spec: for dep in spec['dependencies']: if dep in self.target_outputs: target = self.target_outputs[dep] actions_depends.append(target.PreActionInput(self.flavor)) compile_depends.append(target.PreCompileInput()) actions_depends = filter(None, actions_depends) compile_depends = filter(None, compile_depends) actions_depends = self.WriteCollapsedDependencies('actions_depends', actions_depends) compile_depends = self.WriteCollapsedDependencies('compile_depends', compile_depends) self.target.preaction_stamp = actions_depends self.target.precompile_stamp = compile_depends # Write out actions, rules, and copies. 
These must happen before we # compile any sources, so compute a list of predependencies for sources # while we do it. extra_sources = [] mac_bundle_depends = [] self.target.actions_stamp = self.WriteActionsRulesCopies( spec, extra_sources, actions_depends, mac_bundle_depends) # If we have actions/rules/copies, we depend directly on those, but # otherwise we depend on dependent target's actions/rules/copies etc. # We never need to explicitly depend on previous target's link steps, # because no compile ever depends on them. compile_depends_stamp = (self.target.actions_stamp or compile_depends) # Write out the compilation steps, if any. link_deps = [] sources = extra_sources + spec.get('sources', []) if sources: if self.flavor == 'mac' and len(self.archs) > 1: # Write subninja file containing compile and link commands scoped to # a single arch if a fat binary is being built. for arch in self.archs: self.ninja.subninja(self._SubninjaNameForArch(arch)) pch = None if self.flavor == 'win': gyp.msvs_emulation.VerifyMissingSources( sources, self.abs_build_dir, generator_flags, self.GypPathToNinja) pch = gyp.msvs_emulation.PrecompiledHeader( self.msvs_settings, config_name, self.GypPathToNinja, self.GypPathToUniqueOutput, self.obj_ext) else: pch = gyp.xcode_emulation.MacPrefixHeader( self.xcode_settings, self.GypPathToNinja, lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang)) link_deps = self.WriteSources( self.ninja, config_name, config, sources, compile_depends_stamp, pch, spec) # Some actions/rules output 'sources' that are already object files. obj_outputs = [f for f in sources if f.endswith(self.obj_ext)] if obj_outputs: if self.flavor != 'mac' or len(self.archs) == 1: link_deps += [self.GypPathToNinja(o) for o in obj_outputs] else: print "Warning: Actions/rules writing object files don't work with " \ "multiarch targets, dropping. 
(target %s)" % spec['target_name'] elif self.flavor == 'mac' and len(self.archs) > 1: link_deps = collections.defaultdict(list) if self.flavor == 'win' and self.target.type == 'static_library': self.target.component_objs = link_deps # Write out a link step, if needed. output = None is_empty_bundle = not link_deps and not mac_bundle_depends if link_deps or self.target.actions_stamp or actions_depends: output = self.WriteTarget(spec, config_name, config, link_deps, self.target.actions_stamp or actions_depends) if self.is_mac_bundle: mac_bundle_depends.append(output) # Bundle all of the above together, if needed. if self.is_mac_bundle: output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle) if not output: return None assert self.target.FinalOutput(), output return self.target def _WinIdlRule(self, source, prebuild, outputs): """Handle the implicit VS .idl rule for one source file. Fills |outputs| with files that are generated.""" outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData( source, self.config_name) outdir = self.GypPathToNinja(outdir) def fix_path(path, rel=None): path = os.path.join(outdir, path) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) path = self.ExpandRuleVariables( path, root, dirname, source, ext, basename) if rel: path = os.path.relpath(path, rel) return path vars = [(name, fix_path(value, outdir)) for name, value in vars] output = [fix_path(p) for p in output] vars.append(('outdir', outdir)) vars.append(('idlflags', flags)) input = self.GypPathToNinja(source) self.ninja.build(output, 'idl', input, variables=vars, order_only=prebuild) outputs.extend(output) def WriteWinIdlFiles(self, spec, prebuild): """Writes rules to match MSVS's implicit idl handling.""" assert self.flavor == 'win' if self.msvs_settings.HasExplicitIdlRulesOrActions(spec): return [] outputs = [] for source in filter(lambda x: x.endswith('.idl'), spec['sources']): self._WinIdlRule(source, prebuild, outputs) return 
outputs def WriteActionsRulesCopies(self, spec, extra_sources, prebuild, mac_bundle_depends): """Write out the Actions, Rules, and Copies steps. Return a path representing the outputs of these steps.""" outputs = [] if self.is_mac_bundle: mac_bundle_resources = spec.get('mac_bundle_resources', [])[:] else: mac_bundle_resources = [] extra_mac_bundle_resources = [] if 'actions' in spec: outputs += self.WriteActions(spec['actions'], extra_sources, prebuild, extra_mac_bundle_resources) if 'rules' in spec: outputs += self.WriteRules(spec['rules'], extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources) if 'copies' in spec: outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends) if 'sources' in spec and self.flavor == 'win': outputs += self.WriteWinIdlFiles(spec, prebuild) stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs) if self.is_mac_bundle: xcassets = self.WriteMacBundleResources( extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends) partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends) self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends) return stamp def GenerateDescription(self, verb, message, fallback): """Generate and return a description of a build step. |verb| is the short summary, e.g. ACTION or RULE. |message| is a hand-written description, or None if not available. |fallback| is the gyp-level name of the step, usable as a fallback. """ if self.toolset != 'target': verb += '(%s)' % self.toolset if message: return '%s %s' % (verb, self.ExpandSpecial(message)) else: return '%s %s: %s' % (verb, self.name, fallback) def WriteActions(self, actions, extra_sources, prebuild, extra_mac_bundle_resources): # Actions cd into the base directory. env = self.GetToolchainEnv() all_outputs = [] for action in actions: # First write out a rule for the action. 
name = '%s_%s' % (action['action_name'], self.hash_for_rules) description = self.GenerateDescription('ACTION', action.get('message', None), name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action) if self.flavor == 'win' else False) args = action['action'] depfile = action.get('depfile', None) if depfile: depfile = self.ExpandSpecial(depfile, self.base_to_build) pool = 'console' if int(action.get('ninja_use_console', 0)) else None rule_name, _ = self.WriteNewNinjaRule(name, args, description, is_cygwin, env, pool, depfile=depfile) inputs = [self.GypPathToNinja(i, env) for i in action['inputs']] if int(action.get('process_outputs_as_sources', False)): extra_sources += action['outputs'] if int(action.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += action['outputs'] outputs = [self.GypPathToNinja(o, env) for o in action['outputs']] # Then write out an edge using the rule. self.ninja.build(outputs, rule_name, inputs, order_only=prebuild) all_outputs += outputs self.ninja.newline() return all_outputs def WriteRules(self, rules, extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources): env = self.GetToolchainEnv() all_outputs = [] for rule in rules: # Skip a rule with no action and no inputs. if 'action' not in rule and not rule.get('rule_sources', []): continue # First write out a rule for the rule action. name = '%s_%s' % (rule['rule_name'], self.hash_for_rules) args = rule['action'] description = self.GenerateDescription( 'RULE', rule.get('message', None), ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule) if self.flavor == 'win' else False) pool = 'console' if int(rule.get('ninja_use_console', 0)) else None rule_name, args = self.WriteNewNinjaRule( name, args, description, is_cygwin, env, pool) # TODO: if the command references the outputs directly, we should # simplify it to just use $out. 
# Rules can potentially make use of some special variables which # must vary per source file. # Compute the list of variables we'll need to provide. special_locals = ('source', 'root', 'dirname', 'ext', 'name') needed_variables = set(['source']) for argument in args: for var in special_locals: if '${%s}' % var in argument: needed_variables.add(var) def cygwin_munge(path): # pylint: disable=cell-var-from-loop if is_cygwin: return path.replace('\\', '/') return path inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])] # If there are n source files matching the rule, and m additional rule # inputs, then adding 'inputs' to each build edge written below will # write m * n inputs. Collapsing reduces this to m + n. sources = rule.get('rule_sources', []) num_inputs = len(inputs) if prebuild: num_inputs += 1 if num_inputs > 2 and len(sources) > 2: inputs = [self.WriteCollapsedDependencies( rule['rule_name'], inputs, order_only=prebuild)] prebuild = [] # For each source file, write an edge that generates all the outputs. for source in sources: source = os.path.normpath(source) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) # Gather the list of inputs and outputs, expanding $vars if possible. outputs = [self.ExpandRuleVariables(o, root, dirname, source, ext, basename) for o in rule['outputs']] if int(rule.get('process_outputs_as_sources', False)): extra_sources += outputs was_mac_bundle_resource = source in mac_bundle_resources if was_mac_bundle_resource or \ int(rule.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += outputs # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed # items in a set and remove them all in a single pass if this becomes # a performance issue. 
if was_mac_bundle_resource: mac_bundle_resources.remove(source) extra_bindings = [] for var in needed_variables: if var == 'root': extra_bindings.append(('root', cygwin_munge(root))) elif var == 'dirname': # '$dirname' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build) extra_bindings.append(('dirname', cygwin_munge(dirname_expanded))) elif var == 'source': # '$source' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. source_expanded = self.ExpandSpecial(source, self.base_to_build) extra_bindings.append(('source', cygwin_munge(source_expanded))) elif var == 'ext': extra_bindings.append(('ext', ext)) elif var == 'name': extra_bindings.append(('name', cygwin_munge(basename))) else: assert var == None, repr(var) outputs = [self.GypPathToNinja(o, env) for o in outputs] if self.flavor == 'win': # WriteNewNinjaRule uses unique_name for creating an rsp file on win. extra_bindings.append(('unique_name', hashlib.md5(outputs[0]).hexdigest())) self.ninja.build(outputs, rule_name, self.GypPathToNinja(source), implicit=inputs, order_only=prebuild, variables=extra_bindings) all_outputs.extend(outputs) return all_outputs def WriteCopies(self, copies, prebuild, mac_bundle_depends): outputs = [] env = self.GetToolchainEnv() for copy in copies: for path in copy['files']: # Normalize the path so trailing slashes don't confuse us. 
        path = os.path.normpath(path)
        basename = os.path.split(path)[1]
        src = self.GypPathToNinja(path, env)
        dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                  env)
        outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
        if self.is_mac_bundle:
          # gyp has mac_bundle_resources to copy things into a bundle's
          # Resources folder, but there's no built-in way to copy files to
          # other places in the bundle. Hence, some targets use copies for
          # this. Check if this file is copied into the current bundle, and
          # if so add it to the bundle depends so that dependent targets get
          # rebuilt if the copy input changes.
          if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
            mac_bundle_depends.append(dst)

    return outputs

  def WriteMacBundleResources(self, resources, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources'.

    Non-.xcassets resources get a 'mac_tool copy-bundle-resource' edge and
    are appended to |bundle_depends|; .xcassets catalogs are collected and
    returned to the caller so they can be handled in one batch instead.
    """
    xcassets = []
    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, map(self.GypPathToNinja, resources)):
      output = self.ExpandSpecial(output)
      if os.path.splitext(output)[-1] != '.xcassets':
        # NOTE(review): 'binary' presumably tells mac_tool to convert
        # plists to binary format while copying -- confirm in mac_tool.py.
        isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
        self.ninja.build(output, 'mac_tool', res,
                         variables=[('mactool_cmd', 'copy-bundle-resource'),
                                    ('binary', isBinary)])
        bundle_depends.append(output)
      else:
        xcassets.append(res)
    return xcassets

  def WriteMacXCassets(self, xcassets, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources' .xcassets files.

    This add an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the assets catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory.
If this is not the case, then the build will probably be done at each invocation of ninja.""" if not xcassets: return extra_arguments = {} settings_to_arg = { 'XCASSETS_APP_ICON': 'app-icon', 'XCASSETS_LAUNCH_IMAGE': 'launch-image', } settings = self.xcode_settings.xcode_settings[self.config_name] for settings_key, arg_name in settings_to_arg.iteritems(): value = settings.get(settings_key) if value: extra_arguments[arg_name] = value partial_info_plist = None if extra_arguments: partial_info_plist = self.GypPathToUniqueOutput( 'assetcatalog_generated_info.plist') extra_arguments['output-partial-info-plist'] = partial_info_plist outputs = [] outputs.append( os.path.join( self.xcode_settings.GetBundleResourceFolder(), 'Assets.car')) if partial_info_plist: outputs.append(partial_info_plist) keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor) extra_env = self.xcode_settings.GetPerTargetSettings() env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) bundle_depends.extend(self.ninja.build( outputs, 'compile_xcassets', xcassets, variables=[('env', env), ('keys', keys)])) return partial_info_plist def WriteMacInfoPlist(self, partial_info_plist, bundle_depends): """Write build rules for bundle Info.plist files.""" info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, self.GypPathToNinja) if not info_plist: return out = self.ExpandSpecial(out) if defines: # Create an intermediate file to store preprocessed results. 
intermediate_plist = self.GypPathToUniqueOutput( os.path.basename(info_plist)) defines = ' '.join([Define(d, self.flavor) for d in defines]) info_plist = self.ninja.build( intermediate_plist, 'preprocess_infoplist', info_plist, variables=[('defines',defines)]) env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) if partial_info_plist: intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist') info_plist = self.ninja.build( intermediate_plist, 'merge_infoplist', [partial_info_plist, info_plist]) keys = self.xcode_settings.GetExtraPlistItems(self.config_name) keys = QuoteShellArgument(json.dumps(keys), self.flavor) isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) self.ninja.build(out, 'copy_infoplist', info_plist, variables=[('env', env), ('keys', keys), ('binary', isBinary)]) bundle_depends.append(out) def WriteSources(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec): """Write build rules to compile all of |sources|.""" if self.toolset == 'host': self.ninja.variable('ar', '$ar_host') self.ninja.variable('cc', '$cc_host') self.ninja.variable('cxx', '$cxx_host') self.ninja.variable('ld', '$ld_host') self.ninja.variable('ldxx', '$ldxx_host') self.ninja.variable('nm', '$nm_host') self.ninja.variable('readelf', '$readelf_host') if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteSourcesForArch( self.ninja, config_name, config, sources, predepends, precompiled_header, spec) else: return dict((arch, self.WriteSourcesForArch( self.arch_subninjas[arch], config_name, config, sources, predepends, precompiled_header, spec, arch=arch)) for arch in self.archs) def WriteSourcesForArch(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec, arch=None): """Write build rules to compile all of |sources|.""" extra_defines = [] if self.flavor == 'mac': cflags = self.xcode_settings.GetCflags(config_name, arch=arch) cflags_c = 
self.xcode_settings.GetCflagsC(config_name) cflags_cc = self.xcode_settings.GetCflagsCC(config_name) cflags_objc = ['$cflags_c'] + \ self.xcode_settings.GetCflagsObjC(config_name) cflags_objcc = ['$cflags_cc'] + \ self.xcode_settings.GetCflagsObjCC(config_name) elif self.flavor == 'win': asmflags = self.msvs_settings.GetAsmflags(config_name) cflags = self.msvs_settings.GetCflags(config_name) cflags_c = self.msvs_settings.GetCflagsC(config_name) cflags_cc = self.msvs_settings.GetCflagsCC(config_name) extra_defines = self.msvs_settings.GetComputedDefines(config_name) # See comment at cc_command for why there's two .pdb files. pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName( config_name, self.ExpandSpecial) if not pdbpath_c: obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name)) pdbpath_c = pdbpath + '.c.pdb' pdbpath_cc = pdbpath + '.cc.pdb' self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c]) self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc]) self.WriteVariableList(ninja_file, 'pchprefix', [self.name]) else: cflags = config.get('cflags', []) cflags_c = config.get('cflags_c', []) cflags_cc = config.get('cflags_cc', []) # Respect environment variables related to build, but target-specific # flags can still override them. 
if self.toolset == 'target': cflags_c = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CFLAGS', '').split() + cflags_c) cflags_cc = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CXXFLAGS', '').split() + cflags_cc) defines = config.get('defines', []) + extra_defines self.WriteVariableList(ninja_file, 'defines', [Define(d, self.flavor) for d in defines]) if self.flavor == 'win': self.WriteVariableList(ninja_file, 'asmflags', map(self.ExpandSpecial, asmflags)) self.WriteVariableList(ninja_file, 'rcflags', [QuoteShellArgument(self.ExpandSpecial(f), self.flavor) for f in self.msvs_settings.GetRcflags(config_name, self.GypPathToNinja)]) include_dirs = config.get('include_dirs', []) env = self.GetToolchainEnv() if self.flavor == 'win': include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs, config_name) self.WriteVariableList(ninja_file, 'includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in include_dirs]) if self.flavor == 'win': midl_include_dirs = config.get('midl_include_dirs', []) midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs( midl_include_dirs, config_name) self.WriteVariableList(ninja_file, 'midl_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in midl_include_dirs]) pch_commands = precompiled_header.GetPchBuildCommands(arch) if self.flavor == 'mac': # Most targets use no precompiled headers, so only write these if needed. 
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'), ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]: include = precompiled_header.GetInclude(ext, arch) if include: ninja_file.variable(var, include) arflags = config.get('arflags', []) self.WriteVariableList(ninja_file, 'cflags', map(self.ExpandSpecial, cflags)) self.WriteVariableList(ninja_file, 'cflags_c', map(self.ExpandSpecial, cflags_c)) self.WriteVariableList(ninja_file, 'cflags_cc', map(self.ExpandSpecial, cflags_cc)) if self.flavor == 'mac': self.WriteVariableList(ninja_file, 'cflags_objc', map(self.ExpandSpecial, cflags_objc)) self.WriteVariableList(ninja_file, 'cflags_objcc', map(self.ExpandSpecial, cflags_objcc)) self.WriteVariableList(ninja_file, 'arflags', map(self.ExpandSpecial, arflags)) ninja_file.newline() outputs = [] has_rc_source = False for source in sources: filename, ext = os.path.splitext(source) ext = ext[1:] obj_ext = self.obj_ext if ext in ('cc', 'cpp', 'cxx'): command = 'cxx' self.uses_cpp = True elif ext == 'c' or (ext == 'S' and self.flavor != 'win'): command = 'cc' elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files. command = 'cc_s' elif (self.flavor == 'win' and ext == 'asm' and not self.msvs_settings.HasExplicitAsmRules(spec)): command = 'asm' # Add the _asm suffix as msvs is capable of handling .cc and # .asm files of the same name without collision. obj_ext = '_asm.obj' elif self.flavor == 'mac' and ext == 'm': command = 'objc' elif self.flavor == 'mac' and ext == 'mm': command = 'objcxx' self.uses_cpp = True elif self.flavor == 'win' and ext == 'rc': command = 'rc' obj_ext = '.res' has_rc_source = True else: # Ignore unhandled extensions. 
continue input = self.GypPathToNinja(source) output = self.GypPathToUniqueOutput(filename + obj_ext) if arch is not None: output = AddArch(output, arch) implicit = precompiled_header.GetObjDependencies([input], [output], arch) variables = [] if self.flavor == 'win': variables, output, implicit = precompiled_header.GetFlagsModifications( input, output, implicit, command, cflags_c, cflags_cc, self.ExpandSpecial) ninja_file.build(output, command, input, implicit=[gch for _, _, gch in implicit], order_only=predepends, variables=variables) outputs.append(output) if has_rc_source: resource_include_dirs = config.get('resource_include_dirs', include_dirs) self.WriteVariableList(ninja_file, 'resource_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in resource_include_dirs]) self.WritePchTargets(ninja_file, pch_commands) ninja_file.newline() return outputs def WritePchTargets(self, ninja_file, pch_commands): """Writes ninja rules to compile prefix headers.""" if not pch_commands: return for gch, lang_flag, lang, input in pch_commands: var_name = { 'c': 'cflags_pch_c', 'cc': 'cflags_pch_cc', 'm': 'cflags_pch_objc', 'mm': 'cflags_pch_objcc', }[lang] map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', } cmd = map.get(lang) ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)]) def WriteLink(self, spec, config_name, config, link_deps): """Write out a link step. Fills out target.binary. 
""" if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteLinkForArch( self.ninja, spec, config_name, config, link_deps) else: output = self.ComputeOutput(spec) inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec, config_name, config, link_deps[arch], arch=arch) for arch in self.archs] extra_bindings = [] build_output = output if not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) # TODO(yyanagisawa): more work needed to fix: # https://code.google.com/p/gyp/issues/detail?id=411 if (spec['type'] in ('shared_library', 'loadable_module') and not self.is_mac_bundle): extra_bindings.append(('lib', output)) self.ninja.build([output, output + '.TOC'], 'solipo', inputs, variables=extra_bindings) else: self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings) return output def WriteLinkForArch(self, ninja_file, spec, config_name, config, link_deps, arch=None): """Write out a link step. Fills out target.binary. """ command = { 'executable': 'link', 'loadable_module': 'solink_module', 'shared_library': 'solink', }[spec['type']] command_suffix = '' implicit_deps = set() solibs = set() if 'dependencies' in spec: # Two kinds of dependencies: # - Linkable dependencies (like a .a or a .so): add them to the link line. 
# - Non-linkable dependencies (like a rule that generates a file # and writes a stamp file): add them to implicit_deps extra_link_deps = set() for dep in spec['dependencies']: target = self.target_outputs.get(dep) if not target: continue linkable = target.Linkable() if linkable: new_deps = [] if (self.flavor == 'win' and target.component_objs and self.msvs_settings.IsUseLibraryDependencyInputs(config_name)): new_deps = target.component_objs elif self.flavor == 'win' and target.import_lib: new_deps = [target.import_lib] elif target.UsesToc(self.flavor): solibs.add(target.binary) implicit_deps.add(target.binary + '.TOC') else: new_deps = [target.binary] for new_dep in new_deps: if new_dep not in extra_link_deps: extra_link_deps.add(new_dep) link_deps.append(new_dep) final_output = target.FinalOutput() if not linkable or final_output != target.binary: implicit_deps.add(final_output) extra_bindings = [] if self.uses_cpp and self.flavor != 'win': extra_bindings.append(('ld', '$ldxx')) output = self.ComputeOutput(spec, arch) if arch is None and not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) is_executable = spec['type'] == 'executable' # The ldflags config key is not used on mac or win. On those platforms # linker flags are set via xcode_settings and msvs_settings, respectively. 
env_ldflags = os.environ.get('LDFLAGS', '').split() if self.flavor == 'mac': ldflags = self.xcode_settings.GetLdflags(config_name, self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']), self.GypPathToNinja, arch) ldflags = env_ldflags + ldflags elif self.flavor == 'win': manifest_base_name = self.GypPathToUniqueOutput( self.ComputeOutputFileName(spec)) ldflags, intermediate_manifest, manifest_files = \ self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja, self.ExpandSpecial, manifest_base_name, output, is_executable, self.toplevel_build) ldflags = env_ldflags + ldflags self.WriteVariableList(ninja_file, 'manifests', manifest_files) implicit_deps = implicit_deps.union(manifest_files) if intermediate_manifest: self.WriteVariableList( ninja_file, 'intermediatemanifest', [intermediate_manifest]) command_suffix = _GetWinLinkRuleNameSuffix( self.msvs_settings.IsEmbedManifest(config_name)) def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja) if def_file: implicit_deps.add(def_file) else: # Respect environment variables related to build, but target-specific # flags can still override them. 
ldflags = env_ldflags + config.get('ldflags', []) if is_executable and len(solibs): rpath = 'lib/' if self.toolset != 'target': rpath += self.toolset ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath) ldflags.append('-Wl,-rpath-link=%s' % rpath) self.WriteVariableList(ninja_file, 'ldflags', gyp.common.uniquer(map(self.ExpandSpecial, ldflags))) library_dirs = config.get('library_dirs', []) if self.flavor == 'win': library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name) for l in library_dirs] library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l), self.flavor) for l in library_dirs] else: library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l), self.flavor) for l in library_dirs] libraries = gyp.common.uniquer(map(self.ExpandSpecial, spec.get('libraries', []))) if self.flavor == 'mac': libraries = self.xcode_settings.AdjustLibraries(libraries, config_name) elif self.flavor == 'win': libraries = self.msvs_settings.AdjustLibraries(libraries) self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries) linked_binary = output if command in ('solink', 'solink_module'): extra_bindings.append(('soname', os.path.split(output)[1])) extra_bindings.append(('lib', gyp.common.EncodePOSIXShellArgument(output))) if self.flavor != 'win': link_file_list = output if self.is_mac_bundle: # 'Dependency Framework.framework/Versions/A/Dependency Framework' -> # 'Dependency Framework.framework.rsp' link_file_list = self.xcode_settings.GetWrapperName() if arch: link_file_list += '.' 
+ arch link_file_list += '.rsp' # If an rspfile contains spaces, ninja surrounds the filename with # quotes around it and then passes it to open(), creating a file with # quotes in its name (and when looking for the rsp file, the name # makes it through bash which strips the quotes) :-/ link_file_list = link_file_list.replace(' ', '_') extra_bindings.append( ('link_file_list', gyp.common.EncodePOSIXShellArgument(link_file_list))) if self.flavor == 'win': extra_bindings.append(('binary', output)) if ('/NOENTRY' not in ldflags and not self.msvs_settings.GetNoImportLibrary(config_name)): self.target.import_lib = output + '.lib' extra_bindings.append(('implibflag', '/IMPLIB:%s' % self.target.import_lib)) pdbname = self.msvs_settings.GetPDBName( config_name, self.ExpandSpecial, output + '.pdb') output = [output, self.target.import_lib] if pdbname: output.append(pdbname) elif not self.is_mac_bundle: output = [output, output + '.TOC'] else: command = command + '_notoc' elif self.flavor == 'win': extra_bindings.append(('binary', output)) pdbname = self.msvs_settings.GetPDBName( config_name, self.ExpandSpecial, output + '.pdb') if pdbname: output = [output, pdbname] if len(solibs): extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs))) ninja_file.build(output, command + command_suffix, link_deps, implicit=list(implicit_deps), variables=extra_bindings) return linked_binary def WriteTarget(self, spec, config_name, config, link_deps, compile_deps): extra_link_deps = any(self.target_outputs.get(dep).Linkable() for dep in spec.get('dependencies', []) if dep in self.target_outputs) if spec['type'] == 'none' or (not link_deps and not extra_link_deps): # TODO(evan): don't call this function for 'none' target types, as # it doesn't do anything, and we fake out a 'binary' with a stamp file. 
self.target.binary = compile_deps self.target.type = 'none' elif spec['type'] == 'static_library': self.target.binary = self.ComputeOutput(spec) if (self.flavor not in ('mac', 'openbsd', 'win') and not self.is_standalone_static_library): self.ninja.build(self.target.binary, 'alink_thin', link_deps, order_only=compile_deps) else: variables = [] if self.xcode_settings: libtool_flags = self.xcode_settings.GetLibtoolflags(config_name) if libtool_flags: variables.append(('libtool_flags', libtool_flags)) if self.msvs_settings: libflags = self.msvs_settings.GetLibFlags(config_name, self.GypPathToNinja) variables.append(('libflags', libflags)) if self.flavor != 'mac' or len(self.archs) == 1: self.AppendPostbuildVariable(variables, spec, self.target.binary, self.target.binary) self.ninja.build(self.target.binary, 'alink', link_deps, order_only=compile_deps, variables=variables) else: inputs = [] for arch in self.archs: output = self.ComputeOutput(spec, arch) self.arch_subninjas[arch].build(output, 'alink', link_deps[arch], order_only=compile_deps, variables=variables) inputs.append(output) # TODO: It's not clear if libtool_flags should be passed to the alink # call that combines single-arch .a files into a fat .a file. self.AppendPostbuildVariable(variables, spec, self.target.binary, self.target.binary) self.ninja.build(self.target.binary, 'alink', inputs, # FIXME: test proving order_only=compile_deps isn't # needed. 
                       variables=variables)
      else:
        self.target.binary = self.WriteLink(spec, config_name, config,
                                            link_deps)
    return self.target.binary

  def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
    """Write the edge that assembles the mac bundle from its dependencies.

    Framework-like bundles (shared_library / loadable_module) use the
    'package_framework' rule; everything else gets a plain 'stamp'.
    Returns the bundle output path (with a '.stamp' suffix when |is_empty|).
    """
    assert self.is_mac_bundle
    package_framework = spec['type'] in ('shared_library', 'loadable_module')
    output = self.ComputeMacBundleOutput()
    if is_empty:
      # An empty bundle has no real product; stamp a marker file instead.
      output += '.stamp'
    variables = []
    self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                                 is_command_start=not package_framework)
    if package_framework and not is_empty:
      variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
      self.ninja.build(output, 'package_framework', mac_bundle_depends,
                       variables=variables)
    else:
      self.ninja.build(output, 'stamp', mac_bundle_depends,
                       variables=variables)
    self.target.bundle = output
    return output

  def GetToolchainEnv(self, additional_settings=None):
    """Returns the variables toolchain would set for build steps."""
    env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
    if self.flavor == 'win':
      # On Windows the MSVS macro environment replaces the Xcode one.
      env = self.GetMsvsToolchainEnv(
          additional_settings=additional_settings)
    return env

  def GetMsvsToolchainEnv(self, additional_settings=None):
    """Returns the variables Visual Studio would set for build steps."""
    # NOTE(review): |additional_settings| is accepted for signature parity
    # with GetToolchainEnv/GetSortedXcodeEnv but is unused here.
    return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
                                            config=self.config_name)

  def GetSortedXcodeEnv(self, additional_settings=None):
    """Returns the variables Xcode would set for build steps."""
    assert self.abs_build_dir
    abs_build_dir = self.abs_build_dir
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, abs_build_dir,
        os.path.join(abs_build_dir, self.build_to_base), self.config_name,
        additional_settings)

  def GetSortedXcodePostbuildEnv(self):
    """Returns the variables Xcode would set for postbuild steps."""
    postbuild_settings = {}
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting( 'CHROMIUM_STRIP_SAVE_FILE') if strip_save_file: postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file return self.GetSortedXcodeEnv(additional_settings=postbuild_settings) def AppendPostbuildVariable(self, variables, spec, output, binary, is_command_start=False): """Adds a 'postbuild' variable if there is a postbuild for |output|.""" postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start) if postbuild: variables.append(('postbuilds', postbuild)) def GetPostbuildCommand(self, spec, output, output_binary, is_command_start): """Returns a shell command that runs all the postbuilds, and removes |output| if any of them fails. If |is_command_start| is False, then the returned string will start with ' && '.""" if not self.xcode_settings or spec['type'] == 'none' or not output: return '' output = QuoteShellArgument(output, self.flavor) postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True) if output_binary is not None: postbuilds = self.xcode_settings.AddImplicitPostbuilds( self.config_name, os.path.normpath(os.path.join(self.base_to_build, output)), QuoteShellArgument( os.path.normpath(os.path.join(self.base_to_build, output_binary)), self.flavor), postbuilds, quiet=True) if not postbuilds: return '' # Postbuilds expect to be run in the gyp file's directory, so insert an # implicit postbuild to cd to there. postbuilds.insert(0, gyp.common.EncodePOSIXShellList( ['cd', self.build_to_base])) env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv()) # G will be non-null if any postbuild fails. Run all postbuilds in a # subshell. commands = env + ' (' + \ ' && '.join([ninja_syntax.escape(command) for command in postbuilds]) command_string = (commands + '); G=$$?; ' # Remove the final output if any postbuild failed. 
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)') if is_command_start: return '(' + command_string + ' && ' else: return '$ && (' + command_string def ComputeExportEnvString(self, env): """Given an environment, returns a string looking like 'export FOO=foo; export BAR="${FOO} bar;' that exports |env| to the shell.""" export_str = [] for k, v in env: export_str.append('export %s=%s;' % (k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v)))) return ' '.join(export_str) def ComputeMacBundleOutput(self): """Return the 'output' (full output path) to a bundle output directory.""" assert self.is_mac_bundle path = generator_default_variables['PRODUCT_DIR'] return self.ExpandSpecial( os.path.join(path, self.xcode_settings.GetWrapperName())) def ComputeOutputFileName(self, spec, type=None): """Compute the filename of the final output for the current target.""" if not type: type = spec['type'] default_variables = copy.copy(generator_default_variables) CalculateVariables(default_variables, {'flavor': self.flavor}) # Compute filename prefix: the product prefix, or a default for # the product type. DEFAULT_PREFIX = { 'loadable_module': default_variables['SHARED_LIB_PREFIX'], 'shared_library': default_variables['SHARED_LIB_PREFIX'], 'static_library': default_variables['STATIC_LIB_PREFIX'], 'executable': default_variables['EXECUTABLE_PREFIX'], } prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, '')) # Compute filename extension: the product extension, or a default # for the product type. DEFAULT_EXTENSION = { 'loadable_module': default_variables['SHARED_LIB_SUFFIX'], 'shared_library': default_variables['SHARED_LIB_SUFFIX'], 'static_library': default_variables['STATIC_LIB_SUFFIX'], 'executable': default_variables['EXECUTABLE_SUFFIX'], } extension = spec.get('product_extension') if extension: extension = '.' + extension else: extension = DEFAULT_EXTENSION.get(type, '') if 'product_name' in spec: # If we were given an explicit name, use that. 
target = spec['product_name'] else: # Otherwise, derive a name from the target name. target = spec['target_name'] if prefix == 'lib': # Snip out an extra 'lib' from libs if appropriate. target = StripPrefix(target, 'lib') if type in ('static_library', 'loadable_module', 'shared_library', 'executable'): return '%s%s%s' % (prefix, target, extension) elif type == 'none': return '%s.stamp' % target else: raise Exception('Unhandled output type %s' % type) def ComputeOutput(self, spec, arch=None): """Compute the path for the final output of the spec.""" type = spec['type'] if self.flavor == 'win': override = self.msvs_settings.GetOutputName(self.config_name, self.ExpandSpecial) if override: return override if arch is None and self.flavor == 'mac' and type in ( 'static_library', 'executable', 'shared_library', 'loadable_module'): filename = self.xcode_settings.GetExecutablePath() else: filename = self.ComputeOutputFileName(spec, type) if arch is None and 'product_dir' in spec: path = os.path.join(spec['product_dir'], filename) return self.ExpandSpecial(path) # Some products go into the output root, libraries go into shared library # dir, and everything else goes into the normal place. type_in_output_root = ['executable', 'loadable_module'] if self.flavor == 'mac' and self.toolset == 'target': type_in_output_root += ['shared_library', 'static_library'] elif self.flavor == 'win' and self.toolset == 'target': type_in_output_root += ['shared_library'] if arch is not None: # Make sure partial executables don't end up in a bundle or the regular # output directory. 
archdir = 'arch' if self.toolset != 'target': archdir = os.path.join('arch', '%s' % self.toolset) return os.path.join(archdir, AddArch(filename, arch)) elif type in type_in_output_root or self.is_standalone_static_library: return filename elif type == 'shared_library': libdir = 'lib' if self.toolset != 'target': libdir = os.path.join('lib', '%s' % self.toolset) return os.path.join(libdir, filename) else: return self.GypPathToUniqueOutput(filename, qualified=False) def WriteVariableList(self, ninja_file, var, values): assert not isinstance(values, str) if values is None: values = [] ninja_file.variable(var, ' '.join(values)) def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool, depfile=None): """Write out a new ninja "rule" statement for a given command. Returns the name of the new rule, and a copy of |args| with variables expanded.""" if self.flavor == 'win': args = [self.msvs_settings.ConvertVSMacros( arg, self.base_to_build, config=self.config_name) for arg in args] description = self.msvs_settings.ConvertVSMacros( description, config=self.config_name) elif self.flavor == 'mac': # |env| is an empty list on non-mac. args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args] description = gyp.xcode_emulation.ExpandEnvVars(description, env) # TODO: we shouldn't need to qualify names; we do it because # currently the ninja rule namespace is global, but it really # should be scoped to the subninja. rule_name = self.name if self.toolset == 'target': rule_name += '.' + self.toolset rule_name += '.' + name rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name) # Remove variable references, but not if they refer to the magic rule # variables. This is not quite right, as it also protects these for # actions, not just for rules where they are valid. Good enough. protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ] protect = '(?!' 
+ '|'.join(map(re.escape, protect)) + ')' description = re.sub(protect + r'\$', '_', description) # gyp dictates that commands are run from the base directory. # cd into the directory before running, and adjust paths in # the arguments to point to the proper locations. rspfile = None rspfile_content = None args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args] if self.flavor == 'win': rspfile = rule_name + '.$unique_name.rsp' # The cygwin case handles this inside the bash sub-shell. run_in = '' if is_cygwin else ' ' + self.build_to_base if is_cygwin: rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine( args, self.build_to_base) else: rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args) command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable + rspfile + run_in) else: env = self.ComputeExportEnvString(env) command = gyp.common.EncodePOSIXShellList(args) command = 'cd %s; ' % self.build_to_base + env + command # GYP rules/actions express being no-ops by not touching their outputs. # Avoid executing downstream dependencies in this case by specifying # restat=1 to ninja. self.ninja.rule(rule_name, command, description, depfile=depfile, restat=True, pool=pool, rspfile=rspfile, rspfile_content=rspfile_content) self.ninja.newline() return rule_name, args def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" global generator_additional_non_configuration_keys global generator_additional_path_sections flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') default_variables.setdefault('SHARED_LIB_DIR', generator_default_variables['PRODUCT_DIR']) default_variables.setdefault('LIB_DIR', generator_default_variables['PRODUCT_DIR']) # Copy additional generator configuration data from Xcode, which is shared # by the Mac Ninja generator. 
import gyp.generator.xcode as xcode_generator generator_additional_non_configuration_keys = getattr(xcode_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(xcode_generator, 'generator_additional_path_sections', []) global generator_extra_sources_for_rules generator_extra_sources_for_rules = getattr(xcode_generator, 'generator_extra_sources_for_rules', []) elif flavor == 'win': exts = gyp.MSVSUtil.TARGET_TYPE_EXT default_variables.setdefault('OS', 'win') default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable'] default_variables['STATIC_LIB_PREFIX'] = '' default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library'] default_variables['SHARED_LIB_PREFIX'] = '' default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library'] # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. default_variables.setdefault('OS', operating_system) default_variables.setdefault('SHARED_LIB_SUFFIX', '.so') default_variables.setdefault('SHARED_LIB_DIR', os.path.join('$!PRODUCT_DIR', 'lib')) default_variables.setdefault('LIB_DIR', os.path.join('$!PRODUCT_DIR', 'obj')) def ComputeOutputDir(params): """Returns the path from the toplevel_dir to the build output directory.""" # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to ninja easier, ninja doesn't put anything here. 
generator_dir = os.path.relpath(params['options'].generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = params.get('generator_flags', {}).get('output_dir', 'out') # Relative path from source root to our output files. e.g. "out" return os.path.normpath(os.path.join(generator_dir, output_dir)) def CalculateGeneratorInputInfo(params): """Called by __init__ to initialize generator values based on params.""" # E.g. "out/gypfiles" toplevel = params['options'].toplevel_dir qualified_out_dir = os.path.normpath(os.path.join( toplevel, ComputeOutputDir(params), 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': toplevel, 'qualified_out_dir': qualified_out_dir, } def OpenOutput(path, mode='w'): """Open |path| for writing, creating directories if necessary.""" gyp.common.EnsureDirExists(path) return open(path, mode) def CommandWithWrapper(cmd, wrappers, prog): wrapper = wrappers.get(cmd, '') if wrapper: return wrapper + ' ' + prog return prog def GetDefaultConcurrentLinks(): """Returns a best-guess for a number of concurrent links.""" pool_size = int(os.getenv('GYP_LINK_CONCURRENCY', 0)) if pool_size: return pool_size if sys.platform in ('win32', 'cygwin'): import ctypes class MEMORYSTATUSEX(ctypes.Structure): _fields_ = [ ("dwLength", ctypes.c_ulong), ("dwMemoryLoad", ctypes.c_ulong), ("ullTotalPhys", ctypes.c_ulonglong), ("ullAvailPhys", ctypes.c_ulonglong), ("ullTotalPageFile", ctypes.c_ulonglong), ("ullAvailPageFile", ctypes.c_ulonglong), ("ullTotalVirtual", ctypes.c_ulonglong), ("ullAvailVirtual", ctypes.c_ulonglong), ("sullAvailExtendedVirtual", ctypes.c_ulonglong), ] stat = MEMORYSTATUSEX() stat.dwLength = ctypes.sizeof(stat) ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat)) mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32))) return min(mem_limit, hard_cap) elif 
sys.platform.startswith('linux'): if os.path.exists("/proc/meminfo"): with open("/proc/meminfo") as meminfo: memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB') for line in meminfo: match = memtotal_re.match(line) if not match: continue # Allow 8Gb per link on Linux because Gold is quite memory hungry return max(1, int(match.group(1)) / (8 * (2 ** 20))) return 1 elif sys.platform == 'darwin': try: avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize'])) # A static library debug build of Chromium's unit_tests takes ~2.7GB, so # 4GB per ld process allows for some more bloat. return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB except: return 1 else: # TODO(scottmg): Implement this for other platforms. return 1 def _GetWinLinkRuleNameSuffix(embed_manifest): """Returns the suffix used to select an appropriate linking rule depending on whether the manifest embedding is enabled.""" return '_embed' if embed_manifest else '' def _AddWinLinkRules(master_ninja, embed_manifest): """Adds link rules for Windows platform to |master_ninja|.""" def FullLinkCommand(ldcmd, out, binary_type): resource_name = { 'exe': '1', 'dll': '2', }[binary_type] return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \ '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \ '$manifests' % { 'python': sys.executable, 'out': out, 'ldcmd': ldcmd, 'resname': resource_name, 'embed': embed_manifest } rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest) use_separate_mspdbsrv = ( int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0) dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper() dllcmd = ('%s gyp-win-tool link-wrapper $arch %s ' '$ld /nologo $implibflag /DLL /OUT:$binary ' '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv)) dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll') master_ninja.rule('solink' + rule_name_suffix, description=dlldesc, command=dllcmd, rspfile='$binary.rsp', rspfile_content='$libs $in_newline 
$ldflags', restat=True, pool='link_pool') master_ninja.rule('solink_module' + rule_name_suffix, description=dlldesc, command=dllcmd, rspfile='$binary.rsp', rspfile_content='$libs $in_newline $ldflags', restat=True, pool='link_pool') # Note that ldflags goes at the end so that it has the option of # overriding default settings earlier in the command line. exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s ' '$ld /nologo /OUT:$binary @$binary.rsp' % (sys.executable, use_separate_mspdbsrv)) exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe') master_ninja.rule('link' + rule_name_suffix, description='LINK%s $binary' % rule_name_suffix.upper(), command=exe_cmd, rspfile='$binary.rsp', rspfile_content='$in_newline $libs $ldflags', pool='link_pool') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_name): options = params['options'] flavor = gyp.common.GetFlavor(params) generator_flags = params.get('generator_flags', {}) # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.normpath( os.path.join(ComputeOutputDir(params), config_name)) toplevel_build = os.path.join(options.toplevel_dir, build_dir) master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja')) master_ninja = ninja_syntax.Writer(master_ninja_file, width=120) # Put build-time support tools in out/{config_name}. gyp.common.CopyTool(flavor, toplevel_build) # Grab make settings for CC/CXX. # The rules are # - The priority from low to high is gcc/g++, the 'make_global_settings' in # gyp, the environment variable. # - If there is no 'make_global_settings' for CC.host/CXX.host or # 'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set # to cc/cxx. if flavor == 'win': ar = 'lib.exe' # cc and cxx must be set to the correct architecture by overriding with one # of cl_x86 or cl_x64 below. 
cc = 'UNSET' cxx = 'UNSET' ld = 'link.exe' ld_host = '$ld' else: ar = 'ar' cc = 'cc' cxx = 'c++' ld = '$cc' ldxx = '$cxx' ld_host = '$cc_host' ldxx_host = '$cxx_host' ar_host = 'ar' cc_host = None cxx_host = None cc_host_global_setting = None cxx_host_global_setting = None clang_cl = None nm = 'nm' nm_host = 'nm' readelf = 'readelf' readelf_host = 'readelf' build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings = data[build_file].get('make_global_settings', []) build_to_root = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) wrappers = {} for key, value in make_global_settings: if key == 'AR': ar = os.path.join(build_to_root, value) if key == 'AR.host': ar_host = os.path.join(build_to_root, value) if key == 'CC': cc = os.path.join(build_to_root, value) if cc.endswith('clang-cl'): clang_cl = cc if key == 'CXX': cxx = os.path.join(build_to_root, value) if key == 'CC.host': cc_host = os.path.join(build_to_root, value) cc_host_global_setting = value if key == 'CXX.host': cxx_host = os.path.join(build_to_root, value) cxx_host_global_setting = value if key == 'LD': ld = os.path.join(build_to_root, value) if key == 'LD.host': ld_host = os.path.join(build_to_root, value) if key == 'NM': nm = os.path.join(build_to_root, value) if key == 'NM.host': nm_host = os.path.join(build_to_root, value) if key == 'READELF': readelf = os.path.join(build_to_root, value) if key == 'READELF.host': readelf_host = os.path.join(build_to_root, value) if key.endswith('_wrapper'): wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value) # Support wrappers from environment variables too. 
for key, value in os.environ.iteritems(): if key.lower().endswith('_wrapper'): key_prefix = key[:-len('_wrapper')] key_prefix = re.sub(r'\.HOST$', '.host', key_prefix) wrappers[key_prefix] = os.path.join(build_to_root, value) if flavor == 'win': configs = [target_dicts[qualified_target]['configurations'][config_name] for qualified_target in target_list] shared_system_includes = None if not generator_flags.get('ninja_use_custom_environment_files', 0): shared_system_includes = \ gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes( configs, generator_flags) cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles( toplevel_build, generator_flags, shared_system_includes, OpenOutput) for arch, path in cl_paths.iteritems(): if clang_cl: # If we have selected clang-cl, use that instead. path = clang_cl command = CommandWithWrapper('CC', wrappers, QuoteShellArgument(path, 'win')) if clang_cl: # Use clang-cl to cross-compile for x86 or x86_64. command += (' -m32' if arch == 'x86' else ' -m64') master_ninja.variable('cl_' + arch, command) cc = GetEnvironFallback(['CC_target', 'CC'], cc) master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc)) cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx) master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx)) if flavor == 'win': master_ninja.variable('ld', ld) master_ninja.variable('idl', 'midl.exe') master_ninja.variable('ar', ar) master_ninja.variable('rc', 'rc.exe') master_ninja.variable('ml_x86', 'ml.exe') master_ninja.variable('ml_x64', 'ml64.exe') master_ninja.variable('mt', 'mt.exe') else: master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld)) master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx)) master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar)) if flavor != 'mac': # Mac does not use readelf/nm for .TOC generation, so avoiding polluting # the master ninja with extra unused variables. 
master_ninja.variable( 'nm', GetEnvironFallback(['NM_target', 'NM'], nm)) master_ninja.variable( 'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf)) if generator_supports_multiple_toolsets: if not cc_host: cc_host = cc if not cxx_host: cxx_host = cxx master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host)) master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host)) master_ninja.variable('readelf_host', GetEnvironFallback(['READELF_host'], readelf_host)) cc_host = GetEnvironFallback(['CC_host'], cc_host) cxx_host = GetEnvironFallback(['CXX_host'], cxx_host) # The environment variable could be used in 'make_global_settings', like # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here. if '$(CC)' in cc_host and cc_host_global_setting: cc_host = cc_host_global_setting.replace('$(CC)', cc) if '$(CXX)' in cxx_host and cxx_host_global_setting: cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx) master_ninja.variable('cc_host', CommandWithWrapper('CC.host', wrappers, cc_host)) master_ninja.variable('cxx_host', CommandWithWrapper('CXX.host', wrappers, cxx_host)) if flavor == 'win': master_ninja.variable('ld_host', ld_host) else: master_ninja.variable('ld_host', CommandWithWrapper( 'LINK', wrappers, ld_host)) master_ninja.variable('ldxx_host', CommandWithWrapper( 'LINK', wrappers, ldxx_host)) master_ninja.newline() master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks()) master_ninja.newline() deps = 'msvc' if flavor == 'win' else 'gcc' if flavor != 'win': master_ninja.rule( 'cc', description='CC $out', command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c ' '$cflags_pch_c -c $in -o $out'), depfile='$out.d', deps=deps) master_ninja.rule( 'cc_s', description='CC $out', command=('$cc $defines $includes $cflags $cflags_c ' '$cflags_pch_c -c $in -o $out')) master_ninja.rule( 'cxx', description='CXX $out', command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc ' 
'$cflags_pch_cc -c $in -o $out'), depfile='$out.d', deps=deps) else: # TODO(scottmg) Separate pdb names is a test to see if it works around # http://crbug.com/142362. It seems there's a race between the creation of # the .pdb by the precompiled header step for .cc and the compilation of # .c files. This should be handled by mspdbsrv, but rarely errors out with # c1xx : fatal error C1033: cannot open program database # By making the rules target separate pdb files this might be avoided. cc_command = ('ninja -t msvc -e $arch ' + '-- ' '$cc /nologo /showIncludes /FC ' '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ') cxx_command = ('ninja -t msvc -e $arch ' + '-- ' '$cxx /nologo /showIncludes /FC ' '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ') master_ninja.rule( 'cc', description='CC $out', command=cc_command, rspfile='$out.rsp', rspfile_content='$defines $includes $cflags $cflags_c', deps=deps) master_ninja.rule( 'cxx', description='CXX $out', command=cxx_command, rspfile='$out.rsp', rspfile_content='$defines $includes $cflags $cflags_cc', deps=deps) master_ninja.rule( 'idl', description='IDL $in', command=('%s gyp-win-tool midl-wrapper $arch $outdir ' '$tlb $h $dlldata $iid $proxy $in ' '$midl_includes $idlflags' % sys.executable)) master_ninja.rule( 'rc', description='RC $in', # Note: $in must be last otherwise rc.exe complains. 
command=('%s gyp-win-tool rc-wrapper ' '$arch $rc $defines $resource_includes $rcflags /fo$out $in' % sys.executable)) master_ninja.rule( 'asm', description='ASM $out', command=('%s gyp-win-tool asm-wrapper ' '$arch $asm $defines $includes $asmflags /c /Fo $out $in' % sys.executable)) if flavor != 'mac' and flavor != 'win': master_ninja.rule( 'alink', description='AR $out', command='rm -f $out && $ar rcs $arflags $out $in') master_ninja.rule( 'alink_thin', description='AR $out', command='rm -f $out && $ar rcsT $arflags $out $in') # This allows targets that only need to depend on $lib's API to declare an # order-only dependency on $lib.TOC and avoid relinking such downstream # dependencies when $lib changes only in non-public ways. # The resulting string leaves an uninterpolated %{suffix} which # is used in the final substitution below. mtime_preserving_solink_base = ( 'if [ ! -e $lib -o ! -e $lib.TOC ]; then ' '%(solink)s && %(extract_toc)s > $lib.TOC; else ' '%(solink)s && %(extract_toc)s > $lib.tmp && ' 'if ! 
cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; ' 'fi; fi' % { 'solink': '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s', 'extract_toc': ('{ $readelf -d $lib | grep SONAME ; ' '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')}) master_ninja.rule( 'solink', description='SOLINK $lib', restat=True, command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'}, rspfile='$link_file_list', rspfile_content= '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs', pool='link_pool') master_ninja.rule( 'solink_module', description='SOLINK(module) $lib', restat=True, command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'}, rspfile='$link_file_list', rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs', pool='link_pool') master_ninja.rule( 'link', description='LINK $out', command=('$ld $ldflags -o $out ' '-Wl,--start-group $in -Wl,--end-group $solibs $libs'), pool='link_pool') elif flavor == 'win': master_ninja.rule( 'alink', description='LIB $out', command=('%s gyp-win-tool link-wrapper $arch False ' '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' % sys.executable), rspfile='$out.rsp', rspfile_content='$in_newline $libflags') _AddWinLinkRules(master_ninja, embed_manifest=True) _AddWinLinkRules(master_ninja, embed_manifest=False) else: master_ninja.rule( 'objc', description='OBJC $out', command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc ' '$cflags_pch_objc -c $in -o $out'), depfile='$out.d', deps=deps) master_ninja.rule( 'objcxx', description='OBJCXX $out', command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc ' '$cflags_pch_objcc -c $in -o $out'), depfile='$out.d', deps=deps) master_ninja.rule( 'alink', description='LIBTOOL-STATIC $out, POSTBUILDS', command='rm -f $out && ' './gyp-mac-tool filter-libtool libtool $libtool_flags ' '-static -o $out $in' '$postbuilds') master_ninja.rule( 'lipo', description='LIPO $out, POSTBUILDS', command='rm -f $out && lipo -create $in -output 
$out$postbuilds') master_ninja.rule( 'solipo', description='SOLIPO $out, POSTBUILDS', command=( 'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&' '%(extract_toc)s > $lib.TOC' % { 'extract_toc': '{ otool -l $lib | grep LC_ID_DYLIB -A 5; ' 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})) # Record the public interface of $lib in $lib.TOC. See the corresponding # comment in the posix section above for details. solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s' mtime_preserving_solink_base = ( 'if [ ! -e $lib -o ! -e $lib.TOC ] || ' # Always force dependent targets to relink if this library # reexports something. Handling this correctly would require # recursive TOC dumping but this is rare in practice, so punt. 'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then ' '%(solink)s && %(extract_toc)s > $lib.TOC; ' 'else ' '%(solink)s && %(extract_toc)s > $lib.tmp && ' 'if ! cmp -s $lib.tmp $lib.TOC; then ' 'mv $lib.tmp $lib.TOC ; ' 'fi; ' 'fi' % { 'solink': solink_base, 'extract_toc': '{ otool -l $lib | grep LC_ID_DYLIB -A 5; ' 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}) solink_suffix = '@$link_file_list$postbuilds' master_ninja.rule( 'solink', description='SOLINK $lib, POSTBUILDS', restat=True, command=mtime_preserving_solink_base % {'suffix': solink_suffix, 'type': '-shared'}, rspfile='$link_file_list', rspfile_content='$in $solibs $libs', pool='link_pool') master_ninja.rule( 'solink_notoc', description='SOLINK $lib, POSTBUILDS', restat=True, command=solink_base % {'suffix':solink_suffix, 'type': '-shared'}, rspfile='$link_file_list', rspfile_content='$in $solibs $libs', pool='link_pool') master_ninja.rule( 'solink_module', description='SOLINK(module) $lib, POSTBUILDS', restat=True, command=mtime_preserving_solink_base % {'suffix': solink_suffix, 'type': '-bundle'}, rspfile='$link_file_list', rspfile_content='$in $solibs $libs', pool='link_pool') master_ninja.rule( 'solink_module_notoc', description='SOLINK(module) $lib, 
POSTBUILDS', restat=True, command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'}, rspfile='$link_file_list', rspfile_content='$in $solibs $libs', pool='link_pool') master_ninja.rule( 'link', description='LINK $out, POSTBUILDS', command=('$ld $ldflags -o $out ' '$in $solibs $libs$postbuilds'), pool='link_pool') master_ninja.rule( 'preprocess_infoplist', description='PREPROCESS INFOPLIST $out', command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && ' 'plutil -convert xml1 $out $out')) master_ninja.rule( 'copy_infoplist', description='COPY INFOPLIST $in', command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys') master_ninja.rule( 'merge_infoplist', description='MERGE INFOPLISTS $in', command='$env ./gyp-mac-tool merge-info-plist $out $in') master_ninja.rule( 'compile_xcassets', description='COMPILE XCASSETS $in', command='$env ./gyp-mac-tool compile-xcassets $keys $in') master_ninja.rule( 'mac_tool', description='MACTOOL $mactool_cmd $in', command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary') master_ninja.rule( 'package_framework', description='PACKAGE FRAMEWORK $out, POSTBUILDS', command='./gyp-mac-tool package-framework $out $version$postbuilds ' '&& touch $out') if flavor == 'win': master_ninja.rule( 'stamp', description='STAMP $out', command='%s gyp-win-tool stamp $out' % sys.executable) master_ninja.rule( 'copy', description='COPY $in $out', command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable) else: master_ninja.rule( 'stamp', description='STAMP $out', command='${postbuilds}touch $out') master_ninja.rule( 'copy', description='COPY $in $out', command='rm -rf $out && cp -af $in $out') master_ninja.newline() all_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, os.path.normpath(build_file)): all_targets.add(target) all_outputs = set() # target_outputs is a map from qualified target name to a Target object. 
target_outputs = {} # target_short_names is a map from target short name to a list of Target # objects. target_short_names = {} # short name of targets that were skipped because they didn't contain anything # interesting. # NOTE: there may be overlap between this an non_empty_target_names. empty_target_names = set() # Set of non-empty short target names. # NOTE: there may be overlap between this an empty_target_names. non_empty_target_names = set() for qualified_target in target_list: # qualified_target is like: third_party/icu/icu.gyp:icui18n#target build_file, name, toolset = \ gyp.common.ParseQualifiedTarget(qualified_target) this_make_global_settings = data[build_file].get('make_global_settings', []) assert make_global_settings == this_make_global_settings, ( "make_global_settings needs to be the same for all targets. %s vs. %s" % (this_make_global_settings, make_global_settings)) spec = target_dicts[qualified_target] if flavor == 'mac': gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec) build_file = gyp.common.RelativePath(build_file, options.toplevel_dir) qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name, toolset) hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest() base_path = os.path.dirname(build_file) obj = 'obj' if toolset != 'target': obj += '.' + toolset output_file = os.path.join(obj, base_path, name + '.ninja') ninja_output = StringIO() writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir, ninja_output, toplevel_build, output_file, flavor, toplevel_dir=options.toplevel_dir) target = writer.WriteSpec(spec, config_name, generator_flags) if ninja_output.tell() > 0: # Only create files for ninja files that actually have contents. 
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file: ninja_file.write(ninja_output.getvalue()) ninja_output.close() master_ninja.subninja(output_file) if target: if name != target.FinalOutput() and spec['toolset'] == 'target': target_short_names.setdefault(name, []).append(target) target_outputs[qualified_target] = target if qualified_target in all_targets: all_outputs.add(target.FinalOutput()) non_empty_target_names.add(name) else: empty_target_names.add(name) if target_short_names: # Write a short name to build this target. This benefits both the # "build chrome" case as well as the gyp tests, which expect to be # able to run actions and build libraries by their short name. master_ninja.newline() master_ninja.comment('Short names for targets.') for short_name in target_short_names: master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in target_short_names[short_name]]) # Write phony targets for any empty targets that weren't written yet. As # short names are not necessarily unique only do this for short names that # haven't already been output for another target. empty_target_names = empty_target_names - non_empty_target_names if empty_target_names: master_ninja.newline() master_ninja.comment('Empty targets (output for completeness).') for name in sorted(empty_target_names): master_ninja.build(name, 'phony') if all_outputs: master_ninja.newline() master_ninja.build('all', 'phony', list(all_outputs)) master_ninja.default(generator_flags.get('default_target', 'all')) master_ninja_file.close() def PerformBuild(data, configurations, params): options = params['options'] for config in configurations: builddir = os.path.join(options.toplevel_dir, 'out', config) arguments = ['ninja', '-C', builddir] print 'Building [%s]: %s' % (config, arguments) subprocess.check_call(arguments) def CallGenerateOutputForConfig(arglist): # Ignore the interrupt signal so that the parent process catches it and # kills all multiprocessing children. 
signal.signal(signal.SIGINT, signal.SIG_IGN) (target_list, target_dicts, data, params, config_name) = arglist GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) def GenerateOutput(target_list, target_dicts, data, params): # Update target_dicts for iOS device builds. target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator( target_dicts) user_config = params.get('generator_flags', {}).get('config', None) if gyp.common.GetFlavor(params) == 'win': target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts) target_list, target_dicts = MSVSUtil.InsertLargePdbShims( target_list, target_dicts, generator_default_variables) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() if params['parallel']: try: pool = multiprocessing.Pool(len(config_names)) arglists = [] for config_name in config_names: arglists.append( (target_list, target_dicts, data, params, config_name)) pool.map(CallGenerateOutputForConfig, arglists) except KeyboardInterrupt, e: pool.terminate() raise e else: for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
mit
SebasSBM/django
django/conf/locale/nl/formats.py
504
4472
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' # '20 januari 2009' TIME_FORMAT = 'H:i' # '15:23' DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23' YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009' MONTH_DAY_FORMAT = 'j F' # '20 januari' SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009' SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23' FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag') # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d-%m-%Y', '%d-%m-%y', # '20-01-2009', '20-01-09' '%d/%m/%Y', '%d/%m/%y', # '20/01/2009', '20/01/09' # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09' # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09' ] # Kept ISO formats as one is in first position TIME_INPUT_FORMATS = [ '%H:%M:%S', # '15:23:35' '%H:%M:%S.%f', # '15:23:35.000200' '%H.%M:%S', # '15.23:35' '%H.%M:%S.%f', # '15.23:35.000200' '%H.%M', # '15.23' '%H:%M', # '15:23' ] DATETIME_INPUT_FORMATS = [ # With time in %H:%M:%S : '%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35' '%d/%m/%Y %H:%M:%S', '%d/%m/%y %H:%M:%S', '%Y/%m/%d %H:%M:%S', # '20/01/2009 15:23:35', '20/01/09 15:23:35', '2009/01/20 15:23:35' # '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35' # '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35' # With time in %H:%M:%S.%f : '%d-%m-%Y %H:%M:%S.%f', '%d-%m-%y %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S.%f', # '20-01-2009 15:23:35.000200', '20-01-09 15:23:35.000200', '2009-01-20 15:23:35.000200' '%d/%m/%Y %H:%M:%S.%f', '%d/%m/%y %H:%M:%S.%f', 
'%Y/%m/%d %H:%M:%S.%f', # '20/01/2009 15:23:35.000200', '20/01/09 15:23:35.000200', '2009/01/20 15:23:35.000200' # With time in %H.%M:%S : '%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35' '%d/%m/%Y %H.%M:%S', '%d/%m/%y %H.%M:%S', # '20/01/2009 15.23:35', '20/01/09 15.23:35' # '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35' # '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35' # With time in %H.%M:%S.%f : '%d-%m-%Y %H.%M:%S.%f', '%d-%m-%y %H.%M:%S.%f', # '20-01-2009 15.23:35.000200', '20-01-09 15.23:35.000200' '%d/%m/%Y %H.%M:%S.%f', '%d/%m/%y %H.%M:%S.%f', # '20/01/2009 15.23:35.000200', '20/01/09 15.23:35.000200' # With time in %H:%M : '%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23' '%d/%m/%Y %H:%M', '%d/%m/%y %H:%M', '%Y/%m/%d %H:%M', # '20/01/2009 15:23', '20/01/09 15:23', '2009/01/20 15:23' # '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23' # '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23' # With time in %H.%M : '%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23' '%d/%m/%Y %H.%M', '%d/%m/%y %H.%M', # '20/01/2009 15.23', '20/01/09 15.23' # '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23' # '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23' # Without time : '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20' '%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d', # '20/01/2009', '20/01/09', '2009/01/20' # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09' # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
allenlavoie/tensorflow
tensorflow/python/ops/distributions/categorical.py
8
12156
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Categorical distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution from tensorflow.python.ops.distributions import kullback_leibler from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util.tf_export import tf_export def _broadcast_cat_event_and_params(event, params, base_dtype=dtypes.int32): """Broadcasts the event or distribution parameters.""" if event.shape.ndims is None: raise NotImplementedError( "Cannot broadcast with an event tensor of unknown rank.") if event.dtype.is_integer: pass elif event.dtype.is_floating: # When `validate_args=True` we've already ensured int/float casting # is closed. 
event = math_ops.cast(event, dtype=dtypes.int32) else: raise TypeError("`value` should have integer `dtype` or " "`self.dtype` ({})".format(base_dtype)) if params.get_shape()[:-1] == event.get_shape(): params = params else: params *= array_ops.ones_like( array_ops.expand_dims(event, -1), dtype=params.dtype) params_shape = array_ops.shape(params)[:-1] event *= array_ops.ones(params_shape, dtype=event.dtype) event.set_shape(tensor_shape.TensorShape(params.get_shape()[:-1])) return event, params @tf_export("distributions.Categorical") class Categorical(distribution.Distribution): """Categorical distribution. The Categorical distribution is parameterized by either probabilities or log-probabilities of a set of `K` classes. It is defined over the integers `{0, 1, ..., K}`. The Categorical distribution is closely related to the `OneHotCategorical` and `Multinomial` distributions. The Categorical distribution can be intuited as generating samples according to `argmax{ OneHotCategorical(probs) }` itself being identical to `argmax{ Multinomial(probs, total_count=1) }. #### Mathematical Details The probability mass function (pmf) is, ```none pmf(k; pi) = prod_j pi_j**[k == j] ``` #### Pitfalls The number of classes, `K`, must not exceed: - the largest integer representable by `self.dtype`, i.e., `2**(mantissa_bits+1)` (IEE754), - the maximum `Tensor` index, i.e., `2**31-1`. In other words, ```python K <= min(2**31-1, { tf.float16: 2**11, tf.float32: 2**24, tf.float64: 2**53 }[param.dtype]) ``` Note: This condition is validated only when `self.validate_args = True`. #### Examples Creates a 3-class distribution with the 2nd class being most likely. ```python dist = Categorical(probs=[0.1, 0.5, 0.4]) n = 1e4 empirical_prob = tf.cast( tf.histogram_fixed_width( dist.sample(int(n)), [0., 2], nbins=3), dtype=tf.float32) / n # ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32) ``` Creates a 3-class distribution with the 2nd class being most likely. 
Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than probabilities. ```python dist = Categorical(logits=np.log([0.1, 0.5, 0.4]) n = 1e4 empirical_prob = tf.cast( tf.histogram_fixed_width( dist.sample(int(n)), [0., 2], nbins=3), dtype=tf.float32) / n # ==> array([0.1045, 0.5047, 0.3908], dtype=float32) ``` Creates a 3-class distribution with the 3rd class being most likely. The distribution functions can be evaluated on counts. ```python # counts is a scalar. p = [0.1, 0.4, 0.5] dist = Categorical(probs=p) dist.prob(0) # Shape [] # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts. counts = [1, 0] dist.prob(counts) # Shape [2] # p will be broadcast to shape [3, 5, 7, 3] to match counts. counts = [[...]] # Shape [5, 7, 3] dist.prob(counts) # Shape [5, 7, 3] ``` """ def __init__( self, logits=None, probs=None, dtype=dtypes.int32, validate_args=False, allow_nan_stats=True, name="Categorical"): """Initialize Categorical distributions using class log-probabilities. Args: logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a set of Categorical distributions. The first `N - 1` dimensions index into a batch of independent distributions and the last dimension represents a vector of logits for each class. Only one of `logits` or `probs` should be passed in. probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set of Categorical distributions. The first `N - 1` dimensions index into a batch of independent distributions and the last dimension represents a vector of probabilities for each class. Only one of `logits` or `probs` should be passed in. dtype: The type of the event samples (default: int32). validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. 
When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = locals() with ops.name_scope(name, values=[logits, probs]): self._logits, self._probs = distribution_util.get_logits_and_probs( logits=logits, probs=probs, validate_args=validate_args, multidimensional=True, name=name) if validate_args: self._logits = distribution_util.embed_check_categorical_event_shape( self._logits) logits_shape_static = self._logits.get_shape().with_rank_at_least(1) if logits_shape_static.ndims is not None: self._batch_rank = ops.convert_to_tensor( logits_shape_static.ndims - 1, dtype=dtypes.int32, name="batch_rank") else: with ops.name_scope(name="batch_rank"): self._batch_rank = array_ops.rank(self._logits) - 1 logits_shape = array_ops.shape(self._logits, name="logits_shape") if logits_shape_static[-1].value is not None: self._event_size = ops.convert_to_tensor( logits_shape_static[-1].value, dtype=dtypes.int32, name="event_size") else: with ops.name_scope(name="event_size"): self._event_size = logits_shape[self._batch_rank] if logits_shape_static[:-1].is_fully_defined(): self._batch_shape_val = constant_op.constant( logits_shape_static[:-1].as_list(), dtype=dtypes.int32, name="batch_shape") else: with ops.name_scope(name="batch_shape"): self._batch_shape_val = logits_shape[:-1] super(Categorical, self).__init__( dtype=dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._logits, self._probs], name=name) @property def event_size(self): """Scalar `int32` tensor: the number of classes.""" return self._event_size @property def logits(self): """Vector of coordinatewise logits.""" return self._logits @property def probs(self): """Vector of 
coordinatewise probabilities.""" return self._probs def _batch_shape_tensor(self): return array_ops.identity(self._batch_shape_val) def _batch_shape(self): return self.logits.get_shape()[:-1] def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): if self.logits.get_shape().ndims == 2: logits_2d = self.logits else: logits_2d = array_ops.reshape(self.logits, [-1, self.event_size]) sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32 draws = random_ops.multinomial( logits_2d, n, seed=seed, output_dtype=sample_dtype) draws = array_ops.reshape( array_ops.transpose(draws), array_ops.concat([[n], self.batch_shape_tensor()], 0)) return math_ops.cast(draws, self.dtype) def _cdf(self, k): k = ops.convert_to_tensor(k, name="k") if self.validate_args: k = distribution_util.embed_check_integer_casting_closed( k, target_dtype=dtypes.int32) k, probs = _broadcast_cat_event_and_params( k, self.probs, base_dtype=self.dtype.base_dtype) # batch-flatten everything in order to use `sequence_mask()`. batch_flattened_probs = array_ops.reshape(probs, (-1, self._event_size)) batch_flattened_k = array_ops.reshape(k, [-1]) to_sum_over = array_ops.where( array_ops.sequence_mask(batch_flattened_k, self._event_size), batch_flattened_probs, array_ops.zeros_like(batch_flattened_probs)) batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1) # Reshape back to the shape of the argument. 
return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k)) def _log_prob(self, k): k = ops.convert_to_tensor(k, name="k") if self.validate_args: k = distribution_util.embed_check_integer_casting_closed( k, target_dtype=dtypes.int32) k, logits = _broadcast_cat_event_and_params( k, self.logits, base_dtype=self.dtype.base_dtype) return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k, logits=logits) def _entropy(self): return -math_ops.reduce_sum( nn_ops.log_softmax(self.logits) * self.probs, axis=-1) def _mode(self): ret = math_ops.argmax(self.logits, dimension=self._batch_rank) ret = math_ops.cast(ret, self.dtype) ret.set_shape(self.batch_shape) return ret @kullback_leibler.RegisterKL(Categorical, Categorical) def _kl_categorical_categorical(a, b, name=None): """Calculate the batched KL divergence KL(a || b) with a and b Categorical. Args: a: instance of a Categorical distribution object. b: instance of a Categorical distribution object. name: (optional) Name to use for created operations. default is "kl_categorical_categorical". Returns: Batchwise KL(a || b) """ with ops.name_scope(name, "kl_categorical_categorical", values=[a.logits, b.logits]): # sum(probs log(probs / (1 - probs))) delta_log_probs1 = (nn_ops.log_softmax(a.logits) - nn_ops.log_softmax(b.logits)) return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1, axis=-1)
apache-2.0
rajul/Pydev
plugins/org.python.pydev.jython/Lib/threading.py
11
13499
from java.lang import IllegalThreadStateException, InterruptedException from java.util import Collections, WeakHashMap from java.util.concurrent import Semaphore, CyclicBarrier from java.util.concurrent.locks import ReentrantLock from org.python.util import jython from org.python.core import Py from thread import _newFunctionThread from thread import _local as local from _threading import Lock, RLock, Condition, _Lock, _RLock, _threads, _active, _jthread_to_pythread, _register_thread, _unregister_thread import java.lang.Thread import sys as _sys from traceback import print_exc as _print_exc # Rename some stuff so "from threading import *" is safe __all__ = ['activeCount', 'active_count', 'Condition', 'currentThread', 'current_thread', 'enumerate', 'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Timer', 'setprofile', 'settrace', 'local', 'stack_size'] _VERBOSE = False if __debug__: class _Verbose(object): def __init__(self, verbose=None): if verbose is None: verbose = _VERBOSE self.__verbose = verbose def _note(self, format, *args): if self.__verbose: format = format % args format = "%s: %s\n" % ( currentThread().getName(), format) _sys.stderr.write(format) else: # Disable this when using "python -O" class _Verbose(object): def __init__(self, verbose=None): pass def _note(self, *args): pass # Support for profile and trace hooks _profile_hook = None _trace_hook = None def setprofile(func): global _profile_hook _profile_hook = func def settrace(func): global _trace_hook _trace_hook = func class Semaphore(object): def __init__(self, value=1): if value < 0: raise ValueError("Semaphore initial value must be >= 0") self._semaphore = java.util.concurrent.Semaphore(value) def acquire(self, blocking=True): if blocking: self._semaphore.acquire() return True else: return self._semaphore.tryAcquire() def __enter__(self): self.acquire() return self def release(self): self._semaphore.release() def __exit__(self, t, v, tb): self.release() ThreadStates = { 
java.lang.Thread.State.NEW : 'initial', java.lang.Thread.State.RUNNABLE: 'started', java.lang.Thread.State.BLOCKED: 'started', java.lang.Thread.State.WAITING: 'started', java.lang.Thread.State.TIMED_WAITING: 'started', java.lang.Thread.State.TERMINATED: 'stopped', } class JavaThread(object): def __init__(self, thread): self._thread = thread _register_thread(thread, self) def __repr__(self): _thread = self._thread status = ThreadStates[_thread.getState()] if _thread.isDaemon(): status + " daemon" return "<%s(%s, %s %s)>" % (self.__class__.__name__, self.getName(), status, self.ident) def __eq__(self, other): if isinstance(other, JavaThread): return self._thread == other._thread else: return False def __ne__(self, other): return not self.__eq__(other) def start(self): try: self._thread.start() except IllegalThreadStateException: raise RuntimeError("threads can only be started once") def run(self): self._thread.run() def join(self, timeout=None): if self._thread == java.lang.Thread.currentThread(): raise RuntimeError("cannot join current thread") elif self._thread.getState() == java.lang.Thread.State.NEW: raise RuntimeError("cannot join thread before it is started") if timeout: millis = timeout * 1000. millis_int = int(millis) nanos = int((millis - millis_int) * 1e6) self._thread.join(millis_int, nanos) else: self._thread.join() def ident(self): return self._thread.getId() ident = property(ident) def getName(self): return self._thread.getName() def setName(self, name): self._thread.setName(str(name)) name = property(getName, setName) def isAlive(self): return self._thread.isAlive() is_alive = isAlive def isDaemon(self): return self._thread.isDaemon() def setDaemon(self, daemonic): if self._thread.getState() != java.lang.Thread.State.NEW: # thread could in fact be dead... 
Python uses the same error raise RuntimeError("cannot set daemon status of active thread") try: self._thread.setDaemon(bool(daemonic)) except IllegalThreadStateException: # changing daemonization only makes sense in Java when the # thread is alive; need extra test on the exception # because of possible races on interrogating with getState raise RuntimeError("cannot set daemon status of active thread") daemon = property(isDaemon, setDaemon) def __tojava__(self, c): if isinstance(self._thread, c): return self._thread if isinstance(self, c): return self return Py.NoConversion class Thread(JavaThread): def __init__(self, group=None, target=None, name=None, args=None, kwargs=None): assert group is None, "group argument must be None for now" _thread = self._create_thread() JavaThread.__init__(self, _thread) if args is None: args = () if kwargs is None: kwargs = {} self._target = target self._args = args self._kwargs = kwargs if name: self._thread.setName(str(name)) def _create_thread(self): return _newFunctionThread(self.__bootstrap, ()) def run(self): if self._target: self._target(*self._args, **self._kwargs) def __bootstrap(self): try: if _trace_hook: _sys.settrace(_trace_hook) if _profile_hook: _sys.setprofile(_profile_hook) try: self.run() except SystemExit: pass except InterruptedException: # Quiet InterruptedExceptions if they're caused by # _systemrestart if not jython.shouldRestart: raise except: # If sys.stderr is no more (most likely from interpreter # shutdown) use self.__stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined. if _sys: _sys.stderr.write("Exception in thread %s:" % self.getName()) _print_exc(file=_sys.stderr) else: # Do the best job possible w/o a huge amt. of code to # approx. 
a traceback stack trace exc_type, exc_value, exc_tb = self.__exc_info() try: print>>self.__stderr, ( "Exception in thread " + self.getName() + " (most likely raised during interpreter shutdown):") print>>self.__stderr, ( "Traceback (most recent call last):") while exc_tb: print>>self.__stderr, ( ' File "%s", line %s, in %s' % (exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc_tb.tb_frame.f_code.co_name)) exc_tb = exc_tb.tb_next print>>self.__stderr, ("%s: %s" % (exc_type, exc_value)) # Make sure that exc_tb gets deleted since it is a memory # hog; deleting everything else is just for thoroughness finally: del exc_type, exc_value, exc_tb finally: self.__stop() try: self.__delete() except: pass def __stop(self): pass def __delete(self): _unregister_thread(self._thread) class _MainThread(Thread): def __init__(self): Thread.__init__(self, name="MainThread") import atexit atexit.register(self.__exitfunc) def _create_thread(self): return java.lang.Thread.currentThread() def _set_daemon(self): return False def __exitfunc(self): _unregister_thread(self._thread) t = _pickSomeNonDaemonThread() while t: t.join() t = _pickSomeNonDaemonThread() def _pickSomeNonDaemonThread(): for t in enumerate(): if not t.isDaemon() and t.isAlive(): return t return None def currentThread(): jthread = java.lang.Thread.currentThread() pythread = _jthread_to_pythread[jthread] if pythread is None: pythread = JavaThread(jthread) return pythread current_thread = currentThread def activeCount(): return len(_threads) active_count = activeCount def enumerate(): return _threads.values() from thread import stack_size _MainThread() ###################################################################### # pure Python code from CPythonLib/threading.py # The timer class was contributed by Itamar Shtull-Trauring def Timer(*args, **kwargs): return _Timer(*args, **kwargs) class _Timer(Thread): """Call a function after a specified number of seconds: t = Timer(30.0, f, args=[], kwargs={}) t.start() 
t.cancel() # stop the timer's action if it's still waiting """ def __init__(self, interval, function, args=[], kwargs={}): Thread.__init__(self) self.interval = interval self.function = function self.args = args self.kwargs = kwargs self.finished = Event() def cancel(self): """Stop the timer if it hasn't finished yet""" self.finished.set() def run(self): self.finished.wait(self.interval) if not self.finished.isSet(): self.function(*self.args, **self.kwargs) self.finished.set() # NOT USED except by BoundedSemaphore class _Semaphore(_Verbose): # After Tim Peters' semaphore class, but not quite the same (no maximum) def __init__(self, value=1, verbose=None): if value < 0: raise ValueError("Semaphore initial value must be >= 0") _Verbose.__init__(self, verbose) self.__cond = Condition(Lock()) self.__value = value def acquire(self, blocking=1): rc = False self.__cond.acquire() while self.__value == 0: if not blocking: break if __debug__: self._note("%s.acquire(%s): blocked waiting, value=%s", self, blocking, self.__value) self.__cond.wait() else: self.__value = self.__value - 1 if __debug__: self._note("%s.acquire: success, value=%s", self, self.__value) rc = True self.__cond.release() return rc def release(self): self.__cond.acquire() self.__value = self.__value + 1 if __debug__: self._note("%s.release: success, value=%s", self, self.__value) self.__cond.notify() self.__cond.release() def BoundedSemaphore(*args, **kwargs): return _BoundedSemaphore(*args, **kwargs) class _BoundedSemaphore(_Semaphore): """Semaphore that checks that # releases is <= # acquires""" def __init__(self, value=1, verbose=None): _Semaphore.__init__(self, value, verbose) self._initial_value = value def __enter__(self): self.acquire() return self def release(self): if self._Semaphore__value >= self._initial_value: raise ValueError, "Semaphore released too many times" return _Semaphore.release(self) def __exit__(self, t, v, tb): self.release() def Event(*args, **kwargs): return _Event(*args, 
**kwargs) class _Event(_Verbose): # After Tim Peters' event class (without is_posted()) def __init__(self, verbose=None): _Verbose.__init__(self, verbose) self.__cond = Condition(Lock()) self.__flag = False def isSet(self): return self.__flag is_set = isSet def set(self): self.__cond.acquire() try: self.__flag = True self.__cond.notifyAll() finally: self.__cond.release() def clear(self): self.__cond.acquire() try: self.__flag = False finally: self.__cond.release() def wait(self, timeout=None): self.__cond.acquire() try: if not self.__flag: self.__cond.wait(timeout) # Issue 2005: Since CPython 2.7, threading.Event.wait(timeout) returns boolean. # The function should return False if timeout is reached before the event is set. return self.__flag finally: self.__cond.release()
epl-1.0
dreglad/fastapps
scraper/scraper/pipelines.py
1
1727
"""Scraper pipelines""" # -*- coding: utf-8 -*- import logging import pymongo from scrapy.conf import settings from scrapy.exceptions import DropItem from arango import ArangoClient class ArangoDBPipeline(object): def __init__(self): client = ArangoClient( protocol='http', host='localhost', port=8529, username='root', password='', enable_logging=True ) self.db = client.db('fastapps') self.isPartOf = 'WebSite/CienciaSalud' def process_item(self, item, spider): if not item.get('url'): logging.info('Dropping object without identifier') raise DropItem("Object Without identifier {0}!".format(item)) itemType = item.get('type') del item['type'] item.update({ 'isPartOf': self.isPartOf, '_id': item.get('url') }) self.db.collection(itemType).insert(item) logging.info("Object added to database") return item class MongoDBPipeline(object): def __init__(self): connection = pymongo.MongoClient( settings['MONGODB_SERVER'], settings['MONGODB_PORT'] ) db = connection[settings['MONGODB_DB']] self.collection = db[settings['MONGODB_COLLECTION']] def process_item(self, item, spider): if not item.get('url'): valid = False logging.info('Dropping object without identifier') raise DropItem("Object Without identifier {0}!".format(item)) self.collection.update({'url': item['url']}, dict(item), upsert=True) logging.info("Object added to database") return item
agpl-3.0
spektom/incubator-airflow
airflow/contrib/hooks/fs_hook.py
5
1089
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module is deprecated. Please use `airflow.hooks.filesystem`.""" import warnings # pylint: disable=unused-import from airflow.hooks.filesystem import FSHook # noqa warnings.warn( "This module is deprecated. Please use `airflow.hooks.filesystem`.", DeprecationWarning, stacklevel=2 )
apache-2.0
jaubut/maman_web
node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py
886
131038
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import ntpath import os import posixpath import re import subprocess import sys import gyp.common import gyp.easy_xml as easy_xml import gyp.generator.ninja as ninja_generator import gyp.MSVSNew as MSVSNew import gyp.MSVSProject as MSVSProject import gyp.MSVSSettings as MSVSSettings import gyp.MSVSToolFile as MSVSToolFile import gyp.MSVSUserFile as MSVSUserFile import gyp.MSVSUtil as MSVSUtil import gyp.MSVSVersion as MSVSVersion from gyp.common import GypError from gyp.common import OrderedSet # TODO: Remove once bots are on 2.7, http://crbug.com/241769 def _import_OrderedDict(): import collections try: return collections.OrderedDict except AttributeError: import gyp.ordered_dict return gyp.ordered_dict.OrderedDict OrderedDict = _import_OrderedDict() # Regular expression for validating Visual Studio GUIDs. If the GUID # contains lowercase hex letters, MSVS will be fine. However, # IncrediBuild BuildConsole will parse the solution file, but then # silently skip building the target causing hard to track down errors. # Note that this only happens with the BuildConsole, and does not occur # if IncrediBuild is executed from inside Visual Studio. This regex # validates that the string looks like a GUID with all uppercase hex # letters. 
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$') generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '.exe', 'STATIC_LIB_PREFIX': '', 'SHARED_LIB_PREFIX': '', 'STATIC_LIB_SUFFIX': '.lib', 'SHARED_LIB_SUFFIX': '.dll', 'INTERMEDIATE_DIR': '$(IntDir)', 'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate', 'OS': 'win', 'PRODUCT_DIR': '$(OutDir)', 'LIB_DIR': '$(OutDir)lib', 'RULE_INPUT_ROOT': '$(InputName)', 'RULE_INPUT_DIRNAME': '$(InputDir)', 'RULE_INPUT_EXT': '$(InputExt)', 'RULE_INPUT_NAME': '$(InputFileName)', 'RULE_INPUT_PATH': '$(InputPath)', 'CONFIGURATION_NAME': '$(ConfigurationName)', } # The msvs specific sections that hold paths generator_additional_path_sections = [ 'msvs_cygwin_dirs', 'msvs_props', ] generator_additional_non_configuration_keys = [ 'msvs_cygwin_dirs', 'msvs_cygwin_shell', 'msvs_large_pdb', 'msvs_shard', 'msvs_external_builder', 'msvs_external_builder_out_dir', 'msvs_external_builder_build_cmd', 'msvs_external_builder_clean_cmd', 'msvs_external_builder_clcompile_cmd', 'msvs_enable_winrt', 'msvs_requires_importlibrary', 'msvs_enable_winphone', 'msvs_application_type_revision', 'msvs_target_platform_version', 'msvs_target_platform_minversion', ] # List of precompiled header related keys. precomp_keys = [ 'msvs_precompiled_header', 'msvs_precompiled_source', ] cached_username = None cached_domain = None # TODO(gspencer): Switch the os.environ calls to be # win32api.GetDomainName() and win32api.GetUserName() once the # python version in depot_tools has been updated to work on Vista # 64-bit. 
def _GetDomainAndUserName(): if sys.platform not in ('win32', 'cygwin'): return ('DOMAIN', 'USERNAME') global cached_username global cached_domain if not cached_domain or not cached_username: domain = os.environ.get('USERDOMAIN') username = os.environ.get('USERNAME') if not domain or not username: call = subprocess.Popen(['net', 'config', 'Workstation'], stdout=subprocess.PIPE) config = call.communicate()[0] username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE) username_match = username_re.search(config) if username_match: username = username_match.group(1) domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE) domain_match = domain_re.search(config) if domain_match: domain = domain_match.group(1) cached_domain = domain cached_username = username return (cached_domain, cached_username) fixpath_prefix = None def _NormalizedSource(source): """Normalize the path. But not if that gets rid of a variable, as this may expand to something larger than one directory. Arguments: source: The path to be normalize.d Returns: The normalized path. """ normalized = os.path.normpath(source) if source.count('$') == normalized.count('$'): source = normalized return source def _FixPath(path): """Convert paths to a form that will make sense in a vcproj file. Arguments: path: The path to convert, may contain / etc. Returns: The path with all slashes made into backslashes. """ if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$': path = os.path.join(fixpath_prefix, path) path = path.replace('/', '\\') path = _NormalizedSource(path) if path and path[-1] == '\\': path = path[:-1] return path def _FixPaths(paths): """Fix each of the paths of the list.""" return [_FixPath(i) for i in paths] def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None, list_excluded=True, msvs_version=None): """Converts a list split source file paths into a vcproj folder hierarchy. Arguments: sources: A list of source file paths split. 
prefix: A list of source file path layers meant to apply to each of sources. excluded: A set of excluded files. msvs_version: A MSVSVersion object. Returns: A hierarchy of filenames and MSVSProject.Filter objects that matches the layout of the source tree. For example: _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']], prefix=['joe']) --> [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']), MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])] """ if not prefix: prefix = [] result = [] excluded_result = [] folders = OrderedDict() # Gather files into the final result, excluded, or folders. for s in sources: if len(s) == 1: filename = _NormalizedSource('\\'.join(prefix + s)) if filename in excluded: excluded_result.append(filename) else: result.append(filename) elif msvs_version and not msvs_version.UsesVcxproj(): # For MSVS 2008 and earlier, we need to process all files before walking # the sub folders. if not folders.get(s[0]): folders[s[0]] = [] folders[s[0]].append(s[1:]) else: contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]], excluded=excluded, list_excluded=list_excluded, msvs_version=msvs_version) contents = MSVSProject.Filter(s[0], contents=contents) result.append(contents) # Add a folder for excluded files. if excluded_result and list_excluded: excluded_folder = MSVSProject.Filter('_excluded_files', contents=excluded_result) result.append(excluded_folder) if msvs_version and msvs_version.UsesVcxproj(): return result # Populate all the folders. 
for f in folders: contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f], excluded=excluded, list_excluded=list_excluded, msvs_version=msvs_version) contents = MSVSProject.Filter(f, contents=contents) result.append(contents) return result def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False): if not value: return _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset) def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False): # TODO(bradnelson): ugly hack, fix this more generally!!! if 'Directories' in setting or 'Dependencies' in setting: if type(value) == str: value = value.replace('/', '\\') else: value = [i.replace('/', '\\') for i in value] if not tools.get(tool_name): tools[tool_name] = dict() tool = tools[tool_name] if tool.get(setting): if only_if_unset: return if type(tool[setting]) == list and type(value) == list: tool[setting] += value else: raise TypeError( 'Appending "%s" to a non-list setting "%s" for tool "%s" is ' 'not allowed, previous value: %s' % ( value, setting, tool_name, str(tool[setting]))) else: tool[setting] = value def _ConfigPlatform(config_data): return config_data.get('msvs_configuration_platform', 'Win32') def _ConfigBaseName(config_name, platform_name): if config_name.endswith('_' + platform_name): return config_name[0:-len(platform_name) - 1] else: return config_name def _ConfigFullName(config_name, config_data): platform_name = _ConfigPlatform(config_data) return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name) def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path, quote_cmd, do_setup_env): if [x for x in cmd if '$(InputDir)' in x]: input_dir_preamble = ( 'set INPUTDIR=$(InputDir)\n' 'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n' 'set INPUTDIR=%INPUTDIR:~0,-1%\n' ) else: input_dir_preamble = '' if cygwin_shell: # Find path to cygwin. cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0]) # Prepare command. 
direct_cmd = cmd direct_cmd = [i.replace('$(IntDir)', '`cygpath -m "${INTDIR}"`') for i in direct_cmd] direct_cmd = [i.replace('$(OutDir)', '`cygpath -m "${OUTDIR}"`') for i in direct_cmd] direct_cmd = [i.replace('$(InputDir)', '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd] if has_input_path: direct_cmd = [i.replace('$(InputPath)', '`cygpath -m "${INPUTPATH}"`') for i in direct_cmd] direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd] # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd) direct_cmd = ' '.join(direct_cmd) # TODO(quote): regularize quoting path names throughout the module cmd = '' if do_setup_env: cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && ' cmd += 'set CYGWIN=nontsec&& ' if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0: cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& ' if direct_cmd.find('INTDIR') >= 0: cmd += 'set INTDIR=$(IntDir)&& ' if direct_cmd.find('OUTDIR') >= 0: cmd += 'set OUTDIR=$(OutDir)&& ' if has_input_path and direct_cmd.find('INPUTPATH') >= 0: cmd += 'set INPUTPATH=$(InputPath) && ' cmd += 'bash -c "%(cmd)s"' cmd = cmd % {'cygwin_dir': cygwin_dir, 'cmd': direct_cmd} return input_dir_preamble + cmd else: # Convert cat --> type to mimic unix. if cmd[0] == 'cat': command = ['type'] else: command = [cmd[0].replace('/', '\\')] # Add call before command to ensure that commands can be tied together one # after the other without aborting in Incredibuild, since IB makes a bat # file out of the raw command string, and some commands (like python) are # actually batch files themselves. command.insert(0, 'call') # Fix the paths # TODO(quote): This is a really ugly heuristic, and will miss path fixing # for arguments like "--arg=path" or "/opt:path". 
# If the argument starts with a slash or dash, it's probably a command line # switch arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]] arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments] arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments] if quote_cmd: # Support a mode for using cmd directly. # Convert any paths to native form (first element is used directly). # TODO(quote): regularize quoting path names throughout the module arguments = ['"%s"' % i for i in arguments] # Collapse into a single command. return input_dir_preamble + ' '.join(command + arguments) def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env): # Currently this weird argument munging is used to duplicate the way a # python script would need to be run as part of the chrome tree. # Eventually we should add some sort of rule_default option to set this # per project. For now the behavior chrome needs is the default. mcs = rule.get('msvs_cygwin_shell') if mcs is None: mcs = int(spec.get('msvs_cygwin_shell', 1)) elif isinstance(mcs, str): mcs = int(mcs) quote_cmd = int(rule.get('msvs_quote_cmd', 1)) return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path, quote_cmd, do_setup_env=do_setup_env) def _AddActionStep(actions_dict, inputs, outputs, description, command): """Merge action into an existing list of actions. Care must be taken so that actions which have overlapping inputs either don't get assigned to the same input, or get collapsed into one. Arguments: actions_dict: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. inputs: list of inputs outputs: list of outputs description: description of the action command: command line to execute """ # Require there to be at least one input (call sites will ensure this). 
assert inputs action = { 'inputs': inputs, 'outputs': outputs, 'description': description, 'command': command, } # Pick where to stick this action. # While less than optimal in terms of build time, attach them to the first # input for now. chosen_input = inputs[0] # Add it there. if chosen_input not in actions_dict: actions_dict[chosen_input] = [] actions_dict[chosen_input].append(action) def _AddCustomBuildToolForMSVS(p, spec, primary_input, inputs, outputs, description, cmd): """Add a custom build tool to execute something. Arguments: p: the target project spec: the target project dict primary_input: input file to attach the build tool to inputs: list of inputs outputs: list of outputs description: description of the action cmd: command line to execute """ inputs = _FixPaths(inputs) outputs = _FixPaths(outputs) tool = MSVSProject.Tool( 'VCCustomBuildTool', {'Description': description, 'AdditionalDependencies': ';'.join(inputs), 'Outputs': ';'.join(outputs), 'CommandLine': cmd, }) # Add to the properties of primary input for each config. for config_name, c_data in spec['configurations'].iteritems(): p.AddFileConfig(_FixPath(primary_input), _ConfigFullName(config_name, c_data), tools=[tool]) def _AddAccumulatedActionsToMSVS(p, spec, actions_dict): """Add actions accumulated into an actions_dict, merging as needed. Arguments: p: the target project spec: the target project dict actions_dict: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. """ for primary_input in actions_dict: inputs = OrderedSet() outputs = OrderedSet() descriptions = [] commands = [] for action in actions_dict[primary_input]: inputs.update(OrderedSet(action['inputs'])) outputs.update(OrderedSet(action['outputs'])) descriptions.append(action['description']) commands.append(action['command']) # Add the custom build step for one input file. 
description = ', and also '.join(descriptions) command = '\r\n'.join(commands) _AddCustomBuildToolForMSVS(p, spec, primary_input=primary_input, inputs=inputs, outputs=outputs, description=description, cmd=command) def _RuleExpandPath(path, input_file): """Given the input file to which a rule applied, string substitute a path. Arguments: path: a path to string expand input_file: the file to which the rule applied. Returns: The string substituted path. """ path = path.replace('$(InputName)', os.path.splitext(os.path.split(input_file)[1])[0]) path = path.replace('$(InputDir)', os.path.dirname(input_file)) path = path.replace('$(InputExt)', os.path.splitext(os.path.split(input_file)[1])[1]) path = path.replace('$(InputFileName)', os.path.split(input_file)[1]) path = path.replace('$(InputPath)', input_file) return path def _FindRuleTriggerFiles(rule, sources): """Find the list of files which a particular rule applies to. Arguments: rule: the rule in question sources: the set of all known source files for this project Returns: The list of sources that trigger a particular rule. """ return rule.get('rule_sources', []) def _RuleInputsAndOutputs(rule, trigger_file): """Find the inputs and outputs generated by a rule. Arguments: rule: the rule in question. trigger_file: the main trigger for this rule. Returns: The pair of (inputs, outputs) involved in this rule. """ raw_inputs = _FixPaths(rule.get('inputs', [])) raw_outputs = _FixPaths(rule.get('outputs', [])) inputs = OrderedSet() outputs = OrderedSet() inputs.add(trigger_file) for i in raw_inputs: inputs.add(_RuleExpandPath(i, trigger_file)) for o in raw_outputs: outputs.add(_RuleExpandPath(o, trigger_file)) return (inputs, outputs) def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options): """Generate a native rules file. 
Arguments: p: the target project rules: the set of rules to include output_dir: the directory in which the project/gyp resides spec: the project dict options: global generator options """ rules_filename = '%s%s.rules' % (spec['target_name'], options.suffix) rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename), spec['target_name']) # Add each rule. for r in rules: rule_name = r['rule_name'] rule_ext = r['extension'] inputs = _FixPaths(r.get('inputs', [])) outputs = _FixPaths(r.get('outputs', [])) # Skip a rule with no action and no inputs. if 'action' not in r and not r.get('rule_sources', []): continue cmd = _BuildCommandLineForRule(spec, r, has_input_path=True, do_setup_env=True) rules_file.AddCustomBuildRule(name=rule_name, description=r.get('message', rule_name), extensions=[rule_ext], additional_dependencies=inputs, outputs=outputs, cmd=cmd) # Write out rules file. rules_file.WriteIfChanged() # Add rules file to project. p.AddToolFile(rules_filename) def _Cygwinify(path): path = path.replace('$(OutDir)', '$(OutDirCygwin)') path = path.replace('$(IntDir)', '$(IntDirCygwin)') return path def _GenerateExternalRules(rules, output_dir, spec, sources, options, actions_to_add): """Generate an external makefile to do a set of rules. Arguments: rules: the list of rules to include output_dir: path containing project and gyp files spec: project specification data sources: set of sources known options: global generator options actions_to_add: The list of actions we will add to. """ filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix) mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename)) # Find cygwin style versions of some paths. mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n') mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n') # Gather stuff needed to emit all: target. 
all_inputs = OrderedSet() all_outputs = OrderedSet() all_output_dirs = OrderedSet() first_outputs = [] for rule in rules: trigger_files = _FindRuleTriggerFiles(rule, sources) for tf in trigger_files: inputs, outputs = _RuleInputsAndOutputs(rule, tf) all_inputs.update(OrderedSet(inputs)) all_outputs.update(OrderedSet(outputs)) # Only use one target from each rule as the dependency for # 'all' so we don't try to build each rule multiple times. first_outputs.append(list(outputs)[0]) # Get the unique output directories for this rule. output_dirs = [os.path.split(i)[0] for i in outputs] for od in output_dirs: all_output_dirs.add(od) first_outputs_cyg = [_Cygwinify(i) for i in first_outputs] # Write out all: target, including mkdir for each output directory. mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg)) for od in all_output_dirs: if od: mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od) mk_file.write('\n') # Define how each output is generated. for rule in rules: trigger_files = _FindRuleTriggerFiles(rule, sources) for tf in trigger_files: # Get all the inputs and outputs for this rule for this trigger file. inputs, outputs = _RuleInputsAndOutputs(rule, tf) inputs = [_Cygwinify(i) for i in inputs] outputs = [_Cygwinify(i) for i in outputs] # Prepare the command line for this rule. cmd = [_RuleExpandPath(c, tf) for c in rule['action']] cmd = ['"%s"' % i for i in cmd] cmd = ' '.join(cmd) # Add it to the makefile. mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs))) mk_file.write('\t%s\n\n' % cmd) # Close up the file. mk_file.close() # Add makefile to list of sources. sources.add(filename) # Add a build action to call makefile. cmd = ['make', 'OutDir=$(OutDir)', 'IntDir=$(IntDir)', '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}', '-f', filename] cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True) # Insert makefile as 0'th input, so it gets the action attached there, # as this is easier to understand from in the IDE. 
all_inputs = list(all_inputs) all_inputs.insert(0, filename) _AddActionStep(actions_to_add, inputs=_FixPaths(all_inputs), outputs=_FixPaths(all_outputs), description='Running external rules for %s' % spec['target_name'], command=cmd) def _EscapeEnvironmentVariableExpansion(s): """Escapes % characters. Escapes any % characters so that Windows-style environment variable expansions will leave them alone. See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile to understand why we have to do this. Args: s: The string to be escaped. Returns: The escaped string. """ s = s.replace('%', '%%') return s quote_replacer_regex = re.compile(r'(\\*)"') def _EscapeCommandLineArgumentForMSVS(s): """Escapes a Windows command-line argument. So that the Win32 CommandLineToArgv function will turn the escaped result back into the original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx ("Parsing C++ Command-Line Arguments") to understand why we have to do this. Args: s: the string to be escaped. Returns: the escaped string. """ def _Replace(match): # For a literal quote, CommandLineToArgv requires an odd number of # backslashes preceding it, and it produces half as many literal backslashes # (rounded down). So we need to produce 2n+1 backslashes. return 2 * match.group(1) + '\\"' # Escape all quotes so that they are interpreted literally. s = quote_replacer_regex.sub(_Replace, s) # Now add unescaped quotes so that any whitespace is interpreted literally. s = '"' + s + '"' return s delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)') def _EscapeVCProjCommandLineArgListItem(s): """Escapes command line arguments for MSVS. The VCProj format stores string lists in a single string using commas and semi-colons as separators, which must be quoted if they are to be interpreted literally. 
However, command-line arguments may already have quotes, and the VCProj parser is ignorant of the backslash escaping convention used by CommandLineToArgv, so the command-line quotes and the VCProj quotes may not be the same quotes. So to store a general command-line argument in a VCProj list, we need to parse the existing quoting according to VCProj's convention and quote any delimiters that are not already quoted by that convention. The quotes that we add will also be seen by CommandLineToArgv, so if backslashes precede them then we also have to escape those backslashes according to the CommandLineToArgv convention. Args: s: the string to be escaped. Returns: the escaped string. """ def _Replace(match): # For a non-literal quote, CommandLineToArgv requires an even number of # backslashes preceding it, and it produces half as many literal # backslashes. So we need to produce 2n backslashes. return 2 * match.group(1) + '"' + match.group(2) + '"' segments = s.split('"') # The unquoted segments are at the even-numbered indices. for i in range(0, len(segments), 2): segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i]) # Concatenate back into a single string s = '"'.join(segments) if len(segments) % 2 == 0: # String ends while still quoted according to VCProj's convention. This # means the delimiter and the next list item that follow this one in the # .vcproj file will be misinterpreted as part of this item. There is nothing # we can do about this. Adding an extra quote would correct the problem in # the VCProj but cause the same problem on the final command-line. Moving # the item to the end of the list does works, but that's only possible if # there's only one such item. Let's just warn the user. 
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' + 'quotes in ' + s) return s def _EscapeCppDefineForMSVS(s): """Escapes a CPP define so that it will reach the compiler unaltered.""" s = _EscapeEnvironmentVariableExpansion(s) s = _EscapeCommandLineArgumentForMSVS(s) s = _EscapeVCProjCommandLineArgListItem(s) # cl.exe replaces literal # characters with = in preprocesor definitions for # some reason. Octal-encode to work around that. s = s.replace('#', '\\%03o' % ord('#')) return s quote_replacer_regex2 = re.compile(r'(\\+)"') def _EscapeCommandLineArgumentForMSBuild(s): """Escapes a Windows command-line argument for use by MSBuild.""" def _Replace(match): return (len(match.group(1)) / 2 * 4) * '\\' + '\\"' # Escape all quotes so that they are interpreted literally. s = quote_replacer_regex2.sub(_Replace, s) return s def _EscapeMSBuildSpecialCharacters(s): escape_dictionary = { '%': '%25', '$': '%24', '@': '%40', "'": '%27', ';': '%3B', '?': '%3F', '*': '%2A' } result = ''.join([escape_dictionary.get(c, c) for c in s]) return result def _EscapeCppDefineForMSBuild(s): """Escapes a CPP define so that it will reach the compiler unaltered.""" s = _EscapeEnvironmentVariableExpansion(s) s = _EscapeCommandLineArgumentForMSBuild(s) s = _EscapeMSBuildSpecialCharacters(s) # cl.exe replaces literal # characters with = in preprocesor definitions for # some reason. Octal-encode to work around that. s = s.replace('#', '\\%03o' % ord('#')) return s def _GenerateRulesForMSVS(p, output_dir, options, spec, sources, excluded_sources, actions_to_add): """Generate all the rules for a particular project. 
Arguments: p: the project output_dir: directory to emit rules to options: global options passed to the generator spec: the specification for this project sources: the set of all known source files in this project excluded_sources: the set of sources excluded from normal processing actions_to_add: deferred list of actions to add in """ rules = spec.get('rules', []) rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))] rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))] # Handle rules that use a native rules file. if rules_native: _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options) # Handle external rules (non-native rules). if rules_external: _GenerateExternalRules(rules_external, output_dir, spec, sources, options, actions_to_add) _AdjustSourcesForRules(rules, sources, excluded_sources, False) def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild): # Add outputs generated by each rule (if applicable). for rule in rules: # Add in the outputs from this rule. trigger_files = _FindRuleTriggerFiles(rule, sources) for trigger_file in trigger_files: # Remove trigger_file from excluded_sources to let the rule be triggered # (e.g. rule trigger ax_enums.idl is added to excluded_sources # because it's also in an action's inputs in the same project) excluded_sources.discard(_FixPath(trigger_file)) # Done if not processing outputs as sources. if int(rule.get('process_outputs_as_sources', False)): inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file) inputs = OrderedSet(_FixPaths(inputs)) outputs = OrderedSet(_FixPaths(outputs)) inputs.remove(_FixPath(trigger_file)) sources.update(inputs) if not is_msbuild: excluded_sources.update(inputs) sources.update(outputs) def _FilterActionsFromExcluded(excluded_sources, actions_to_add): """Take inputs with actions attached out of the list of exclusions. Arguments: excluded_sources: list of source files not to be built. 
actions_to_add: dict of actions keyed on source file they're attached to. Returns: excluded_sources with files that have actions attached removed. """ must_keep = OrderedSet(_FixPaths(actions_to_add.keys())) return [s for s in excluded_sources if s not in must_keep] def _GetDefaultConfiguration(spec): return spec['configurations'][spec['default_configuration']] def _GetGuidOfProject(proj_path, spec): """Get the guid for the project. Arguments: proj_path: Path of the vcproj or vcxproj file to generate. spec: The target dictionary containing the properties of the target. Returns: the guid. Raises: ValueError: if the specified GUID is invalid. """ # Pluck out the default configuration. default_config = _GetDefaultConfiguration(spec) # Decide the guid of the project. guid = default_config.get('msvs_guid') if guid: if VALID_MSVS_GUID_CHARS.match(guid) is None: raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' % (guid, VALID_MSVS_GUID_CHARS.pattern)) guid = '{%s}' % guid guid = guid or MSVSNew.MakeGuid(proj_path) return guid def _GetMsbuildToolsetOfProject(proj_path, spec, version): """Get the platform toolset for the project. Arguments: proj_path: Path of the vcproj or vcxproj file to generate. spec: The target dictionary containing the properties of the target. version: The MSVSVersion object. Returns: the platform toolset string or None. """ # Pluck out the default configuration. default_config = _GetDefaultConfiguration(spec) toolset = default_config.get('msbuild_toolset') if not toolset and version.DefaultToolset(): toolset = version.DefaultToolset() return toolset def _GenerateProject(project, options, version, generator_flags): """Generates a vcproj file. Arguments: project: the MSVSProject object. options: global generator options. version: the MSVSVersion object. generator_flags: dict of generator-specific flags. Returns: A list of source files that cannot be found on disk. 
""" default_config = _GetDefaultConfiguration(project.spec) # Skip emitting anything if told to with msvs_existing_vcproj option. if default_config.get('msvs_existing_vcproj'): return [] if version.UsesVcxproj(): return _GenerateMSBuildProject(project, options, version, generator_flags) else: return _GenerateMSVSProject(project, options, version, generator_flags) # TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py. def _ValidateSourcesForMSVSProject(spec, version): """Makes sure if duplicate basenames are not specified in the source list. Arguments: spec: The target dictionary containing the properties of the target. version: The VisualStudioVersion object. """ # This validation should not be applied to MSVC2010 and later. assert not version.UsesVcxproj() # TODO: Check if MSVC allows this for loadable_module targets. if spec.get('type', None) not in ('static_library', 'shared_library'): return sources = spec.get('sources', []) basenames = {} for source in sources: name, ext = os.path.splitext(source) is_compiled_file = ext in [ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] if not is_compiled_file: continue basename = os.path.basename(name) # Don't include extension. basenames.setdefault(basename, []).append(source) error = '' for basename, files in basenames.iteritems(): if len(files) > 1: error += ' %s: %s\n' % (basename, ' '.join(files)) if error: print('static library %s has several files with the same basename:\n' % spec['target_name'] + error + 'MSVC08 cannot handle that.') raise GypError('Duplicate basenames in sources section, see list above') def _GenerateMSVSProject(project, options, version, generator_flags): """Generates a .vcproj file. It may create .rules and .user files too. Arguments: project: The project object we will generate the file for. options: Global options passed to the generator. version: The VisualStudioVersion object. generator_flags: dict of generator-specific flags. 
""" spec = project.spec gyp.common.EnsureDirExists(project.path) platforms = _GetUniquePlatforms(spec) p = MSVSProject.Writer(project.path, version, spec['target_name'], project.guid, platforms) # Get directory project file is in. project_dir = os.path.split(project.path)[0] gyp_path = _NormalizedSource(project.build_file) relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir) config_type = _GetMSVSConfigurationType(spec, project.build_file) for config_name, config in spec['configurations'].iteritems(): _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config) # MSVC08 and prior version cannot handle duplicate basenames in the same # target. # TODO: Take excluded sources into consideration if possible. _ValidateSourcesForMSVSProject(spec, version) # Prepare list of sources and excluded sources. gyp_file = os.path.split(project.build_file)[1] sources, excluded_sources = _PrepareListOfSources(spec, generator_flags, gyp_file) # Add rules. actions_to_add = {} _GenerateRulesForMSVS(p, project_dir, options, spec, sources, excluded_sources, actions_to_add) list_excluded = generator_flags.get('msvs_list_excluded_files', True) sources, excluded_sources, excluded_idl = ( _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir, sources, excluded_sources, list_excluded, version)) # Add in files. missing_sources = _VerifySourcesExist(sources, project_dir) p.AddFiles(sources) _AddToolFilesToMSVS(p, spec) _HandlePreCompiledHeaders(p, sources, spec) _AddActions(actions_to_add, spec, relative_path_of_gyp_file) _AddCopies(actions_to_add, spec) _WriteMSVSUserFile(project.path, version, spec) # NOTE: this stanza must appear after all actions have been decided. # Don't excluded sources with actions attached, or they won't run. 
excluded_sources = _FilterActionsFromExcluded( excluded_sources, actions_to_add) _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl, list_excluded) _AddAccumulatedActionsToMSVS(p, spec, actions_to_add) # Write it out. p.WriteIfChanged() return missing_sources def _GetUniquePlatforms(spec): """Returns the list of unique platforms for this spec, e.g ['win32', ...]. Arguments: spec: The target dictionary containing the properties of the target. Returns: The MSVSUserFile object created. """ # Gather list of unique platforms. platforms = OrderedSet() for configuration in spec['configurations']: platforms.add(_ConfigPlatform(spec['configurations'][configuration])) platforms = list(platforms) return platforms def _CreateMSVSUserFile(proj_path, version, spec): """Generates a .user file for the user running this Gyp program. Arguments: proj_path: The path of the project file being created. The .user file shares the same path (with an appropriate suffix). version: The VisualStudioVersion object. spec: The target dictionary containing the properties of the target. Returns: The MSVSUserFile object created. """ (domain, username) = _GetDomainAndUserName() vcuser_filename = '.'.join([proj_path, domain, username, 'user']) user_file = MSVSUserFile.Writer(vcuser_filename, version, spec['target_name']) return user_file def _GetMSVSConfigurationType(spec, build_file): """Returns the configuration type for this project. It's a number defined by Microsoft. May raise an exception. Args: spec: The target dictionary containing the properties of the target. build_file: The path of the gyp file. Returns: An integer, the configuration type. """ try: config_type = { 'executable': '1', # .exe 'shared_library': '2', # .dll 'loadable_module': '2', # .dll 'static_library': '4', # .lib 'none': '10', # Utility type }[spec['type']] except KeyError: if spec.get('type'): raise GypError('Target type %s is not a valid target type for ' 'target %s in %s.' 
% (spec['type'], spec['target_name'], build_file)) else: raise GypError('Missing type field for target %s in %s.' % (spec['target_name'], build_file)) return config_type def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config): """Adds a configuration to the MSVS project. Many settings in a vcproj file are specific to a configuration. This function the main part of the vcproj file that's configuration specific. Arguments: p: The target project being generated. spec: The target dictionary containing the properties of the target. config_type: The configuration type, a number as defined by Microsoft. config_name: The name of the configuration. config: The dictionary that defines the special processing to be done for this configuration. """ # Get the information for this configuration include_dirs, midl_include_dirs, resource_include_dirs = \ _GetIncludeDirs(config) libraries = _GetLibraries(spec) library_dirs = _GetLibraryDirs(config) out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False) defines = _GetDefines(config) defines = [_EscapeCppDefineForMSVS(d) for d in defines] disabled_warnings = _GetDisabledWarnings(config) prebuild = config.get('msvs_prebuild') postbuild = config.get('msvs_postbuild') def_file = _GetModuleDefinition(spec) precompiled_header = config.get('msvs_precompiled_header') # Prepare the list of tools as a dictionary. tools = dict() # Add in user specified msvs_settings. msvs_settings = config.get('msvs_settings', {}) MSVSSettings.ValidateMSVSSettings(msvs_settings) # Prevent default library inheritance from the environment. 
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)']) for tool in msvs_settings: settings = config['msvs_settings'][tool] for setting in settings: _ToolAppend(tools, tool, setting, settings[setting]) # Add the information to the appropriate tool _ToolAppend(tools, 'VCCLCompilerTool', 'AdditionalIncludeDirectories', include_dirs) _ToolAppend(tools, 'VCMIDLTool', 'AdditionalIncludeDirectories', midl_include_dirs) _ToolAppend(tools, 'VCResourceCompilerTool', 'AdditionalIncludeDirectories', resource_include_dirs) # Add in libraries. _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries) _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories', library_dirs) if out_file: _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True) # Add defines. _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines) _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions', defines) # Change program database directory to prevent collisions. _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName', '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True) # Add disabled warnings. _ToolAppend(tools, 'VCCLCompilerTool', 'DisableSpecificWarnings', disabled_warnings) # Add Pre-build. _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild) # Add Post-build. _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild) # Turn on precompiled headers if appropriate. if precompiled_header: precompiled_header = os.path.split(precompiled_header)[1] _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2') _ToolAppend(tools, 'VCCLCompilerTool', 'PrecompiledHeaderThrough', precompiled_header) _ToolAppend(tools, 'VCCLCompilerTool', 'ForcedIncludeFiles', precompiled_header) # Loadable modules don't generate import libraries; # tell dependent projects to not expect one. 
if spec['type'] == 'loadable_module': _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true') # Set the module definition file if any. if def_file: _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file) _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name) def _GetIncludeDirs(config): """Returns the list of directories to be used for #include directives. Arguments: config: The dictionary that defines the special processing to be done for this configuration. Returns: The list of directory paths. """ # TODO(bradnelson): include_dirs should really be flexible enough not to # require this sort of thing. include_dirs = ( config.get('include_dirs', []) + config.get('msvs_system_include_dirs', [])) midl_include_dirs = ( config.get('midl_include_dirs', []) + config.get('msvs_system_include_dirs', [])) resource_include_dirs = config.get('resource_include_dirs', include_dirs) include_dirs = _FixPaths(include_dirs) midl_include_dirs = _FixPaths(midl_include_dirs) resource_include_dirs = _FixPaths(resource_include_dirs) return include_dirs, midl_include_dirs, resource_include_dirs def _GetLibraryDirs(config): """Returns the list of directories to be used for library search paths. Arguments: config: The dictionary that defines the special processing to be done for this configuration. Returns: The list of directory paths. """ library_dirs = config.get('library_dirs', []) library_dirs = _FixPaths(library_dirs) return library_dirs def _GetLibraries(spec): """Returns the list of libraries for this configuration. Arguments: spec: The target dictionary containing the properties of the target. Returns: The list of directory paths. """ libraries = spec.get('libraries', []) # Strip out -l, as it is not used on windows (but is needed so we can pass # in libraries that are assumed to be in the default library path). # Also remove duplicate entries, leaving only the last duplicate, while # preserving order. 
  found = OrderedSet()
  unique_libraries_list = []
  # Walk in reverse so that, for duplicates, the *last* occurrence wins,
  # then reverse again to restore the original relative order.
  for entry in reversed(libraries):
    library = re.sub(r'^\-l', '', entry)
    # Bare names (no extension) are assumed to be .lib files.
    if not os.path.splitext(library)[1]:
      library += '.lib'
    if library not in found:
      found.add(library)
      unique_libraries_list.append(library)
  unique_libraries_list.reverse()
  return unique_libraries_list


def _GetOutputFilePathAndTool(spec, msbuild):
  """Returns the path and tool to use for this target.

  Figures out the path of the file this spec will create and the name of
  the VC tool that will create it.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    msbuild: True when generating for MSBuild (.vcxproj); affects the
             output suffix chosen when no product_extension is set.
  Returns:
    A triple of (file path, name of the vc tool, name of the msbuild tool)
  """
  # Select a name for the output file.
  out_file = ''
  vc_tool = ''
  msbuild_tool = ''
  # Maps spec['type'] to (VS2008 tool, MSBuild tool, output dir, suffix).
  output_file_map = {
      'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
      'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
      'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
      'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
  }
  output_file_props = output_file_map.get(spec['type'])
  if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
    vc_tool, msbuild_tool, out_dir, suffix = output_file_props
    if spec.get('standalone_static_library', 0):
      out_dir = '$(OutDir)'
    out_dir = spec.get('product_dir', out_dir)
    product_extension = spec.get('product_extension')
    if product_extension:
      suffix = '.' + product_extension
    elif msbuild:
      # Let MSBuild supply the extension to avoid warning MSB8012.
      suffix = '$(TargetExt)'
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    # ntpath so the joined path uses backslashes regardless of host OS.
    out_file = ntpath.join(out_dir, prefix + product_name + suffix)
  return out_file, vc_tool, msbuild_tool


def _GetOutputTargetExt(spec):
  """Returns the extension for this target, including the dot

  If product_extension is specified, set target_extension to this to avoid
  MSB8012, returns None otherwise. Ignores any target_extension settings in
  the input files.
  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    A string with the extension, or None
  """
  target_extension = spec.get('product_extension')
  if target_extension:
    return '.' + target_extension
  return None


def _GetDefines(config):
  """Returns the list of preprocessor definitions for this configuration.

  Arguments:
    config: The dictionary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of preprocessor definitions.
  """
  defines = []
  for d in config.get('defines', []):
    # A define may be a plain string ('FOO') or a [name, value] list,
    # which is collapsed to 'name=value'.
    if type(d) == list:
      fd = '='.join([str(dpart) for dpart in d])
    else:
      fd = str(d)
    defines.append(fd)
  return defines


def _GetDisabledWarnings(config):
  """Returns 'msvs_disabled_warnings' numbers as strings for the VS XML."""
  return [str(i) for i in config.get('msvs_disabled_warnings', [])]


def _GetModuleDefinition(spec):
  """Returns the single .def source of a linked target, or '' if none.

  Raises ValueError if the target lists more than one .def file.
  """
  def_file = ''
  if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
    def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
    if len(def_files) == 1:
      def_file = _FixPath(def_files[0])
    elif def_files:
      raise ValueError(
          'Multiple module definition files in one target, target %s lists '
          'multiple .def files: %s' % (
              spec['target_name'], ' '.join(def_files)))
  return def_file


def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  for tool, settings in tools.iteritems():
    # Collapse settings with lists.
    settings_fixed = {}
    for setting, value in settings.iteritems():
      if type(value) == list:
        # Linker AdditionalDependencies and any AdditionalOptions are
        # space-separated in VS; every other list setting uses ';'.
        if ((tool == 'VCLinkerTool' and
             setting == 'AdditionalDependencies')
            or setting == 'AdditionalOptions'):
          settings_fixed[setting] = ' '.join(value)
        else:
          settings_fixed[setting] = ';'.join(value)
      else:
        settings_fixed[setting] = value
    # Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed)) return tool_list def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name): """Add to the project file the configuration specified by config. Arguments: p: The target project being generated. spec: the target project dict. tools: A dictionary of settings; the tool name is the key. config: The dictionary that defines the special processing to be done for this configuration. config_type: The configuration type, a number as defined by Microsoft. config_name: The name of the configuration. """ attributes = _GetMSVSAttributes(spec, config, config_type) # Add in this configuration. tool_list = _ConvertToolsToExpectedForm(tools) p.AddConfig(_ConfigFullName(config_name, config), attrs=attributes, tools=tool_list) def _GetMSVSAttributes(spec, config, config_type): # Prepare configuration attributes. prepared_attrs = {} source_attrs = config.get('msvs_configuration_attributes', {}) for a in source_attrs: prepared_attrs[a] = source_attrs[a] # Add props files. vsprops_dirs = config.get('msvs_props', []) vsprops_dirs = _FixPaths(vsprops_dirs) if vsprops_dirs: prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs) # Set configuration type. 
prepared_attrs['ConfigurationType'] = config_type output_dir = prepared_attrs.get('OutputDirectory', '$(SolutionDir)$(ConfigurationName)') prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\' if 'IntermediateDirectory' not in prepared_attrs: intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)' prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\' else: intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\' intermediate = MSVSSettings.FixVCMacroSlashes(intermediate) prepared_attrs['IntermediateDirectory'] = intermediate return prepared_attrs def _AddNormalizedSources(sources_set, sources_array): sources_set.update(_NormalizedSource(s) for s in sources_array) def _PrepareListOfSources(spec, generator_flags, gyp_file): """Prepare list of sources and excluded sources. Besides the sources specified directly in the spec, adds the gyp file so that a change to it will cause a re-compile. Also adds appropriate sources for actions and copies. Assumes later stage will un-exclude files which have custom build steps attached. Arguments: spec: The target dictionary containing the properties of the target. gyp_file: The name of the gyp file. Returns: A pair of (list of sources, list of excluded sources). The sources will be relative to the gyp file. """ sources = OrderedSet() _AddNormalizedSources(sources, spec.get('sources', [])) excluded_sources = OrderedSet() # Add in the gyp file. if not generator_flags.get('standalone'): sources.add(gyp_file) # Add in 'action' inputs and outputs. for a in spec.get('actions', []): inputs = a['inputs'] inputs = [_NormalizedSource(i) for i in inputs] # Add all inputs to sources and excluded sources. inputs = OrderedSet(inputs) sources.update(inputs) if not spec.get('msvs_external_builder'): excluded_sources.update(inputs) if int(a.get('process_outputs_as_sources', False)): _AddNormalizedSources(sources, a.get('outputs', [])) # Add in 'copies' inputs and outputs. 
for cpy in spec.get('copies', []): _AddNormalizedSources(sources, cpy.get('files', [])) return (sources, excluded_sources) def _AdjustSourcesAndConvertToFilterHierarchy( spec, options, gyp_dir, sources, excluded_sources, list_excluded, version): """Adjusts the list of sources and excluded sources. Also converts the sets to lists. Arguments: spec: The target dictionary containing the properties of the target. options: Global generator options. gyp_dir: The path to the gyp file being processed. sources: A set of sources to be included for this project. excluded_sources: A set of sources to be excluded for this project. version: A MSVSVersion object. Returns: A trio of (list of sources, list of excluded sources, path of excluded IDL file) """ # Exclude excluded sources coming into the generator. excluded_sources.update(OrderedSet(spec.get('sources_excluded', []))) # Add excluded sources into sources for good measure. sources.update(excluded_sources) # Convert to proper windows form. # NOTE: sources goes from being a set to a list here. # NOTE: excluded_sources goes from being a set to a list here. sources = _FixPaths(sources) # Convert to proper windows form. excluded_sources = _FixPaths(excluded_sources) excluded_idl = _IdlFilesHandledNonNatively(spec, sources) precompiled_related = _GetPrecompileRelatedFiles(spec) # Find the excluded ones, minus the precompiled header related ones. fully_excluded = [i for i in excluded_sources if i not in precompiled_related] # Convert to folders and the right slashes. sources = [i.split('\\') for i in sources] sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded, list_excluded=list_excluded, msvs_version=version) # Prune filters with a single child to flatten ugly directory structures # such as ../../src/modules/module1 etc. 
  if version.UsesVcxproj():
    # MSBuild: repeatedly lift the contents of filters when every top-level
    # entry is a filter with the same name (and exactly one child).
    while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
        and len(set([s.name for s in sources])) == 1:
      assert all([len(s.contents) == 1 for s in sources])
      sources = [s.contents[0] for s in sources]
  else:
    # VS2008: only collapse when there is a single top-level filter.
    while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
      sources = sources[0].contents

  return sources, excluded_sources, excluded_idl


def _IdlFilesHandledNonNatively(spec, sources):
  """Returns .idl sources to exclude because a non-native rule claims them."""
  # If any non-native rules use 'idl' as an extension exclude idl files.
  # Gather a list here to use later.
  using_idl = False
  for rule in spec.get('rules', []):
    if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
      using_idl = True
      break
  if using_idl:
    excluded_idl = [i for i in sources if i.endswith('.idl')]
  else:
    excluded_idl = []
  return excluded_idl


def _GetPrecompileRelatedFiles(spec):
  """Returns precompiled-header related sources across all configurations."""
  # Gather a list of precompiled header related sources.
  precompiled_related = []
  for _, config in spec['configurations'].iteritems():
    for k in precomp_keys:
      f = config.get(k)
      if f:
        precompiled_related.append(_FixPath(f))
  return precompiled_related


def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Marks excluded files as ExcludedFromBuild in each affected config."""
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources,
                                          excluded_idl)
  for file_name, excluded_configs in exclusions.iteritems():
    if (not list_excluded and
        len(excluded_configs) == len(spec['configurations'])):
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      pass
    else:
      for config_name, config in excluded_configs:
        p.AddFileConfig(file_name,
                        _ConfigFullName(config_name, config),
                        {'ExcludedFromBuild': 'true'})


def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  exclusions = {}
  # Exclude excluded sources from being built.
for f in excluded_sources: excluded_configs = [] for config_name, config in spec['configurations'].iteritems(): precomped = [_FixPath(config.get(i, '')) for i in precomp_keys] # Don't do this for ones that are precompiled header related. if f not in precomped: excluded_configs.append((config_name, config)) exclusions[f] = excluded_configs # If any non-native rules use 'idl' as an extension exclude idl files. # Exclude them now. for f in excluded_idl: excluded_configs = [] for config_name, config in spec['configurations'].iteritems(): excluded_configs.append((config_name, config)) exclusions[f] = excluded_configs return exclusions def _AddToolFilesToMSVS(p, spec): # Add in tool files (rules). tool_files = OrderedSet() for _, config in spec['configurations'].iteritems(): for f in config.get('msvs_tool_files', []): tool_files.add(f) for f in tool_files: p.AddToolFile(f) def _HandlePreCompiledHeaders(p, sources, spec): # Pre-compiled header source stubs need a different compiler flag # (generate precompiled header) and any source file not of the same # kind (i.e. C vs. C++) as the precompiled header source stub needs # to have use of precompiled headers disabled. extensions_excluded_from_precompile = [] for config_name, config in spec['configurations'].iteritems(): source = config.get('msvs_precompiled_source') if source: source = _FixPath(source) # UsePrecompiledHeader=1 for if using precompiled headers. 
tool = MSVSProject.Tool('VCCLCompilerTool', {'UsePrecompiledHeader': '1'}) p.AddFileConfig(source, _ConfigFullName(config_name, config), {}, tools=[tool]) basename, extension = os.path.splitext(source) if extension == '.c': extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx'] else: extensions_excluded_from_precompile = ['.c'] def DisableForSourceTree(source_tree): for source in source_tree: if isinstance(source, MSVSProject.Filter): DisableForSourceTree(source.contents) else: basename, extension = os.path.splitext(source) if extension in extensions_excluded_from_precompile: for config_name, config in spec['configurations'].iteritems(): tool = MSVSProject.Tool('VCCLCompilerTool', {'UsePrecompiledHeader': '0', 'ForcedIncludeFiles': '$(NOINHERIT)'}) p.AddFileConfig(_FixPath(source), _ConfigFullName(config_name, config), {}, tools=[tool]) # Do nothing if there was no precompiled source. if extensions_excluded_from_precompile: DisableForSourceTree(sources) def _AddActions(actions_to_add, spec, relative_path_of_gyp_file): # Add actions. actions = spec.get('actions', []) # Don't setup_env every time. When all the actions are run together in one # batch file in VS, the PATH will grow too long. # Membership in this set means that the cygwin environment has been set up, # and does not need to be set up again. have_setup_env = set() for a in actions: # Attach actions to the gyp file if nothing else is there. inputs = a.get('inputs') or [relative_path_of_gyp_file] attached_to = inputs[0] need_setup_env = attached_to not in have_setup_env cmd = _BuildCommandLineForRule(spec, a, has_input_path=False, do_setup_env=need_setup_env) have_setup_env.add(attached_to) # Add the action. _AddActionStep(actions_to_add, inputs=inputs, outputs=a.get('outputs', []), description=a.get('message', a['action_name']), command=cmd) def _WriteMSVSUserFile(project_path, version, spec): # Add run_as and test targets. 
  if 'run_as' in spec:
    run_as = spec['run_as']
    action = run_as.get('action', [])
    environment = run_as.get('environment', [])
    working_directory = run_as.get('working_directory', '.')
  elif int(spec.get('test', 0)):
    # Test targets default to running the built binary with gtest timing.
    action = ['$(TargetPath)', '--gtest_print_time']
    environment = []
    working_directory = '.'
  else:
    return  # Nothing to add
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()


def _AddCopies(actions_to_add, spec):
  """Translates the spec's 'copies' entries into custom-build action steps."""
  copies = _GetCopies(spec)
  for inputs, outputs, cmd, description in copies:
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)


def _GetCopies(spec):
  """Returns (inputs, outputs, command, description) tuples for 'copies'."""
  copies = []
  # Add copies.
  for cpy in spec.get('copies', []):
    for src in cpy.get('files', []):
      dst = os.path.join(cpy['destination'], os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # A trailing slash means a directory copy: xcopy the whole tree.
        src_bare = src[:-1]
        base_dir = posixpath.split(src_bare)[0]
        outer_dir = posixpath.split(src_bare)[1]
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        # 'dummy_copies' keeps the step from being treated as up to date.
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single-file copy.  mkdir may fail when the directory already
        # exists, so ERRORLEVEL is reset before the copy proper.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies


def _GetPathDict(root, path):
  # |path| will eventually be empty (in the recursive calls) if it was
  # initially relative; otherwise it will eventually end up as '\', 'D:\',
  # etc.
  if not path or path.endswith(os.sep):
    return root
  parent, folder = os.path.split(path)
  # Recurse to (and create, if needed) the parent's dict first.
  parent_dict = _GetPathDict(root, parent)
  if folder not in parent_dict:
    parent_dict[folder] = dict()
  return parent_dict[folder]


def _DictsToFolders(base_path, bucket, flat):
  """Recursively converts the nested path dicts into solution entries."""
  # Convert to folders recursively.
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) == dict:
      folder_children = _DictsToFolders(os.path.join(base_path, folder),
                                        contents, flat)
      if flat:
        # Flat solutions hoist every entry to the top level.
        children += folder_children
      else:
        folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
                                             name='(' + folder + ')',
                                             entries=folder_children)
        children.append(folder_children)
    else:
      # Leaf: an actual project object.
      children.append(contents)
  return children


def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
  # the sole item in a folder which has the same name as the project. Bring
  # such projects up one level.
  if (type(node) == dict and
      len(node) == 1 and
      node.keys()[0] == parent + '.vcproj'):
    return node[node.keys()[0]]
  if type(node) != dict:
    return node
  for child in node:
    node[child] = _CollapseSingles(child, node[child])
  return node


def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Builds the solution folder hierarchy for the given projects."""
  root = {}
  # Convert into a tree of dicts on path.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    gyp_dir = os.path.dirname(gyp_file)
    path_dict = _GetPathDict(root, gyp_dir)
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one
  # entry.  In practice, this strips the top-level "src/" dir from the
  # hierarchy in the solution.
  while len(root) == 1 and type(root[root.keys()[0]]) == dict:
    root = root[root.keys()[0]]
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat) def _GetPathOfProject(qualified_target, spec, options, msvs_version): default_config = _GetDefaultConfiguration(spec) proj_filename = default_config.get('msvs_existing_vcproj') if not proj_filename: proj_filename = (spec['target_name'] + options.suffix + msvs_version.ProjectExtension()) build_file = gyp.common.BuildFile(qualified_target) proj_path = os.path.join(os.path.dirname(build_file), proj_filename) fix_prefix = None if options.generator_output: project_dir_path = os.path.dirname(os.path.abspath(proj_path)) proj_path = os.path.join(options.generator_output, proj_path) fix_prefix = gyp.common.RelativePath(project_dir_path, os.path.dirname(proj_path)) return proj_path, fix_prefix def _GetPlatformOverridesOfProject(spec): # Prepare a dict indicating which project configurations are used for which # solution configurations for this target. config_platform_overrides = {} for config_name, c in spec['configurations'].iteritems(): config_fullname = _ConfigFullName(config_name, c) platform = c.get('msvs_target_platform', _ConfigPlatform(c)) fixed_config_fullname = '%s|%s' % ( _ConfigBaseName(config_name, _ConfigPlatform(c)), platform) config_platform_overrides[config_fullname] = fixed_config_fullname return config_platform_overrides def _CreateProjectObjects(target_list, target_dicts, options, msvs_version): """Create a MSVSProject object for the targets found in target list. Arguments: target_list: the list of targets to generate project objects for. target_dicts: the dictionary of specifications. options: global generator options. msvs_version: the MSVSVersion object. Returns: A set of created projects, keyed by target. """ global fixpath_prefix # Generate each project. 
projects = {} for qualified_target in target_list: spec = target_dicts[qualified_target] if spec['toolset'] != 'target': raise GypError( 'Multiple toolsets not supported in msvs build (target %s)' % qualified_target) proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec, options, msvs_version) guid = _GetGuidOfProject(proj_path, spec) overrides = _GetPlatformOverridesOfProject(spec) build_file = gyp.common.BuildFile(qualified_target) # Create object for this project. obj = MSVSNew.MSVSProject( proj_path, name=spec['target_name'], guid=guid, spec=spec, build_file=build_file, config_platform_overrides=overrides, fixpath_prefix=fixpath_prefix) # Set project toolset if any (MS build only) if msvs_version.UsesVcxproj(): obj.set_msbuild_toolset( _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version)) projects[qualified_target] = obj # Set all the dependencies, but not if we are using an external builder like # ninja for project in projects.values(): if not project.spec.get('msvs_external_builder'): deps = project.spec.get('dependencies', []) deps = [projects[d] for d in deps] project.set_dependencies(deps) return projects def _InitNinjaFlavor(params, target_list, target_dicts): """Initialize targets for the ninja flavor. This sets up the necessary variables in the targets to generate msvs projects that use ninja as an external builder. The variables in the spec are only set if they have not been set. This allows individual specs to override the default values initialized here. Arguments: params: Params provided to the generator. target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. """ for qualified_target in target_list: spec = target_dicts[qualified_target] if spec.get('msvs_external_builder'): # The spec explicitly defined an external builder, so don't change it. 
continue path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe') spec['msvs_external_builder'] = 'ninja' if not spec.get('msvs_external_builder_out_dir'): gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) gyp_dir = os.path.dirname(gyp_file) configuration = '$(Configuration)' if params.get('target_arch') == 'x64': configuration += '_x64' spec['msvs_external_builder_out_dir'] = os.path.join( gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir), ninja_generator.ComputeOutputDir(params), configuration) if not spec.get('msvs_external_builder_build_cmd'): spec['msvs_external_builder_build_cmd'] = [ path_to_ninja, '-C', '$(OutDir)', '$(ProjectName)', ] if not spec.get('msvs_external_builder_clean_cmd'): spec['msvs_external_builder_clean_cmd'] = [ path_to_ninja, '-C', '$(OutDir)', '-tclean', '$(ProjectName)', ] def CalculateVariables(default_variables, params): """Generated variables that require params to be known.""" generator_flags = params.get('generator_flags', {}) # Select project file format version (if unset, default to auto detecting). msvs_version = MSVSVersion.SelectVisualStudioVersion( generator_flags.get('msvs_version', 'auto')) # Stash msvs_version for later (so we don't have to probe the system twice). params['msvs_version'] = msvs_version # Set a variable so conditions can be based on msvs_version. default_variables['MSVS_VERSION'] = msvs_version.ShortName() # To determine processor word size on Windows, in addition to checking # PROCESSOR_ARCHITECTURE (which reflects the word size of the current # process), it is also necessary to check PROCESSOR_ARCITEW6432 (which # contains the actual word size of the system when running thru WOW64). 
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0): default_variables['MSVS_OS_BITS'] = 64 else: default_variables['MSVS_OS_BITS'] = 32 if gyp.common.GetFlavor(params) == 'ninja': default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen' def PerformBuild(data, configurations, params): options = params['options'] msvs_version = params['msvs_version'] devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com') for build_file, build_file_dict in data.iteritems(): (build_file_root, build_file_ext) = os.path.splitext(build_file) if build_file_ext != '.gyp': continue sln_path = build_file_root + options.suffix + '.sln' if options.generator_output: sln_path = os.path.join(options.generator_output, sln_path) for config in configurations: arguments = [devenv, sln_path, '/Build', config] print 'Building [%s]: %s' % (config, arguments) rtn = subprocess.check_call(arguments) def GenerateOutput(target_list, target_dicts, data, params): """Generate .sln and .vcproj files. This is the entry point for this generator. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. data: Dictionary containing per .gyp data. """ global fixpath_prefix options = params['options'] # Get the project file format version back out of where we stashed it in # GeneratorCalculatedVariables. msvs_version = params['msvs_version'] generator_flags = params.get('generator_flags', {}) # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT. (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts) # Optionally use the large PDB workaround for targets marked with # 'msvs_large_pdb': 1. (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims( target_list, target_dicts, generator_default_variables) # Optionally configure each spec to use ninja as the external builder. 
if params.get('flavor') == 'ninja': _InitNinjaFlavor(params, target_list, target_dicts) # Prepare the set of configurations. configs = set() for qualified_target in target_list: spec = target_dicts[qualified_target] for config_name, config in spec['configurations'].iteritems(): configs.add(_ConfigFullName(config_name, config)) configs = list(configs) # Figure out all the projects that will be generated and their guids project_objects = _CreateProjectObjects(target_list, target_dicts, options, msvs_version) # Generate each project. missing_sources = [] for project in project_objects.values(): fixpath_prefix = project.fixpath_prefix missing_sources.extend(_GenerateProject(project, options, msvs_version, generator_flags)) fixpath_prefix = None for build_file in data: # Validate build_file extension if not build_file.endswith('.gyp'): continue sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln' if options.generator_output: sln_path = os.path.join(options.generator_output, sln_path) # Get projects in the solution, and their dependents. sln_projects = gyp.common.BuildFileTargets(target_list, build_file) sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects) # Create folder hierarchy. root_entries = _GatherSolutionFolders( sln_projects, project_objects, flat=msvs_version.FlatSolution()) # Create solution. sln = MSVSNew.MSVSSolution(sln_path, entries=root_entries, variants=configs, websiteProperties=False, version=msvs_version) sln.Write() if missing_sources: error_message = "Missing input files:\n" + \ '\n'.join(set(missing_sources)) if generator_flags.get('msvs_error_on_missing_sources', False): raise GypError(error_message) else: print >> sys.stdout, "Warning: " + error_message def _GenerateMSBuildFiltersFile(filters_path, source_files, rule_dependencies, extension_to_rule_name): """Generate the filters file. This file is used by Visual Studio to organize the presentation of source files into folders. 
Arguments: filters_path: The path of the file to be created. source_files: The hierarchical structure of all the sources. extension_to_rule_name: A dictionary mapping file extensions to rules. """ filter_group = [] source_group = [] _AppendFiltersForMSBuild('', source_files, rule_dependencies, extension_to_rule_name, filter_group, source_group) if filter_group: content = ['Project', {'ToolsVersion': '4.0', 'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003' }, ['ItemGroup'] + filter_group, ['ItemGroup'] + source_group ] easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True) elif os.path.exists(filters_path): # We don't need this filter anymore. Delete the old filter file. os.unlink(filters_path) def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies, extension_to_rule_name, filter_group, source_group): """Creates the list of filters and sources to be added in the filter file. Args: parent_filter_name: The name of the filter under which the sources are found. sources: The hierarchy of filters and sources to process. extension_to_rule_name: A dictionary mapping file extensions to rules. filter_group: The list to which filter entries will be appended. source_group: The list to which source entries will be appeneded. """ for source in sources: if isinstance(source, MSVSProject.Filter): # We have a sub-filter. Create the name of that sub-filter. if not parent_filter_name: filter_name = source.name else: filter_name = '%s\\%s' % (parent_filter_name, source.name) # Add the filter to the group. filter_group.append( ['Filter', {'Include': filter_name}, ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]]) # Recurse and add its dependents. _AppendFiltersForMSBuild(filter_name, source.contents, rule_dependencies, extension_to_rule_name, filter_group, source_group) else: # It's a source. Create a source entry. 
_, element = _MapFileToMsBuildSourceType(source, rule_dependencies, extension_to_rule_name) source_entry = [element, {'Include': source}] # Specify the filter it is part of, if any. if parent_filter_name: source_entry.append(['Filter', parent_filter_name]) source_group.append(source_entry) def _MapFileToMsBuildSourceType(source, rule_dependencies, extension_to_rule_name): """Returns the group and element type of the source file. Arguments: source: The source file name. extension_to_rule_name: A dictionary mapping file extensions to rules. Returns: A pair of (group this file should be part of, the label of element) """ _, ext = os.path.splitext(source) if ext in extension_to_rule_name: group = 'rule' element = extension_to_rule_name[ext] elif ext in ['.cc', '.cpp', '.c', '.cxx']: group = 'compile' element = 'ClCompile' elif ext in ['.h', '.hxx']: group = 'include' element = 'ClInclude' elif ext == '.rc': group = 'resource' element = 'ResourceCompile' elif ext == '.asm': group = 'masm' element = 'MASM' elif ext == '.idl': group = 'midl' element = 'Midl' elif source in rule_dependencies: group = 'rule_dependency' element = 'CustomBuild' else: group = 'none' element = 'None' return (group, element) def _GenerateRulesForMSBuild(output_dir, options, spec, sources, excluded_sources, props_files_of_rules, targets_files_of_rules, actions_to_add, rule_dependencies, extension_to_rule_name): # MSBuild rules are implemented using three files: an XML file, a .targets # file and a .props file. # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx # for more details. rules = spec.get('rules', []) rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))] rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))] msbuild_rules = [] for rule in rules_native: # Skip a rule with no action and no inputs. 
if 'action' not in rule and not rule.get('rule_sources', []): continue msbuild_rule = MSBuildRule(rule, spec) msbuild_rules.append(msbuild_rule) rule_dependencies.update(msbuild_rule.additional_dependencies.split(';')) extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name if msbuild_rules: base = spec['target_name'] + options.suffix props_name = base + '.props' targets_name = base + '.targets' xml_name = base + '.xml' props_files_of_rules.add(props_name) targets_files_of_rules.add(targets_name) props_path = os.path.join(output_dir, props_name) targets_path = os.path.join(output_dir, targets_name) xml_path = os.path.join(output_dir, xml_name) _GenerateMSBuildRulePropsFile(props_path, msbuild_rules) _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules) _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules) if rules_external: _GenerateExternalRules(rules_external, output_dir, spec, sources, options, actions_to_add) _AdjustSourcesForRules(rules, sources, excluded_sources, True) class MSBuildRule(object): """Used to store information used to generate an MSBuild rule. Attributes: rule_name: The rule name, sanitized to use in XML. target_name: The name of the target. after_targets: The name of the AfterTargets element. before_targets: The name of the BeforeTargets element. depends_on: The name of the DependsOn element. compute_output: The name of the ComputeOutput element. dirs_to_make: The name of the DirsToMake element. inputs: The name of the _inputs element. tlog: The name of the _tlog element. extension: The extension this rule applies to. description: The message displayed when this rule is invoked. additional_dependencies: A string listing additional dependencies. outputs: The outputs of this rule. command: The command used to run the rule. 
""" def __init__(self, rule, spec): self.display_name = rule['rule_name'] # Assure that the rule name is only characters and numbers self.rule_name = re.sub(r'\W', '_', self.display_name) # Create the various element names, following the example set by the # Visual Studio 2008 to 2010 conversion. I don't know if VS2010 # is sensitive to the exact names. self.target_name = '_' + self.rule_name self.after_targets = self.rule_name + 'AfterTargets' self.before_targets = self.rule_name + 'BeforeTargets' self.depends_on = self.rule_name + 'DependsOn' self.compute_output = 'Compute%sOutput' % self.rule_name self.dirs_to_make = self.rule_name + 'DirsToMake' self.inputs = self.rule_name + '_inputs' self.tlog = self.rule_name + '_tlog' self.extension = rule['extension'] if not self.extension.startswith('.'): self.extension = '.' + self.extension self.description = MSVSSettings.ConvertVCMacrosToMSBuild( rule.get('message', self.rule_name)) old_additional_dependencies = _FixPaths(rule.get('inputs', [])) self.additional_dependencies = ( ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in old_additional_dependencies])) old_outputs = _FixPaths(rule.get('outputs', [])) self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in old_outputs]) old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True, do_setup_env=True) self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command) def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules): """Generate the .props file.""" content = ['Project', {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}] for rule in msbuild_rules: content.extend([ ['PropertyGroup', {'Condition': "'$(%s)' == '' and '$(%s)' == '' and " "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets, rule.after_targets) }, [rule.before_targets, 'Midl'], [rule.after_targets, 'CustomBuild'], ], ['PropertyGroup', [rule.depends_on, {'Condition': "'$(ConfigurationType)' != 'Makefile'"}, '_SelectedFiles;$(%s)' % 
rule.depends_on ], ], ['ItemDefinitionGroup', [rule.rule_name, ['CommandLineTemplate', rule.command], ['Outputs', rule.outputs], ['ExecutionDescription', rule.description], ['AdditionalDependencies', rule.additional_dependencies], ], ] ]) easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True) def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules): """Generate the .targets file.""" content = ['Project', {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003' } ] item_group = [ 'ItemGroup', ['PropertyPageSchema', {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'} ] ] for rule in msbuild_rules: item_group.append( ['AvailableItemName', {'Include': rule.rule_name}, ['Targets', rule.target_name], ]) content.append(item_group) for rule in msbuild_rules: content.append( ['UsingTask', {'TaskName': rule.rule_name, 'TaskFactory': 'XamlTaskFactory', 'AssemblyName': 'Microsoft.Build.Tasks.v4.0' }, ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'], ]) for rule in msbuild_rules: rule_name = rule.rule_name target_outputs = '%%(%s.Outputs)' % rule_name target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);' '$(MSBuildProjectFile)') % (rule_name, rule_name) rule_inputs = '%%(%s.Identity)' % rule_name extension_condition = ("'%(Extension)'=='.obj' or " "'%(Extension)'=='.res' or " "'%(Extension)'=='.rsc' or " "'%(Extension)'=='.lib'") remove_section = [ 'ItemGroup', {'Condition': "'@(SelectedFiles)' != ''"}, [rule_name, {'Remove': '@(%s)' % rule_name, 'Condition': "'%(Identity)' != '@(SelectedFiles)'" } ] ] inputs_section = [ 'ItemGroup', [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}] ] logging_section = [ 'ItemGroup', [rule.tlog, {'Include': '%%(%s.Outputs)' % rule_name, 'Condition': ("'%%(%s.Outputs)' != '' and " "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name)) }, ['Source', "@(%s, '|')" % rule_name], ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs], ], ] 
message_section = [ 'Message', {'Importance': 'High', 'Text': '%%(%s.ExecutionDescription)' % rule_name } ] write_tlog_section = [ 'WriteLinesToFile', {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " "'true'" % (rule.tlog, rule.tlog), 'File': '$(IntDir)$(ProjectName).write.1.tlog', 'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog, rule.tlog) } ] read_tlog_section = [ 'WriteLinesToFile', {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " "'true'" % (rule.tlog, rule.tlog), 'File': '$(IntDir)$(ProjectName).read.1.tlog', 'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog) } ] command_and_input_section = [ rule_name, {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " "'true'" % (rule_name, rule_name), 'EchoOff': 'true', 'StandardOutputImportance': 'High', 'StandardErrorImportance': 'High', 'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name, 'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name, 'Inputs': rule_inputs } ] content.extend([ ['Target', {'Name': rule.target_name, 'BeforeTargets': '$(%s)' % rule.before_targets, 'AfterTargets': '$(%s)' % rule.after_targets, 'Condition': "'@(%s)' != ''" % rule_name, 'DependsOnTargets': '$(%s);%s' % (rule.depends_on, rule.compute_output), 'Outputs': target_outputs, 'Inputs': target_inputs }, remove_section, inputs_section, logging_section, message_section, write_tlog_section, read_tlog_section, command_and_input_section, ], ['PropertyGroup', ['ComputeLinkInputsTargets', '$(ComputeLinkInputsTargets);', '%s;' % rule.compute_output ], ['ComputeLibInputsTargets', '$(ComputeLibInputsTargets);', '%s;' % rule.compute_output ], ], ['Target', {'Name': rule.compute_output, 'Condition': "'@(%s)' != ''" % rule_name }, ['ItemGroup', [rule.dirs_to_make, {'Condition': "'@(%s)' != '' and " "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name), 'Include': '%%(%s.Outputs)' % rule_name } ], ['Link', {'Include': '%%(%s.Identity)' % rule.dirs_to_make, 
'Condition': extension_condition } ], ['Lib', {'Include': '%%(%s.Identity)' % rule.dirs_to_make, 'Condition': extension_condition } ], ['ImpLib', {'Include': '%%(%s.Identity)' % rule.dirs_to_make, 'Condition': extension_condition } ], ], ['MakeDir', {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" % rule.dirs_to_make) } ] ], ]) easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True) def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules): # Generate the .xml file content = [ 'ProjectSchemaDefinitions', {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;' 'assembly=Microsoft.Build.Framework'), 'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml', 'xmlns:sys': 'clr-namespace:System;assembly=mscorlib', 'xmlns:transformCallback': 'Microsoft.Cpp.Dev10.ConvertPropertyCallback' } ] for rule in msbuild_rules: content.extend([ ['Rule', {'Name': rule.rule_name, 'PageTemplate': 'tool', 'DisplayName': rule.display_name, 'Order': '200' }, ['Rule.DataSource', ['DataSource', {'Persistence': 'ProjectFile', 'ItemType': rule.rule_name } ] ], ['Rule.Categories', ['Category', {'Name': 'General'}, ['Category.DisplayName', ['sys:String', 'General'], ], ], ['Category', {'Name': 'Command Line', 'Subtype': 'CommandLine' }, ['Category.DisplayName', ['sys:String', 'Command Line'], ], ], ], ['StringListProperty', {'Name': 'Inputs', 'Category': 'Command Line', 'IsRequired': 'true', 'Switch': ' ' }, ['StringListProperty.DataSource', ['DataSource', {'Persistence': 'ProjectFile', 'ItemType': rule.rule_name, 'SourceType': 'Item' } ] ], ], ['StringProperty', {'Name': 'CommandLineTemplate', 'DisplayName': 'Command Line', 'Visible': 'False', 'IncludeInCommandLine': 'False' } ], ['DynamicEnumProperty', {'Name': rule.before_targets, 'Category': 'General', 'EnumProvider': 'Targets', 'IncludeInCommandLine': 'False' }, ['DynamicEnumProperty.DisplayName', ['sys:String', 'Execute Before'], ], ['DynamicEnumProperty.Description', ['sys:String', 'Specifies the targets 
for the build customization' ' to run before.' ], ], ['DynamicEnumProperty.ProviderSettings', ['NameValuePair', {'Name': 'Exclude', 'Value': '^%s|^Compute' % rule.before_targets } ] ], ['DynamicEnumProperty.DataSource', ['DataSource', {'Persistence': 'ProjectFile', 'HasConfigurationCondition': 'true' } ] ], ], ['DynamicEnumProperty', {'Name': rule.after_targets, 'Category': 'General', 'EnumProvider': 'Targets', 'IncludeInCommandLine': 'False' }, ['DynamicEnumProperty.DisplayName', ['sys:String', 'Execute After'], ], ['DynamicEnumProperty.Description', ['sys:String', ('Specifies the targets for the build customization' ' to run after.') ], ], ['DynamicEnumProperty.ProviderSettings', ['NameValuePair', {'Name': 'Exclude', 'Value': '^%s|^Compute' % rule.after_targets } ] ], ['DynamicEnumProperty.DataSource', ['DataSource', {'Persistence': 'ProjectFile', 'ItemType': '', 'HasConfigurationCondition': 'true' } ] ], ], ['StringListProperty', {'Name': 'Outputs', 'DisplayName': 'Outputs', 'Visible': 'False', 'IncludeInCommandLine': 'False' } ], ['StringProperty', {'Name': 'ExecutionDescription', 'DisplayName': 'Execution Description', 'Visible': 'False', 'IncludeInCommandLine': 'False' } ], ['StringListProperty', {'Name': 'AdditionalDependencies', 'DisplayName': 'Additional Dependencies', 'IncludeInCommandLine': 'False', 'Visible': 'false' } ], ['StringProperty', {'Subtype': 'AdditionalOptions', 'Name': 'AdditionalOptions', 'Category': 'Command Line' }, ['StringProperty.DisplayName', ['sys:String', 'Additional Options'], ], ['StringProperty.Description', ['sys:String', 'Additional Options'], ], ], ], ['ItemType', {'Name': rule.rule_name, 'DisplayName': rule.display_name } ], ['FileExtension', {'Name': '*' + rule.extension, 'ContentType': rule.rule_name } ], ['ContentType', {'Name': rule.rule_name, 'DisplayName': '', 'ItemType': rule.rule_name } ] ]) easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True) def _GetConfigurationAndPlatform(name, settings): 
configuration = name.rsplit('_', 1)[0] platform = settings.get('msvs_configuration_platform', 'Win32') return (configuration, platform) def _GetConfigurationCondition(name, settings): return (r"'$(Configuration)|$(Platform)'=='%s|%s'" % _GetConfigurationAndPlatform(name, settings)) def _GetMSBuildProjectConfigurations(configurations): group = ['ItemGroup', {'Label': 'ProjectConfigurations'}] for (name, settings) in sorted(configurations.iteritems()): configuration, platform = _GetConfigurationAndPlatform(name, settings) designation = '%s|%s' % (configuration, platform) group.append( ['ProjectConfiguration', {'Include': designation}, ['Configuration', configuration], ['Platform', platform]]) return [group] def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name): namespace = os.path.splitext(gyp_file_name)[0] properties = [ ['PropertyGroup', {'Label': 'Globals'}, ['ProjectGuid', guid], ['Keyword', 'Win32Proj'], ['RootNamespace', namespace], ['IgnoreWarnCompileDuplicatedFilename', 'true'], ] ] if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \ os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64': properties[0].append(['PreferredToolArchitecture', 'x64']) if spec.get('msvs_enable_winrt'): properties[0].append(['DefaultLanguage', 'en-US']) properties[0].append(['AppContainerApplication', 'true']) if spec.get('msvs_application_type_revision'): app_type_revision = spec.get('msvs_application_type_revision') properties[0].append(['ApplicationTypeRevision', app_type_revision]) else: properties[0].append(['ApplicationTypeRevision', '8.1']) if spec.get('msvs_target_platform_version'): target_platform_version = spec.get('msvs_target_platform_version') properties[0].append(['WindowsTargetPlatformVersion', target_platform_version]) if spec.get('msvs_target_platform_minversion'): target_platform_minversion = spec.get('msvs_target_platform_minversion') properties[0].append(['WindowsTargetPlatformMinVersion', target_platform_minversion]) else: 
properties[0].append(['WindowsTargetPlatformMinVersion', target_platform_version]) if spec.get('msvs_enable_winphone'): properties[0].append(['ApplicationType', 'Windows Phone']) else: properties[0].append(['ApplicationType', 'Windows Store']) return properties def _GetMSBuildConfigurationDetails(spec, build_file): properties = {} for name, settings in spec['configurations'].iteritems(): msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file) condition = _GetConfigurationCondition(name, settings) character_set = msbuild_attributes.get('CharacterSet') _AddConditionalProperty(properties, condition, 'ConfigurationType', msbuild_attributes['ConfigurationType']) if character_set: if 'msvs_enable_winrt' not in spec : _AddConditionalProperty(properties, condition, 'CharacterSet', character_set) return _GetMSBuildPropertyGroup(spec, 'Configuration', properties) def _GetMSBuildLocalProperties(msbuild_toolset): # Currently the only local property we support is PlatformToolset properties = {} if msbuild_toolset: properties = [ ['PropertyGroup', {'Label': 'Locals'}, ['PlatformToolset', msbuild_toolset], ] ] return properties def _GetMSBuildPropertySheets(configurations): user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props' additional_props = {} props_specified = False for name, settings in sorted(configurations.iteritems()): configuration = _GetConfigurationCondition(name, settings) if settings.has_key('msbuild_props'): additional_props[configuration] = _FixPaths(settings['msbuild_props']) props_specified = True else: additional_props[configuration] = '' if not props_specified: return [ ['ImportGroup', {'Label': 'PropertySheets'}, ['Import', {'Project': user_props, 'Condition': "exists('%s')" % user_props, 'Label': 'LocalAppDataPlatform' } ] ] ] else: sheets = [] for condition, props in additional_props.iteritems(): import_group = [ 'ImportGroup', {'Label': 'PropertySheets', 'Condition': condition }, ['Import', {'Project': user_props, 'Condition': 
"exists('%s')" % user_props, 'Label': 'LocalAppDataPlatform' } ] ] for props_file in props: import_group.append(['Import', {'Project':props_file}]) sheets.append(import_group) return sheets def _ConvertMSVSBuildAttributes(spec, config, build_file): config_type = _GetMSVSConfigurationType(spec, build_file) msvs_attributes = _GetMSVSAttributes(spec, config, config_type) msbuild_attributes = {} for a in msvs_attributes: if a in ['IntermediateDirectory', 'OutputDirectory']: directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a]) if not directory.endswith('\\'): directory += '\\' msbuild_attributes[a] = directory elif a == 'CharacterSet': msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a]) elif a == 'ConfigurationType': msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a]) else: print 'Warning: Do not know how to convert MSVS attribute ' + a return msbuild_attributes def _ConvertMSVSCharacterSet(char_set): if char_set.isdigit(): char_set = { '0': 'MultiByte', '1': 'Unicode', '2': 'MultiByte', }[char_set] return char_set def _ConvertMSVSConfigurationType(config_type): if config_type.isdigit(): config_type = { '1': 'Application', '2': 'DynamicLibrary', '4': 'StaticLibrary', '10': 'Utility' }[config_type] return config_type def _GetMSBuildAttributes(spec, config, build_file): if 'msbuild_configuration_attributes' not in config: msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file) else: config_type = _GetMSVSConfigurationType(spec, build_file) config_type = _ConvertMSVSConfigurationType(config_type) msbuild_attributes = config.get('msbuild_configuration_attributes', {}) msbuild_attributes.setdefault('ConfigurationType', config_type) output_dir = msbuild_attributes.get('OutputDirectory', '$(SolutionDir)$(Configuration)') msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\' if 'IntermediateDirectory' not in msbuild_attributes: intermediate = _FixPath('$(Configuration)') + '\\' 
msbuild_attributes['IntermediateDirectory'] = intermediate if 'CharacterSet' in msbuild_attributes: msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet( msbuild_attributes['CharacterSet']) if 'TargetName' not in msbuild_attributes: prefix = spec.get('product_prefix', '') product_name = spec.get('product_name', '$(ProjectName)') target_name = prefix + product_name msbuild_attributes['TargetName'] = target_name if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec: ext = spec.get('product_extension') msbuild_attributes['TargetExt'] = '.' + ext if spec.get('msvs_external_builder'): external_out_dir = spec.get('msvs_external_builder_out_dir', '.') msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\' # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile' # (depending on the tool used) to avoid MSB8012 warning. msbuild_tool_map = { 'executable': 'Link', 'shared_library': 'Link', 'loadable_module': 'Link', 'static_library': 'Lib', } msbuild_tool = msbuild_tool_map.get(spec['type']) if msbuild_tool: msbuild_settings = config['finalized_msbuild_settings'] out_file = msbuild_settings[msbuild_tool].get('OutputFile') if out_file: msbuild_attributes['TargetPath'] = _FixPath(out_file) target_ext = msbuild_settings[msbuild_tool].get('TargetExt') if target_ext: msbuild_attributes['TargetExt'] = target_ext return msbuild_attributes def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file): # TODO(jeanluc) We could optimize out the following and do it only if # there are actions. # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'. new_paths = [] cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0] if cygwin_dirs: cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs) new_paths.append(cyg_path) # TODO(jeanluc) Change the convention to have both a cygwin_dir and a # python_dir. 
python_path = cyg_path.replace('cygwin\\bin', 'python_26') new_paths.append(python_path) if new_paths: new_paths = '$(ExecutablePath);' + ';'.join(new_paths) properties = {} for (name, configuration) in sorted(configurations.iteritems()): condition = _GetConfigurationCondition(name, configuration) attributes = _GetMSBuildAttributes(spec, configuration, build_file) msbuild_settings = configuration['finalized_msbuild_settings'] _AddConditionalProperty(properties, condition, 'IntDir', attributes['IntermediateDirectory']) _AddConditionalProperty(properties, condition, 'OutDir', attributes['OutputDirectory']) _AddConditionalProperty(properties, condition, 'TargetName', attributes['TargetName']) if 'TargetExt' in attributes: _AddConditionalProperty(properties, condition, 'TargetExt', attributes['TargetExt']) if attributes.get('TargetPath'): _AddConditionalProperty(properties, condition, 'TargetPath', attributes['TargetPath']) if attributes.get('TargetExt'): _AddConditionalProperty(properties, condition, 'TargetExt', attributes['TargetExt']) if new_paths: _AddConditionalProperty(properties, condition, 'ExecutablePath', new_paths) tool_settings = msbuild_settings.get('', {}) for name, value in sorted(tool_settings.iteritems()): formatted_value = _GetValueFormattedForMSBuild('', name, value) _AddConditionalProperty(properties, condition, name, formatted_value) return _GetMSBuildPropertyGroup(spec, None, properties) def _AddConditionalProperty(properties, condition, name, value): """Adds a property / conditional value pair to a dictionary. Arguments: properties: The dictionary to be modified. The key is the name of the property. The value is itself a dictionary; its key is the value and the value a list of condition for which this value is true. condition: The condition under which the named property has the value. name: The name of the property. value: The value of the property. 
""" if name not in properties: properties[name] = {} values = properties[name] if value not in values: values[value] = [] conditions = values[value] conditions.append(condition) # Regex for msvs variable references ( i.e. $(FOO) ). MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)') def _GetMSBuildPropertyGroup(spec, label, properties): """Returns a PropertyGroup definition for the specified properties. Arguments: spec: The target project dict. label: An optional label for the PropertyGroup. properties: The dictionary to be converted. The key is the name of the property. The value is itself a dictionary; its key is the value and the value a list of condition for which this value is true. """ group = ['PropertyGroup'] if label: group.append({'Label': label}) num_configurations = len(spec['configurations']) def GetEdges(node): # Use a definition of edges such that user_of_variable -> used_varible. # This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. edges = set() for value in sorted(properties[node].keys()): # Add to edges all $(...) references to variables. # # Variable references that refer to names not in properties are excluded # These can exist for instance to refer built in definitions like # $(SolutionDir). # # Self references are ignored. Self reference is used in a few places to # append to the default value. I.e. PATH=$(PATH);other_path edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value) if v in properties and v != node])) return edges properties_ordered = gyp.common.TopologicallySorted( properties.keys(), GetEdges) # Walk properties in the reverse of a topological sort on # user_of_variable -> used_variable as this ensures variables are # defined before they are used. 
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) for name in reversed(properties_ordered): values = properties[name] for value, conditions in sorted(values.iteritems()): if len(conditions) == num_configurations: # If the value is the same all configurations, # just add one unconditional entry. group.append([name, value]) else: for condition in conditions: group.append([name, {'Condition': condition}, value]) return [group] def _GetMSBuildToolSettingsSections(spec, configurations): groups = [] for (name, configuration) in sorted(configurations.iteritems()): msbuild_settings = configuration['finalized_msbuild_settings'] group = ['ItemDefinitionGroup', {'Condition': _GetConfigurationCondition(name, configuration)} ] for tool_name, tool_settings in sorted(msbuild_settings.iteritems()): # Skip the tool named '' which is a holder of global settings handled # by _GetMSBuildConfigurationGlobalProperties. if tool_name: if tool_settings: tool = [tool_name] for name, value in sorted(tool_settings.iteritems()): formatted_value = _GetValueFormattedForMSBuild(tool_name, name, value) tool.append([name, formatted_value]) group.append(tool) groups.append(group) return groups def _FinalizeMSBuildSettings(spec, configuration): if 'msbuild_settings' in configuration: converted = False msbuild_settings = configuration['msbuild_settings'] MSVSSettings.ValidateMSBuildSettings(msbuild_settings) else: converted = True msvs_settings = configuration.get('msvs_settings', {}) msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings) include_dirs, midl_include_dirs, resource_include_dirs = \ _GetIncludeDirs(configuration) libraries = _GetLibraries(spec) library_dirs = _GetLibraryDirs(configuration) out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True) target_ext = _GetOutputTargetExt(spec) defines = _GetDefines(configuration) if converted: # Visual Studio 2010 has TR1 defines = [d for d in defines if d != '_HAS_TR1=0'] # Warn of ignored settings 
ignored_settings = ['msvs_tool_files'] for ignored_setting in ignored_settings: value = configuration.get(ignored_setting) if value: print ('Warning: The automatic conversion to MSBuild does not handle ' '%s. Ignoring setting of %s' % (ignored_setting, str(value))) defines = [_EscapeCppDefineForMSBuild(d) for d in defines] disabled_warnings = _GetDisabledWarnings(configuration) prebuild = configuration.get('msvs_prebuild') postbuild = configuration.get('msvs_postbuild') def_file = _GetModuleDefinition(spec) precompiled_header = configuration.get('msvs_precompiled_header') # Add the information to the appropriate tool # TODO(jeanluc) We could optimize and generate these settings only if # the corresponding files are found, e.g. don't generate ResourceCompile # if you don't have any resources. _ToolAppend(msbuild_settings, 'ClCompile', 'AdditionalIncludeDirectories', include_dirs) _ToolAppend(msbuild_settings, 'Midl', 'AdditionalIncludeDirectories', midl_include_dirs) _ToolAppend(msbuild_settings, 'ResourceCompile', 'AdditionalIncludeDirectories', resource_include_dirs) # Add in libraries, note that even for empty libraries, we want this # set, to prevent inheriting default libraries from the enviroment. _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries) _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories', library_dirs) if out_file: _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file, only_if_unset=True) if target_ext: _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext, only_if_unset=True) # Add defines. _ToolAppend(msbuild_settings, 'ClCompile', 'PreprocessorDefinitions', defines) _ToolAppend(msbuild_settings, 'ResourceCompile', 'PreprocessorDefinitions', defines) # Add disabled warnings. _ToolAppend(msbuild_settings, 'ClCompile', 'DisableSpecificWarnings', disabled_warnings) # Turn on precompiled headers if appropriate. 
if precompiled_header: precompiled_header = os.path.split(precompiled_header)[1] _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use') _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeaderFile', precompiled_header) _ToolAppend(msbuild_settings, 'ClCompile', 'ForcedIncludeFiles', [precompiled_header]) else: _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing') # Turn off WinRT compilation _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false') # Turn on import libraries if appropriate if spec.get('msvs_requires_importlibrary'): _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false') # Loadable modules don't generate import libraries; # tell dependent projects to not expect one. if spec['type'] == 'loadable_module': _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true') # Set the module definition file if any. if def_file: _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file) configuration['finalized_msbuild_settings'] = msbuild_settings if prebuild: _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild) if postbuild: _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild) def _GetValueFormattedForMSBuild(tool_name, name, value): if type(value) == list: # For some settings, VS2010 does not automatically extends the settings # TODO(jeanluc) Is this what we want? if name in ['AdditionalIncludeDirectories', 'AdditionalLibraryDirectories', 'AdditionalOptions', 'DelayLoadDLLs', 'DisableSpecificWarnings', 'PreprocessorDefinitions']: value.append('%%(%s)' % name) # For most tools, entries in a list should be separated with ';' but some # settings use a space. Check for those first. 
exceptions = { 'ClCompile': ['AdditionalOptions'], 'Link': ['AdditionalOptions'], 'Lib': ['AdditionalOptions']} if tool_name in exceptions and name in exceptions[tool_name]: char = ' ' else: char = ';' formatted_value = char.join( [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value]) else: formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value) return formatted_value def _VerifySourcesExist(sources, root_dir): """Verifies that all source files exist on disk. Checks that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation but no otherwise visible errors. Arguments: sources: A recursive list of Filter/file names. root_dir: The root directory for the relative path names. Returns: A list of source files that cannot be found on disk. """ missing_sources = [] for source in sources: if isinstance(source, MSVSProject.Filter): missing_sources.extend(_VerifySourcesExist(source.contents, root_dir)) else: if '$' not in source: full_path = os.path.join(root_dir, source) if not os.path.exists(full_path): missing_sources.append(full_path) return missing_sources def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies, extension_to_rule_name, actions_spec, sources_handled_by_action, list_excluded): groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule', 'rule_dependency'] grouped_sources = {} for g in groups: grouped_sources[g] = [] _AddSources2(spec, sources, exclusions, grouped_sources, rule_dependencies, extension_to_rule_name, sources_handled_by_action, list_excluded) sources = [] for g in groups: if grouped_sources[g]: sources.append(['ItemGroup'] + grouped_sources[g]) if actions_spec: sources.append(['ItemGroup'] + actions_spec) return sources def _AddSources2(spec, sources, exclusions, grouped_sources, rule_dependencies, extension_to_rule_name, sources_handled_by_action, list_excluded): extensions_excluded_from_precompile = [] for source in sources: if isinstance(source, 
MSVSProject.Filter): _AddSources2(spec, source.contents, exclusions, grouped_sources, rule_dependencies, extension_to_rule_name, sources_handled_by_action, list_excluded) else: if not source in sources_handled_by_action: detail = [] excluded_configurations = exclusions.get(source, []) if len(excluded_configurations) == len(spec['configurations']): detail.append(['ExcludedFromBuild', 'true']) else: for config_name, configuration in sorted(excluded_configurations): condition = _GetConfigurationCondition(config_name, configuration) detail.append(['ExcludedFromBuild', {'Condition': condition}, 'true']) # Add precompile if needed for config_name, configuration in spec['configurations'].iteritems(): precompiled_source = configuration.get('msvs_precompiled_source', '') if precompiled_source != '': precompiled_source = _FixPath(precompiled_source) if not extensions_excluded_from_precompile: # If the precompiled header is generated by a C source, we must # not try to use it for C++ sources, and vice versa. basename, extension = os.path.splitext(precompiled_source) if extension == '.c': extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx'] else: extensions_excluded_from_precompile = ['.c'] if precompiled_source == source: condition = _GetConfigurationCondition(config_name, configuration) detail.append(['PrecompiledHeader', {'Condition': condition}, 'Create' ]) else: # Turn off precompiled header usage for source files of a # different type than the file that generated the # precompiled header. 
for extension in extensions_excluded_from_precompile: if source.endswith(extension): detail.append(['PrecompiledHeader', '']) detail.append(['ForcedIncludeFiles', '']) group, element = _MapFileToMsBuildSourceType(source, rule_dependencies, extension_to_rule_name) grouped_sources[group].append([element, {'Include': source}] + detail) def _GetMSBuildProjectReferences(project): references = [] if project.dependencies: group = ['ItemGroup'] for dependency in project.dependencies: guid = dependency.guid project_dir = os.path.split(project.path)[0] relative_path = gyp.common.RelativePath(dependency.path, project_dir) project_ref = ['ProjectReference', {'Include': relative_path}, ['Project', guid], ['ReferenceOutputAssembly', 'false'] ] for config in dependency.spec.get('configurations', {}).itervalues(): # If it's disabled in any config, turn it off in the reference. if config.get('msvs_2010_disable_uldi_when_referenced', 0): project_ref.append(['UseLibraryDependencyInputs', 'false']) break group.append(project_ref) references.append(group) return references def _GenerateMSBuildProject(project, options, version, generator_flags): spec = project.spec configurations = spec['configurations'] project_dir, project_file_name = os.path.split(project.path) gyp.common.EnsureDirExists(project.path) # Prepare list of sources and excluded sources. gyp_path = _NormalizedSource(project.build_file) relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir) gyp_file = os.path.split(project.build_file)[1] sources, excluded_sources = _PrepareListOfSources(spec, generator_flags, gyp_file) # Add rules. actions_to_add = {} props_files_of_rules = set() targets_files_of_rules = set() rule_dependencies = set() extension_to_rule_name = {} list_excluded = generator_flags.get('msvs_list_excluded_files', True) # Don't generate rules if we are using an external builder like ninja. 
if not spec.get('msvs_external_builder'): _GenerateRulesForMSBuild(project_dir, options, spec, sources, excluded_sources, props_files_of_rules, targets_files_of_rules, actions_to_add, rule_dependencies, extension_to_rule_name) else: rules = spec.get('rules', []) _AdjustSourcesForRules(rules, sources, excluded_sources, True) sources, excluded_sources, excluded_idl = ( _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir, sources, excluded_sources, list_excluded, version)) # Don't add actions if we are using an external builder like ninja. if not spec.get('msvs_external_builder'): _AddActions(actions_to_add, spec, project.build_file) _AddCopies(actions_to_add, spec) # NOTE: this stanza must appear after all actions have been decided. # Don't excluded sources with actions attached, or they won't run. excluded_sources = _FilterActionsFromExcluded( excluded_sources, actions_to_add) exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl) actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild( spec, actions_to_add) _GenerateMSBuildFiltersFile(project.path + '.filters', sources, rule_dependencies, extension_to_rule_name) missing_sources = _VerifySourcesExist(sources, project_dir) for configuration in configurations.itervalues(): _FinalizeMSBuildSettings(spec, configuration) # Add attributes to root element import_default_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]] import_cpp_props_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]] import_cpp_targets_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]] import_masm_props_section = [ ['Import', {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]] import_masm_targets_section = [ ['Import', {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]] macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]] content = [ 'Project', {'xmlns': 
'http://schemas.microsoft.com/developer/msbuild/2003', 'ToolsVersion': version.ProjectVersion(), 'DefaultTargets': 'Build' }] content += _GetMSBuildProjectConfigurations(configurations) content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name) content += import_default_section content += _GetMSBuildConfigurationDetails(spec, project.build_file) if spec.get('msvs_enable_winphone'): content += _GetMSBuildLocalProperties('v120_wp81') else: content += _GetMSBuildLocalProperties(project.msbuild_toolset) content += import_cpp_props_section content += import_masm_props_section content += _GetMSBuildExtensions(props_files_of_rules) content += _GetMSBuildPropertySheets(configurations) content += macro_section content += _GetMSBuildConfigurationGlobalProperties(spec, configurations, project.build_file) content += _GetMSBuildToolSettingsSections(spec, configurations) content += _GetMSBuildSources( spec, sources, exclusions, rule_dependencies, extension_to_rule_name, actions_spec, sources_handled_by_action, list_excluded) content += _GetMSBuildProjectReferences(project) content += import_cpp_targets_section content += import_masm_targets_section content += _GetMSBuildExtensionTargets(targets_files_of_rules) if spec.get('msvs_external_builder'): content += _GetMSBuildExternalBuilderTargets(spec) # TODO(jeanluc) File a bug to get rid of runas. We had in MSVS: # has_run_as = _WriteMSVSUserFile(project.path, version, spec) easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True) return missing_sources def _GetMSBuildExternalBuilderTargets(spec): """Return a list of MSBuild targets for external builders. The "Build" and "Clean" targets are always generated. If the spec contains 'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also be generated, to support building selected C/C++ files. Arguments: spec: The gyp target spec. Returns: List of MSBuild 'Target' specs. 
""" build_cmd = _BuildCommandLineForRuleRaw( spec, spec['msvs_external_builder_build_cmd'], False, False, False, False) build_target = ['Target', {'Name': 'Build'}] build_target.append(['Exec', {'Command': build_cmd}]) clean_cmd = _BuildCommandLineForRuleRaw( spec, spec['msvs_external_builder_clean_cmd'], False, False, False, False) clean_target = ['Target', {'Name': 'Clean'}] clean_target.append(['Exec', {'Command': clean_cmd}]) targets = [build_target, clean_target] if spec.get('msvs_external_builder_clcompile_cmd'): clcompile_cmd = _BuildCommandLineForRuleRaw( spec, spec['msvs_external_builder_clcompile_cmd'], False, False, False, False) clcompile_target = ['Target', {'Name': 'ClCompile'}] clcompile_target.append(['Exec', {'Command': clcompile_cmd}]) targets.append(clcompile_target) return targets def _GetMSBuildExtensions(props_files_of_rules): extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}] for props_file in props_files_of_rules: extensions.append(['Import', {'Project': props_file}]) return [extensions] def _GetMSBuildExtensionTargets(targets_files_of_rules): targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}] for targets_file in sorted(targets_files_of_rules): targets_node.append(['Import', {'Project': targets_file}]) return [targets_node] def _GenerateActionsForMSBuild(spec, actions_to_add): """Add actions accumulated into an actions_to_add, merging as needed. Arguments: spec: the target project dict actions_to_add: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. Returns: A pair of (action specification, the sources handled by this action). 
""" sources_handled_by_action = OrderedSet() actions_spec = [] for primary_input, actions in actions_to_add.iteritems(): inputs = OrderedSet() outputs = OrderedSet() descriptions = [] commands = [] for action in actions: inputs.update(OrderedSet(action['inputs'])) outputs.update(OrderedSet(action['outputs'])) descriptions.append(action['description']) cmd = action['command'] # For most actions, add 'call' so that actions that invoke batch files # return and continue executing. msbuild_use_call provides a way to # disable this but I have not seen any adverse effect from doing that # for everything. if action.get('msbuild_use_call', True): cmd = 'call ' + cmd commands.append(cmd) # Add the custom build action for one input file. description = ', and also '.join(descriptions) # We can't join the commands simply with && because the command line will # get too long. See also _AddActions: cygwin's setup_env mustn't be called # for every invocation or the command that sets the PATH will grow too # long. command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%' for c in commands]) _AddMSBuildAction(spec, primary_input, inputs, outputs, command, description, sources_handled_by_action, actions_spec) return actions_spec, sources_handled_by_action def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description, sources_handled_by_action, actions_spec): command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd) primary_input = _FixPath(primary_input) inputs_array = _FixPaths(inputs) outputs_array = _FixPaths(outputs) additional_inputs = ';'.join([i for i in inputs_array if i != primary_input]) outputs = ';'.join(outputs_array) sources_handled_by_action.add(primary_input) action_spec = ['CustomBuild', {'Include': primary_input}] action_spec.extend( # TODO(jeanluc) 'Document' for all or just if as_sources? 
[['FileType', 'Document'], ['Command', command], ['Message', description], ['Outputs', outputs] ]) if additional_inputs: action_spec.append(['AdditionalInputs', additional_inputs]) actions_spec.append(action_spec)
mit
sandhujasmine/conda-build
conda_build/main_inspect.py
3
14583
# (c) Continuum Analytics, Inc. / http://continuum.io # All Rights Reserved # # conda is distributed under the terms of the BSD 3-clause license. # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. from __future__ import absolute_import, division, print_function import sys import re import os from os.path import abspath, join, dirname, exists, basename from collections import defaultdict from operator import itemgetter from conda.misc import which_package from conda.cli.common import add_parser_prefix, get_prefix, InstalledPackages from conda.cli.conda_argparse import ArgumentParser import conda.install as ci from conda.api import get_index from conda.cli.install import check_install from conda.config import get_default_urls, normalize_urls from conda_build.main_build import args_func from conda_build.ldd import get_linkages, get_package_obj_files, get_untracked_obj_files from conda_build.macho import get_rpaths, human_filetype from conda_build.utils import groupby, getter, comma_join def main(): p = ArgumentParser( description='Tools for inspecting conda packages.', epilog=""" Run --help on the subcommands like 'conda inspect linkages --help' to see the options available. """, ) subcommand = p.add_subparsers( dest='subcommand', ) linkages_help = """ Investigates linkages of binary libraries in a package (works in Linux and OS X). This is an advanced command to aid building packages that link against C libraries. Aggregates the output of ldd (on Linux) and otool -L (on OS X) by dependent packages. Useful for finding broken links, or links against system libraries that ought to be dependent conda packages. 
""" linkages = subcommand.add_parser( "linkages", # help controls conda inspect -h and description controls conda # inspect linkages -h help=linkages_help, description=linkages_help, ) linkages.add_argument( 'packages', action='store', nargs='*', help='Conda packages to inspect.', ).completer=InstalledPackages linkages.add_argument( '--untracked', action='store_true', help="""Inspect the untracked files in the environment. This is useful when used in conjunction with conda build --build-only.""", ) linkages.add_argument( '--show-files', action="store_true", help="Show the files in the package that link to each library", ) linkages.add_argument( '--groupby', action='store', default='package', choices=('package', 'dependency'), help="""Attribute to group by (default: %(default)s). Useful when used in conjunction with --all.""", ) linkages.add_argument( '--all', action='store_true', help="Generate a report for all packages in the environment.", ) add_parser_prefix(linkages) objects_help = """ Investigate binary object files in a package (only works on OS X). This is an advanced command to aid building packages that have compiled libraries. Aggregates the output of otool on all the binary object files in a package. """ objects = subcommand.add_parser( "objects", help=objects_help, description=objects_help, ) objects.add_argument( 'packages', action='store', nargs='*', help='Conda packages to inspect.', ).completer=InstalledPackages objects.add_argument( '--untracked', action='store_true', help="""Inspect the untracked files in the environment. 
This is useful when used in conjunction with conda build --build-only.""", ) # TODO: Allow groupby to include the package (like for --all) objects.add_argument( '--groupby', action='store', default='filename', choices=('filename', 'filetype', 'rpath'), help='Attribute to group by (default: %(default)s).', ) objects.add_argument( '--all', action='store_true', help="Generate a report for all packages in the environment.", ) add_parser_prefix(objects) channels_help = """ Tools for investigating conda channels. """ channels = subcommand.add_parser( "channels", help=channels_help, description=channels_help, ) channels.add_argument( '--verbose', action='store_true', help="""Show verbose output. Note that error output to stderr will always be shown regardless of this flag. """, ) channels.add_argument( '--test-installable', '-t', action='store_true', help="""Test every package in the channel to see if it is installable by conda.""", ) channels.add_argument( "channel", nargs='?', default="defaults", help="The channel to test. The default is %(default)s." 
) p.set_defaults(func=execute) args = p.parse_args() args_func(args, p) def print_linkages(depmap, show_files=False): # Print system and not found last k = sorted(set(depmap.keys()) - {'system', 'not found'}) all_deps = k if 'not found' not in depmap.keys() else k + ['system', 'not found'] for dep in all_deps: print("%s:" % dep) if show_files: for lib, path, binary in sorted(depmap[dep]): print(" %s (%s) from %s" % (lib, path, binary)) else: for lib, path in sorted(set(map(itemgetter(0, 1), depmap[dep]))): print(" %s (%s)" % (lib, path)) print() def replace_path(binary, path, prefix): if sys.platform.startswith('linux'): return abspath(path) elif sys.platform.startswith('darwin'): if path == basename(binary): return abspath(join(prefix, binary)) if '@rpath' in path: rpaths = get_rpaths(join(prefix, binary)) if not rpaths: return "NO LC_RPATH FOUND" else: for rpath in rpaths: path1 = path.replace("@rpath", rpath) path1 = path1.replace('@loader_path', join(prefix, dirname(binary))) if exists(abspath(join(prefix, path1))): path = path1 break else: return 'not found' path = path.replace('@loader_path', join(prefix, dirname(binary))) if path.startswith('/'): return abspath(path) return 'not found' def print_object_info(info, key): gb = groupby(key, info) for header in sorted(gb, key=str): print(header) for f_info in sorted(gb[header], key=getter('filename')): for data in sorted(f_info): if data == key: continue if f_info[data] is None: continue print(' %s: %s' % (data, f_info[data])) if len([i for i in f_info if f_info[i] is not None and i != key]) > 1: print() print() class _untracked_package: def __str__(self): return "<untracked>" untracked_package = _untracked_package() def test_installable(channel='defaults', verbose=True): if not verbose: sys.stdout = open(os.devnull, 'w') success = False has_py = re.compile(r'py(\d)(\d)') for platform in ['osx-64', 'linux-32', 'linux-64', 'win-32', 'win-64']: print("######## Testing platform %s ########" % platform) channels = 
[channel] + get_default_urls() index = get_index(channel_urls=channels, prepend=False, platform=platform) for package in sorted(index): if channel != 'defaults': # If we give channels at the command line, only look at # packages from those channels (not defaults). if index[package]['channel'] not in normalize_urls([channel], platform=platform): continue name, version, build = package.rsplit('.tar.bz2', 1)[0].rsplit('-', 2) if name in {'conda', 'conda-build'}: # conda can only be installed in the root environment continue # Don't fail just because the package is a different version of Python # than the default. We should probably check depends rather than the # build string. match = has_py.search(build) assert match if 'py' in build else True, build if match: additional_packages = ['python=%s.%s' % (match.group(1), match.group(2))] else: additional_packages = [] print('Testing %s=%s' % (name, version)) # if additional_packages: # print("Including %s" % additional_packages[0]) try: check_install([name + '=' + version] + additional_packages, channel_urls=channels, prepend=False, platform=platform) except KeyboardInterrupt: raise # sys.exit raises an exception that doesn't subclass from Exception except BaseException as e: success = True print("FAIL: %s %s on %s with %s (%s)" % (name, version, platform, additional_packages, e), file=sys.stderr) return success def execute(args, parser): if not args.subcommand: parser.print_help() exit() if args.subcommand == 'channels': if not args.test_installable: parser.error("At least one option (--test-installable) is required.") else: sys.exit(not test_installable(channel=args.channel, verbose=args.verbose)) prefix = get_prefix(args) installed = ci.linked(prefix) if not args.packages and not args.untracked and not args.all: parser.error("At least one package or --untracked or --all must be provided") if args.all: args.packages = sorted([i.rsplit('-', 2)[0] for i in installed]) if args.untracked: 
args.packages.append(untracked_package) if args.subcommand == 'linkages': pkgmap = {} for pkg in args.packages: if pkg == untracked_package: dist = untracked_package else: for dist in installed: if pkg == dist.rsplit('-', 2)[0]: break else: sys.exit("Package %s is not installed in %s" % (pkg, prefix)) if not sys.platform.startswith(('linux', 'darwin')): sys.exit("Error: conda inspect linkages is only implemented in Linux and OS X") if dist == untracked_package: obj_files = get_untracked_obj_files(prefix) else: obj_files = get_package_obj_files(dist, prefix) linkages = get_linkages(obj_files, prefix) depmap = defaultdict(list) pkgmap[pkg] = depmap depmap['not found'] = [] for binary in linkages: for lib, path in linkages[binary]: path = replace_path(binary, path, prefix) if path not in {'', 'not found'} else path if path.startswith(prefix): deps = list(which_package(path)) if len(deps) > 1: print("Warning: %s comes from multiple packages: %s" % (path, comma_join(deps)), file=sys.stderr) if not deps: if exists(path): depmap['untracked'].append((lib, path.split(prefix + '/', 1)[-1], binary)) else: depmap['not found'].append((lib, path.split(prefix + '/', 1)[-1], binary)) for d in deps: depmap[d].append((lib, path.split(prefix + '/', 1)[-1], binary)) elif path == 'not found': depmap['not found'].append((lib, path, binary)) else: depmap['system'].append((lib, path, binary)) if args.groupby == 'package': for pkg in args.packages: print(pkg) print('-'*len(str(pkg))) print() print_linkages(pkgmap[pkg], show_files=args.show_files) elif args.groupby == 'dependency': # {pkg: {dep: [files]}} -> {dep: {pkg: [files]}} inverted_map = defaultdict(lambda: defaultdict(list)) for pkg in pkgmap: for dep in pkgmap[pkg]: if pkgmap[pkg][dep]: inverted_map[dep][pkg] = pkgmap[pkg][dep] # print system and not found last k = sorted(set(inverted_map.keys()) - {'system', 'not found'}) for dep in k + ['system', 'not found']: print(dep) print('-'*len(str(dep))) print() 
print_linkages(inverted_map[dep], show_files=args.show_files) else: raise ValueError("Unrecognized groupby: %s" % args.groupby) if args.subcommand == 'objects': for pkg in args.packages: if pkg == untracked_package: dist = untracked_package else: for dist in installed: if pkg == dist.rsplit('-', 2)[0]: break else: sys.exit("Package %s is not installed in %s" % (pkg, prefix)) print(pkg) print('-'*len(str(pkg))) print() if not sys.platform.startswith('darwin'): sys.exit("Error: conda inspect objects is only implemented in OS X") if dist == untracked_package: obj_files = get_untracked_obj_files(prefix) else: obj_files = get_package_obj_files(dist, prefix) info = [] for f in obj_files: f_info = {} path = join(prefix, f) f_info['filetype'] = human_filetype(path) f_info['rpath'] = ':'.join(get_rpaths(path)) f_info['filename'] = f info.append(f_info) print_object_info(info, args.groupby)
bsd-3-clause
NECCSiPortal/NECCSPortal-dashboard
nec_portal/dashboards/admin/capacity/tests.py
1
4009
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from django.core.urlresolvers import reverse
from mox3.mox import IsA  # noqa

from openstack_dashboard.api import nova
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import utils as test_utils

from nec_portal.dashboards.admin.capacity import panel  # noqa

# URL of the admin capacity dashboard index page; resolved once at import time.
INDEX_URL = reverse('horizon:admin:capacity:index')


class CapacityViewTests(test.BaseAdminViewTests):
    """A test of the screen of capacity's index.
    CheckPoint 1. A expected template is used.
    """

    def test_capacity(self):
        # No stubs are registered; ReplayAll just moves mox into replay mode.
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        # CheckPoint 1: the index view renders the expected template.
        self.assertTemplateUsed(res, 'admin/capacity/capacity/_index.html')


class CapacityAZTabTests(test.BaseAdminViewTests):
    """A test of the screen of capacity's az tab.
    CheckPoint 1. A expected template is used.
    CheckPoint 2. A expected context is returned.
    """
    # Query-string values deliberately containing '.' and ':' characters.
    TEST_GROUP = 'test_az_group'
    TEST_NAME = 'test.az:name'
    CONTEXT_GROUP = 'az'

    def setUp(self):
        test.BaseAdminViewTests.setUp(self)

        # Load the standard openstack_dashboard fixtures (availability zones,
        # hypervisors, ...) onto this test instance.
        self.testdata = test_utils.TestData()
        test_utils.load_test_data(self.testdata)

    @mock.patch('novaclient.v2.client.Client')
    @test.create_stubs({nova: ('availability_zone_list',), })
    def test_capacity_az(self, request):
        self.mox.ReplayAll()

        url_param = '?group=' + self.TEST_GROUP + '&name=' + self.TEST_NAME

        # Point the (mocked) nova client at the fixture availability zones.
        nova.novaclient(self.request).availability_zones = \
            self.availability_zones
        availability_zone_list = self.availability_zones.list()
        # The expected detail link targets the first non-internal zone.
        for az in availability_zone_list:
            if not az.zoneName == 'internal':
                context_name = az.zoneName
                break
        context_url = './capacity_az/detail?group=' + self.CONTEXT_GROUP + \
                      '&name=' + context_name

        res = self.client.get(INDEX_URL + url_param +
                              '&tab=capacity_group_tabs__capacity_az')
        # CheckPoint 1: AZ tab template is rendered.
        self.assertTemplateUsed(res, 'admin/capacity/capacity_az/_index.html')
        # CheckPoint 2: the view exposes the expected detail URL in context.
        self.assertEqual(res.context['detail_url'], context_url)


class CapacityHostTabTests(test.BaseAdminViewTests):
    """A test of the screen of capacity's host tab.
    CheckPoint 1. A expected template is used.
    CheckPoint 2. A expected context is returned.
    """
    # Query-string values deliberately containing '_' and ',' characters.
    TEST_GROUP = 'test_host_group'
    TEST_NAME = 'test_host,name'
    CONTEXT_GROUP = 'host'

    def setUp(self):
        test.BaseAdminViewTests.setUp(self)

        # Load the standard openstack_dashboard fixtures for hypervisors.
        self.testdata = test_utils.TestData()
        test_utils.load_test_data(self.testdata)

    @mock.patch('novaclient.v2.client.Client')
    @test.create_stubs({nova: ('hypervisor_list',), })
    def test_capacity_host(self, request):
        self.mox.ReplayAll()

        url_param = '?group=' + self.TEST_GROUP + '&name=' + self.TEST_NAME

        # Point the (mocked) nova client at the fixture hypervisors; the
        # expected detail link targets the first hypervisor's hostname.
        nova.novaclient(self.request).hypervisors = self.hypervisors
        hypervisor_list = self.hypervisors.list()
        context_name = hypervisor_list[0].hypervisor_hostname
        context_url = './capacity_host/detail?group=' + self.CONTEXT_GROUP + \
                      '&name=' + context_name

        res = self.client.get(INDEX_URL + url_param +
                              '&tab=capacity_group_tabs__capacity_host')
        # CheckPoint 1: host tab template is rendered.
        self.assertTemplateUsed(res,
                                'admin/capacity/capacity_host/_index.html')
        # CheckPoint 2: the view exposes the expected detail URL in context.
        self.assertEqual(res.context['detail_url'], context_url)
apache-2.0
paul-xxx/micropython
drivers/sdcard/sdcard.py
33
5777
""" Micro Python driver for SD cards using SPI bus. Requires an SPI bus and a CS pin. Provides readblocks and writeblocks methods so the device can be mounted as a filesystem. Example usage: import pyb, sdcard, os sd = sdcard.SDCard(pyb.SPI(1), pyb.Pin.board.X5) pyb.mount(sd, '/sd2') os.listdir('/') """ import pyb class SDCard: CMD_TIMEOUT = const(100) R1_IDLE_STATE = const(1 << 0) #R1_ERASE_RESET = const(1 << 1) R1_ILLEGAL_COMMAND = const(1 << 2) #R1_COM_CRC_ERROR = const(1 << 3) #R1_ERASE_SEQUENCE_ERROR = const(1 << 4) #R1_ADDRESS_ERROR = const(1 << 5) #R1_PARAMETER_ERROR = const(1 << 6) def __init__(self, spi, cs): self.spi = spi self.cs = cs self.cmdbuf = bytearray(6) self.dummybuf = bytearray(512) for i in range(512): self.dummybuf[i] = 0xff self.dummybuf_memoryview = memoryview(self.dummybuf) # initialise the card self.init_card() def init_card(self): # init CS pin self.cs.high() self.cs.init(self.cs.OUT_PP) # init SPI bus; use low data rate for initialisation self.spi.init(self.spi.MASTER, baudrate=100000, phase=0, polarity=0) # clock card at least 100 cycles with cs high for i in range(16): self.spi.send(0xff) # CMD0: init card; should return R1_IDLE_STATE (allow 2 attempts) if self.cmd(0, 0, 0x95) != R1_IDLE_STATE: if self.cmd(0, 0, 0x95) != R1_IDLE_STATE: raise OSError("no SD card") # CMD8: determine card version r = self.cmd(8, 0x01aa, 0x87, 4) if r == R1_IDLE_STATE: self.init_card_v2() elif r == (R1_IDLE_STATE | R1_ILLEGAL_COMMAND): self.init_card_v1() else: raise OSError("couldn't determine SD card version") # get the number of sectors # CMD9: response R2 (R1 byte + 16-byte block read) if self.cmd(9, 0, 0, 0, False) != 0: raise OSError("no response from SD card") csd = bytearray(16) self.readinto(csd) if csd[0] & 0xc0 != 0x40: raise OSError("SD card CSD format not supported") self.sectors = ((csd[8] << 8 | csd[9]) + 1) * 2014 #print('sectors', self.sectors) # CMD16: set block length to 512 bytes if self.cmd(16, 512, 0) != 0: raise OSError("can't set 
512 block size") # set to high data rate now that it's initialised self.spi.init(self.spi.MASTER, baudrate=1320000, phase=0, polarity=0) def init_card_v1(self): for i in range(CMD_TIMEOUT): self.cmd(55, 0, 0) if self.cmd(41, 0, 0) == 0: self.cdv = 512 #print("[SDCard] v1 card") return raise OSError("timeout waiting for v1 card") def init_card_v2(self): for i in range(CMD_TIMEOUT): pyb.delay(50) self.cmd(58, 0, 0, 4) self.cmd(55, 0, 0) if self.cmd(41, 0x40000000, 0) == 0: self.cmd(58, 0, 0, 4) self.cdv = 1 #print("[SDCard] v2 card") return raise OSError("timeout waiting for v2 card") def cmd(self, cmd, arg, crc, final=0, release=True): self.cs.low() # create and send the command buf = self.cmdbuf buf[0] = 0x40 | cmd buf[1] = arg >> 24 buf[2] = arg >> 16 buf[3] = arg >> 8 buf[4] = arg buf[5] = crc self.spi.send(buf) # wait for the repsonse (response[7] == 0) for i in range(CMD_TIMEOUT): response = self.spi.send_recv(0xff)[0] if not (response & 0x80): # this could be a big-endian integer that we are getting here for j in range(final): self.spi.send(0xff) if release: self.cs.high() self.spi.send(0xff) return response # timeout self.cs.high() self.spi.send(0xff) return -1 def readinto(self, buf): self.cs.low() # read until start byte (0xff) while self.spi.send_recv(0xff)[0] != 0xfe: pass # read data mv = self.dummybuf_memoryview[:len(buf)] self.spi.send_recv(mv, recv=buf) # read checksum self.spi.send(0xff) self.spi.send(0xff) self.cs.high() self.spi.send(0xff) def write(self, buf): self.cs.low() # send: start of block, data, checksum self.spi.send(0xfe) self.spi.send(buf) self.spi.send(0xff) self.spi.send(0xff) # check the response if (self.spi.send_recv(0xff)[0] & 0x1f) != 0x05: self.cs.high() self.spi.send(0xff) return # wait for write to finish while self.spi.send_recv(0xff)[0] == 0: pass self.cs.high() self.spi.send(0xff) def count(self): return self.sectors def readblocks(self, block_num, buf): # TODO support multiple block reads assert len(buf) == 512 # CMD17: 
set read address for single block if self.cmd(17, block_num * self.cdv, 0) != 0: return 1 # receive the data self.readinto(buf) return 0 def writeblocks(self, block_num, buf): # TODO support multiple block writes assert len(buf) == 512 # CMD24: set write address for single block if self.cmd(24, block_num * self.cdv, 0) != 0: return 1 # send the data self.write(buf) return 0
mit
XiaodunServerGroup/ddyedx
lms/djangoapps/analytics/basic.py
30
3759
""" Student and course analytics. Serve miscellaneous course and student data """ from django.contrib.auth.models import User import xmodule.graders as xmgraders STUDENT_FEATURES = ('username', 'first_name', 'last_name', 'is_staff', 'email') PROFILE_FEATURES = ('name', 'language', 'location', 'year_of_birth', 'gender', 'level_of_education', 'mailing_address', 'goals') AVAILABLE_FEATURES = STUDENT_FEATURES + PROFILE_FEATURES def enrolled_students_features(course_id, features): """ Return list of student features as dictionaries. enrolled_students_features(course_id, ['username, first_name']) would return [ {'username': 'username1', 'first_name': 'firstname1'} {'username': 'username2', 'first_name': 'firstname2'} {'username': 'username3', 'first_name': 'firstname3'} ] """ students = User.objects.filter( courseenrollment__course_id=course_id, courseenrollment__is_active=1, ).order_by('username').select_related('profile') def extract_student(student, features): """ convert student to dictionary """ student_features = [x for x in STUDENT_FEATURES if x in features] profile_features = [x for x in PROFILE_FEATURES if x in features] student_dict = dict((feature, getattr(student, feature)) for feature in student_features) profile = student.profile if profile is not None: profile_dict = dict((feature, getattr(profile, feature)) for feature in profile_features) student_dict.update(profile_dict) return student_dict return [extract_student(student, features) for student in students] def dump_grading_context(course): """ Render information about course grading context (e.g. 
which problems are graded in what assignments) Useful for debugging grading_policy.json and policy.json Returns HTML string """ hbar = "{}\n".format("-" * 77) msg = hbar msg += "Course grader:\n" msg += '%s\n' % course.grader.__class__ graders = {} if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader): msg += '\n' msg += "Graded sections:\n" for subgrader, category, weight in course.grader.sections: msg += " subgrader=%s, type=%s, category=%s, weight=%s\n"\ % (subgrader.__class__, subgrader.type, category, weight) subgrader.index = 1 graders[subgrader.type] = subgrader msg += hbar msg += "Listing grading context for course %s\n" % course.id gcontext = course.grading_context msg += "graded sections:\n" msg += '%s\n' % gcontext['graded_sections'].keys() for (gsomething, gsvals) in gcontext['graded_sections'].items(): msg += "--> Section %s:\n" % (gsomething) for sec in gsvals: sdesc = sec['section_descriptor'] frmat = getattr(sdesc, 'format', None) aname = '' if frmat in graders: gform = graders[frmat] aname = '%s %02d' % (gform.short_label, gform.index) gform.index += 1 elif sdesc.display_name in graders: gform = graders[sdesc.display_name] aname = '%s' % gform.short_label notes = '' if getattr(sdesc, 'score_by_attempt', False): notes = ', score by attempt!' msg += " %s (format=%s, Assignment=%s%s)\n"\ % (sdesc.display_name, frmat, aname, notes) msg += "all descriptors:\n" msg += "length=%d\n" % len(gcontext['all_descriptors']) msg = '<pre>%s</pre>' % msg.replace('<', '&lt;') return msg
agpl-3.0
honghaoz/UW-Info-Session-iOS
UW-Info-Session-1.0/GAE Support/uw-info2/libs/requests/packages/chardet/cp949prober.py
2801
1782
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCKRDistributionAnalysis from .mbcssm import CP949SMModel class CP949Prober(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(CP949SMModel) # NOTE: CP949 is a superset of EUC-KR, so the distribution should be # not different. self._mDistributionAnalyzer = EUCKRDistributionAnalysis() self.reset() def get_charset_name(self): return "CP949"
gpl-3.0
mtlchun/edx
pavelib/utils/test/suites/bokchoy_suite.py
7
4970
""" Class used for defining and running Bok Choy acceptance test suite """ from paver.easy import sh from pavelib.utils.test.suites import TestSuite from pavelib.utils.envs import Env from pavelib.utils.test import bokchoy_utils from pavelib.utils.test import utils as test_utils try: from pygments.console import colorize except ImportError: colorize = lambda color, text: text # pylint: disable-msg=invalid-name __test__ = False # do not collect class BokChoyTestSuite(TestSuite): """ TestSuite for running Bok Choy tests Properties (below is a subset): test_dir - parent directory for tests log_dir - directory for test output report_dir - directory for reports (e.g., coverage) related to test execution xunit_report - directory for xunit-style output (xml) fasttest - when set, skip various set-up tasks (e.g., DB migrations) test_spec - when set, specifies test files, classes, cases, etc. See platform doc. default_store - modulestore to use when running tests (split or draft) """ def __init__(self, *args, **kwargs): super(BokChoyTestSuite, self).__init__(*args, **kwargs) self.test_dir = Env.BOK_CHOY_DIR / kwargs.get('test_dir', 'tests') self.log_dir = Env.BOK_CHOY_LOG_DIR self.report_dir = Env.BOK_CHOY_REPORT_DIR self.xunit_report = self.report_dir / "xunit.xml" self.cache = Env.BOK_CHOY_CACHE self.fasttest = kwargs.get('fasttest', False) self.test_spec = kwargs.get('test_spec', None) self.default_store = kwargs.get('default_store', None) self.verbosity = kwargs.get('verbosity', 2) self.extra_args = kwargs.get('extra_args', '') self.ptests = kwargs.get('ptests', False) self.har_dir = self.log_dir / 'hars' self.imports_dir = kwargs.get('imports_dir', None) def __enter__(self): super(BokChoyTestSuite, self).__enter__() # Ensure that we have a directory to put logs and reports self.log_dir.makedirs_p() self.har_dir.makedirs_p() self.report_dir.makedirs_p() test_utils.clean_reports_dir() if not self.skip_clean: test_utils.clean_test_files() msg = colorize('green', "Checking 
for mongo, memchache, and mysql...") print(msg) bokchoy_utils.check_services() sh("{}/scripts/reset-test-db.sh".format(Env.REPO_ROOT)) if not self.fasttest: # Process assets and set up database for bok-choy tests # Reset the database # Collect static assets sh("paver update_assets --settings=bok_choy") # Clear any test data already in Mongo or MySQLand invalidate # the cache bokchoy_utils.clear_mongo() self.cache.flush_all() sh( "DEFAULT_STORE={default_store}" " ./manage.py lms --settings bok_choy loaddata --traceback" " common/test/db_fixtures/*.json".format( default_store=self.default_store, ) ) if self.imports_dir: sh( "DEFAULT_STORE={default_store}" " ./manage.py cms --settings=bok_choy import {import_dir}".format( default_store=self.default_store, import_dir=self.imports_dir ) ) # Ensure the test servers are available msg = colorize('green', "Starting test servers...") print(msg) bokchoy_utils.start_servers(self.default_store) msg = colorize('green', "Waiting for servers to start...") print(msg) bokchoy_utils.wait_for_test_servers() def __exit__(self, exc_type, exc_value, traceback): super(BokChoyTestSuite, self).__exit__(exc_type, exc_value, traceback) msg = colorize('green', "Cleaning up databases...") print(msg) # Clean up data we created in the databases sh("./manage.py lms --settings bok_choy flush --traceback --noinput") bokchoy_utils.clear_mongo() @property def cmd(self): # Default to running all tests if no specific test is specified if not self.test_spec: test_spec = self.test_dir else: test_spec = self.test_dir / self.test_spec # Construct the nosetests command, specifying where to save # screenshots and XUnit XML reports cmd = [ "DEFAULT_STORE={}".format(self.default_store), "SCREENSHOT_DIR='{}'".format(self.log_dir), "BOK_CHOY_HAR_DIR='{}'".format(self.har_dir), "SELENIUM_DRIVER_LOG_DIR='{}'".format(self.log_dir), "nosetests", test_spec, "--with-xunit", "--xunit-file={}".format(self.xunit_report), "--verbosity={}".format(self.verbosity), 
self.extra_args, ] cmd = (" ").join(cmd) return cmd
agpl-3.0
melviso/phycpp
beatle/model/py/KwArgsArgument.py
2
1396
# -*- coding: utf-8 -*- """ Created on Sun Dec 22 22:31:28 2013 @author: mel """ from beatle.tran import TransactionalMethod, TransactionalMoveObject from Argument import Argument class KwArgsArgument(Argument): """Implements argument representation""" context_container = True #visual methods @TransactionalMethod('move argument {0}') def drop(self, to): """drop this elemento to another place""" target = to.inner_argument_container if not target or to.project != self.project: return False # avoid move arguments between projects index = 0 # trick for insert as first child TransactionalMoveObject( object=self, origin=self.parent, target=target, index=index) return True def __init__(self, **kwargs): """Initialization""" kwargs['name'] = 'kwargs' kwargs['default'] = '' super(KwArgsArgument, self).__init__(**kwargs) container = self.outer_class or self.outer_module container._lastSrcTime = None container._lastHdrTime = None @property def bitmap_index(self): """Index of tree image""" from beatle.app import resources as rc return rc.GetBitmapIndex('py_kwargs') #return 5 @property def label(self): """Get tree label""" return '**{self._name}'.format(self=self)
gpl-2.0
djmitche/build-relengapi
relengapi/lib/memcached.py
3
6350
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import import contextlib import socket import threading import time import urlparse import elasticache_auto_discovery import memcache import structlog logger = structlog.get_logger() class BaseCacheFinder(object): def __init__(self): self.lock = threading.Lock() self._values = {} # format is up to the subclass def acquire_cache(self, config): '''Lock and return a cache client with the given config. Returns a cache and a "cookie" which can later be used to release the cache.''' def release_cache(self, cookie): '''Release the cache client with the given cookie''' def _value_for_config(self, config): '''Make a new value for self._get; called with the finder lock held.''' def _get(self, name, config=None): '''Get, making if necessary, a new value for the given name and configuration. If `config` is omitted, then the value must already exist.''' with self.lock: try: return self._values[name] except KeyError: assert config is not None rv = self._values[name] = self._value_for_config(config) return rv class MockCacheFinder(BaseCacheFinder): def _value_for_config(self, config): import mockcache return mockcache.Client(), threading.Lock() def acquire_cache(self, config): mc, lock = self._get(config, config) # wait forever for the cache to become available, since the data is all # stored inside it. 
lock.acquire() return mc, config def release_cache(self, cookie): _, lock = self._get(cookie) lock.release() class MemcachedCacheFinder(BaseCacheFinder): def _value_for_config(self, config): # an empty list of wrappers; acquire_cache will add wrappers to # this list as necessary return [] def acquire_cache(self, config): name = str(config) wrappers = self._get(name, config) # find an unlocked cache (again under the finder lock) with self.lock: for i in xrange(len(wrappers)): if wrappers[i].lock.acquire(False): cookie = name, i return wrappers[i].checkout(), cookie # or make a new one (note that these are never collected) else: cookie = name, len(wrappers) wrapper = self.client_wrapper_class(config) wrappers.append(wrapper) wrapper.lock.acquire() return wrapper.checkout(), cookie def release_cache(self, cookie): name, i = cookie wrappers = self._get(name) wrappers[i].lock.release() class ClientWrapper(object): def __init__(self, config): self.lock = threading.Lock() def checkout(self): '''Get the actual memcached.Client object''' class DirectCacheClientWrapper(ClientWrapper): def __init__(self, config): super(DirectCacheClientWrapper, self).__init__(config) self.client = memcache.Client(config) def checkout(self): return self.client class DirectCacheFinder(MemcachedCacheFinder): client_wrapper_class = DirectCacheClientWrapper class ElastiCacheClientWrapper(ClientWrapper): POLL_INTERVAL = 600 # check for config updates every ten minutes def __init__(self, config): super(ElastiCacheClientWrapper, self).__init__(config) self.elasticache_config = config self.last_memcache_config = None self.last_poll = time.time() memcache_config = self.get_memcache_config() self.client = memcache.Client(memcache_config) def get_memcache_config(self): '''Try to get the config from Amazon in a short time. 
If this fails, fall back to the existing config, or if there is none, to a dummy connection to localhost''' have_config = self.last_memcache_config is not None timeout = 0.5 if have_config else 5.0 try: discovered = elasticache_auto_discovery.discover( self.elasticache_config, time_to_timeout=timeout) except socket.error: logger.warning('Could not fetch ElastiCache configuration for %s' % self.elasticache_config, exc_info=True) if have_config: return self.last_memcache_config # return a dummy value logger.warning('No existing ElastiCache configuration for %s; ' 'falling back to a dummy configuration' % self.elasticache_config) return ['127.0.0.1:11211'] # re-format for input to memcache.Client() memcache_config = ['%s:%s' % (e[1], e[2]) for e in discovered] self.last_memcache_config = memcache_config return memcache_config def checkout(self): if self.last_poll + self.POLL_INTERVAL < time.time(): # reconfigure (this has the side-effect of marking dead # servers as live again) self.last_poll = time.time() memcache_config = self.get_memcache_config() self.client.set_servers(memcache_config) return self.client class ElastiCacheFinder(MemcachedCacheFinder): client_wrapper_class = ElastiCacheClientWrapper class CacheFinder(object): def __init__(self): self._finders = { 'direct': DirectCacheFinder(), 'elasticache': ElastiCacheFinder(), 'mock': MockCacheFinder(), } @contextlib.contextmanager def cache(self, config): if isinstance(config, basestring): parsed = urlparse.urlparse(config) style = parsed.scheme config = parsed.netloc else: style = 'direct' mc, cookie = self._finders[style].acquire_cache(config) try: yield mc finally: self._finders[style].release_cache(cookie) def init_app(app): app.memcached = CacheFinder()
mpl-2.0
ioram7/keystone-federado-pgid2013
build/python-keystoneclient/keystoneclient/v3/client.py
1
3508
# Copyright 2011 Nebula, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging from keystoneclient.v2_0 import client from keystoneclient.v3 import credentials from keystoneclient.v3 import endpoints from keystoneclient.v3 import domains from keystoneclient.v3 import groups from keystoneclient.v3 import policies from keystoneclient.v3 import projects from keystoneclient.v3 import roles from keystoneclient.v3 import services from keystoneclient.v3 import users _logger = logging.getLogger(__name__) class Client(client.Client): """Client for the OpenStack Identity API v3. :param string username: Username for authentication. (optional) :param string password: Password for authentication. (optional) :param string token: Token for authentication. (optional) :param string tenant_name: Tenant id. (optional) :param string tenant_id: Tenant name. (optional) :param string auth_url: Keystone service endpoint for authorization. :param string region_name: Name of a region to select when choosing an endpoint from the service catalog. :param string endpoint: A user-supplied endpoint URL for the keystone service. Lazy-authentication is possible for API service calls if endpoint is set at instantiation.(optional) :param integer timeout: Allows customization of the timeout for client http requests. (optional) Example:: >>> from keystoneclient.v3 import client >>> keystone = client.Client(username=USER, ... password=PASS, ... 
tenant_name=TENANT_NAME, ... auth_url=KEYSTONE_URL) ... >>> keystone.tenants.list() ... >>> user = keystone.users.get(USER_ID) >>> user.delete() """ def __init__(self, endpoint=None, **kwargs): """ Initialize a new client for the Keystone v3.0 API. """ super(Client, self).__init__(endpoint=endpoint, **kwargs) self.credentials = credentials.CredentialManager(self) self.endpoints = endpoints.EndpointManager(self) self.domains = domains.DomainManager(self) self.groups = groups.GroupManager(self) self.policies = policies.PolicyManager(self) self.projects = projects.ProjectManager(self) self.roles = roles.RoleManager(self) self.services = services.ServiceManager(self) self.users = users.UserManager(self) # NOTE(gabriel): If we have a pre-defined endpoint then we can # get away with lazy auth. Otherwise auth immediately. if endpoint: self.management_url = endpoint else: self.authenticate() def serialize(self, entity): return json.dumps(entity, sort_keys=True)
apache-2.0
dymkowsk/mantid
Testing/SystemTests/tests/analysis/SANSMaskWorkspaceTest.py
3
20035
# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments import unittest import stresstesting import os import mantid from mantid.api import AlgorithmManager from sans.common.enums import SANSFacility from sans.test_helper.test_director import TestDirector from sans.state.data import get_data_builder from sans.state.mask import get_mask_builder def get_masked_spectrum_numbers(workspace): for index in range(workspace.getNumberHistograms()): try: det = workspace.getDetector(index) except RuntimeError: break if det.isMasked(): yield workspace.getSpectrum(index).getSpectrumNo() def get_non_masked_spectrum_numbers(workspace): for index in range(workspace.getNumberHistograms()): try: det = workspace.getDetector(index) except RuntimeError: break if not det.isMasked(): yield workspace.getSpectrum(index).getSpectrumNo() def elements_in_range(range_start, range_stop, collection): for element in collection: if range_start <= element <= range_stop: yield element # ----------------------------------------------- # Tests for the SANSLoad algorithm # ----------------------------------------------- class SANSMaskWorkspaceTest(unittest.TestCase): def _load_workspace(self, state, move_workspace=True): load_alg = AlgorithmManager.createUnmanaged("SANSLoad") load_alg.setChild(True) load_alg.initialize() state_dict = state.property_manager load_alg.setProperty("SANSState", state_dict) load_alg.setProperty("PublishToCache", False) load_alg.setProperty("UseCached", False) load_alg.setProperty("MoveWorkspace", move_workspace) load_alg.setProperty("SampleScatterWorkspace", "dummy") load_alg.setProperty("SampleScatterMonitorWorkspace", "dummy") # Act load_alg.execute() self.assertTrue(load_alg.isExecuted()) return load_alg.getProperty("SampleScatterWorkspace").value def _run_mask(self, state, workspace, component): mask_alg = AlgorithmManager.createUnmanaged("SANSMaskWorkspace") mask_alg.setChild(True) mask_alg.initialize() state_dict = state.property_manager 
mask_alg.setProperty("SANSState", state_dict) mask_alg.setProperty("Workspace", workspace) mask_alg.setProperty("Component", component) mask_alg.execute() self.assertTrue(mask_alg.isExecuted()) return mask_alg.getProperty("Workspace").value def _do_assert(self, workspace, expected_spectra): # Remove duplicate masks from expected expected_spectra = list(set(expected_spectra)) masked_spectra = list(get_masked_spectrum_numbers(workspace)) self.assertTrue(len(expected_spectra) == len(masked_spectra)) for expected, actual in zip(sorted(expected_spectra), sorted(masked_spectra)): self.assertTrue(expected == actual) def _do_assert_non_masked(self, workspace, expected_spectra): # Remove duplicate masks from expected expected_spectra = list(set(expected_spectra)) non_masked_spectra = list(get_non_masked_spectrum_numbers(workspace)) self.assertTrue(len(expected_spectra) == len(non_masked_spectra)) for expected, actual in zip(sorted(expected_spectra), sorted(non_masked_spectra)): self.assertTrue(expected == actual) def test_that_spectra_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Expected_spectra expected_spectra = [] # Standard spectra single_spectra = [13, 14, 17] expected_spectra.extend(single_spectra) spectrum_range_start = [20, 30] spectrum_range_stop = [25, 35] expected_spectra.extend(list(range(20, 25 + 1))) expected_spectra.extend(list(range(30, 35 + 1))) # Detector-specific single horizontal strip mask # The horizontal strip will be evaluated for SANS2D on the LAB as: # e.g. 
[(50*512 + 9(monitors)] + x in range(0, 512) single_horizontal_strip_masks = [50, 53] for single_horizontal_strip_mask in single_horizontal_strip_masks: expected_spectra.extend(((single_horizontal_strip_mask*512 + 9) + x for x in range(0, 512))) # Detector-specific range horizontal strip mask # The horizontal range will be evaluated for SANS2D on the LAB as: # e.g. [(62*512 + 9(monitors)] + x in range(0, 512) + (63*512 + 9(monitors)] + x in range(0, 512) + ...] range_horizontal_strip_start = [62, 67] range_horizontal_strip_stop = [64, 70] for start, stop in zip(range_horizontal_strip_start, range_horizontal_strip_stop): expected_spectra.extend(((start*512 + 9) + y*512 + x for y in range(0, stop - start + 1) for x in range(0, 512))) # Detector-specific single vertical strip mask # The vertical strip will be evaluated for SANS2D on the LAB as: # e.g. [(45 + 9(monitors)] + y*512 for y in range(0, 120)] single_vertical_strip_masks = [45, 89] for single_vertical_strip_mask in single_vertical_strip_masks: expected_spectra.extend(((single_vertical_strip_mask + 9) + y*512 for y in range(0, 120))) # Detector-specific range vertical strip mask # The vertical range will be evaluated for SANS2D on the LAB as: range_vertical_strip_start = [99] range_vertical_strip_stop = [102] for start, stop in zip(range_vertical_strip_start, range_vertical_strip_stop): expected_spectra.extend(((start_elem + 9) + y * 512 for start_elem in range(start, stop + 1) for y in range(0, 120))) mask_builder.set_single_spectra_on_detector(single_spectra) mask_builder.set_spectrum_range_on_detector(spectrum_range_start, spectrum_range_stop) mask_builder.set_LAB_single_horizontal_strip_mask(single_horizontal_strip_masks) mask_builder.set_LAB_range_horizontal_strip_start(range_horizontal_strip_start) mask_builder.set_LAB_range_horizontal_strip_stop(range_horizontal_strip_stop) mask_builder.set_LAB_single_vertical_strip_mask(single_vertical_strip_masks) 
mask_builder.set_LAB_range_vertical_strip_start(range_vertical_strip_start) mask_builder.set_LAB_range_vertical_strip_stop(range_vertical_strip_stop) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, expected_spectra) def test_that_block_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Expected_spectra expected_spectra = [] # Block # Detector-specific block # The block will be evaluated for SANS2D on the LAB as: block_horizontal_start = [12, 17] block_horizontal_stop = [14, 21] block_vertical_start = [45, 87] block_vertical_stop = [48, 91] for h_start, h_stop, v_start, v_stop in zip(block_horizontal_start, block_horizontal_stop, block_vertical_start, block_vertical_stop): expected_spectra.extend(((h_start*512 + 9) + y*512 + x for y in range(0, h_stop - h_start + 1) for x in range(v_start, v_stop + 1))) mask_builder.set_LAB_block_horizontal_start(block_horizontal_start) mask_builder.set_LAB_block_horizontal_stop(block_horizontal_stop) mask_builder.set_LAB_block_vertical_start(block_vertical_start) mask_builder.set_LAB_block_vertical_stop(block_vertical_stop) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, expected_spectra) def test_that_cross_block_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = 
data_builder.build() mask_builder = get_mask_builder(data_info) # Expected_spectra expected_spectra = [] # Block # Detector-specific cross block # The block will be evaluated for SANS2D on the LAB as: block_cross_horizontal = [12, 17] block_cross_vertical = [49, 67] for h, v in zip(block_cross_horizontal, block_cross_vertical): expected_spectra.extend([h*512 + 9 + v]) mask_builder.set_LAB_block_cross_horizontal(block_cross_horizontal) mask_builder.set_LAB_block_cross_vertical(block_cross_vertical) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, expected_spectra) def test_that_mask_files_are_applied(self): def create_shape_xml_file(xml_string): f_name = os.path.join(mantid.config.getString('defaultsave.directory'), 'sample_mask_file.xml') if os.path.exists(f_name): os.remove(f_name) with open(f_name, 'w') as f: f.write(xml_string) return f_name # Arrange shape_xml = "<?xml version=\"1.0\"?>\n"\ "<detector-masking>\n" \ "<group>\n" \ "<detids>\n" \ "1313191-1313256\n" \ "</detids>\n" \ "</group>\n" \ "</detector-masking >" file_name = create_shape_xml_file(shape_xml) # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Mask file # Got the spectra from the detector view expected_spectra = [x for x in range(31432, 31498)] mask_builder.set_mask_files([file_name]) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state, move_workspace=False) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, 
expected_spectra) # Remove if os.path.exists(file_name): os.remove(file_name) def test_that_general_time_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Expected_spectra bin_mask_general_start = [30000., 67000.] bin_mask_general_stop = [35000., 75000.] # bin_mask_start = [14000] # bin_mask_stop = FloatListParameter() mask_builder.set_bin_mask_general_start(bin_mask_general_start) mask_builder.set_bin_mask_general_stop(bin_mask_general_stop) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state, move_workspace=False) tof_spectra_10_original = workspace.getSpectrum(10).getTofs() tof_spectra_11_original = workspace.getSpectrum(11).getTofs() # Act workspace = self._run_mask(state, workspace, "LAB") # Assert # Confirm that everything in the ranges 30000-35000 and 67000-75000 is removed from the event list tof_spectra_10_masked = workspace.getSpectrum(10).getTofs() tof_spectra_11_masked = workspace.getSpectrum(11).getTofs() # Spectrum 10 # Three events should have been removed self.assertTrue(len(tof_spectra_10_masked) == len(tof_spectra_10_original) - 3) # One event should have been removed self.assertTrue(len(tof_spectra_11_masked) == len(tof_spectra_11_original) - 1) # Make sure that there are no elements for start, stop in zip(bin_mask_general_start, bin_mask_general_stop): self.assertFalse(any(elements_in_range(start, stop, tof_spectra_10_masked))) self.assertFalse(any(elements_in_range(start, stop, tof_spectra_11_masked))) def test_that_detector_specific_time_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = 
get_mask_builder(data_info) # Expected_spectra bin_mask_start = [27000., 58000.] bin_mask_stop = [45000., 61000.] mask_builder.set_LAB_bin_mask_start(bin_mask_start) mask_builder.set_LAB_bin_mask_stop(bin_mask_stop) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state, move_workspace=False) # Is part of LAB tof_spectra_23813_original = workspace.getSpectrum(23813).getTofs() # Act workspace = self._run_mask(state, workspace, "LAB") # Assert # Confirm that everything in the ranges 27000-45000 and 58000-61000 is removed from the event list tof_spectra_23813_masked = workspace.getSpectrum(23813).getTofs() # Spectrum 23813 # Five events should have been removed self.assertTrue(len(tof_spectra_23813_masked) == len(tof_spectra_23813_original) - 5) # Make sure that there are no elements for start, stop in zip(bin_mask_start, bin_mask_stop): self.assertFalse(any(elements_in_range(start, stop, tof_spectra_23813_masked))) def test_that_angle_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Expected_spectra phi_mirror = False phi_min = 0. phi_max = 90. 
# This should mask everything except for the upper right quadrant # | 120 |-------------------| # | |---------------------| # | 60 |-------------------| # | |----------------------| # | # | # |-------------------|------------------| # 512 256 0 expected_spectra = [] # The strange double pattern arises from the offset of the SANS2D tube geometry (see InstrumentView) for y in range(60, 120): if y % 2: expected_spectra.extend(((y * 512 + 9) + x for x in range(0, 257))) else: expected_spectra.extend(((y * 512 + 9) + x for x in range(0, 255))) mask_builder.set_use_mask_phi_mirror(phi_mirror) mask_builder.set_phi_min(phi_min) mask_builder.set_phi_max(phi_max) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state, move_workspace=False) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert_non_masked(workspace, expected_spectra) def test_that_beam_stop_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) beam_stop_arm_width = .01 beam_stop_arm_angle = 180.0 beam_stop_arm_pos1 = 0.0 beam_stop_arm_pos2 = 0.0 # Expected_spectra, again the tubes are shifted and that will produce the slightly strange masking expected_spectra = [] expected_spectra.extend((512*59 + 9 + x for x in range(0, 257))) expected_spectra.extend((512*60 + 9 + x for x in range(0, 255))) mask_builder.set_beam_stop_arm_width(beam_stop_arm_width) mask_builder.set_beam_stop_arm_angle(beam_stop_arm_angle) mask_builder.set_beam_stop_arm_pos1(beam_stop_arm_pos1) mask_builder.set_beam_stop_arm_pos2(beam_stop_arm_pos2) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = 
test_director.construct() workspace = self._load_workspace(state, move_workspace=False) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, expected_spectra) def test_that_cylinder_masking_is_applied(self): # Arrange data_builder = get_data_builder(SANSFacility.ISIS) data_builder.set_sample_scatter("SANS2D00028827") data_info = data_builder.build() mask_builder = get_mask_builder(data_info) # Radius Mask radius_min = 0.01 radius_max = 10. expected_spectra = [] expected_spectra.extend([30469, 30470, 30471, 30472, 30473, 30474, 30475, 30476, 30477, 30980, 30981, 30982, 30983, 30984, 30985, 30986, 30987, 30988]) mask_builder.set_radius_min(radius_min) mask_builder.set_radius_max(radius_max) mask_info = mask_builder.build() test_director = TestDirector() test_director.set_states(data_state=data_info, mask_state=mask_info) state = test_director.construct() workspace = self._load_workspace(state, move_workspace=False) # Act workspace = self._run_mask(state, workspace, "LAB") # Assert self._do_assert(workspace, expected_spectra) class SANSMaskWorkspaceRunnerTest(stresstesting.MantidStressTest): def __init__(self): stresstesting.MantidStressTest.__init__(self) self._success = False def runTest(self): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(SANSMaskWorkspaceTest, 'test')) runner = unittest.TextTestRunner() res = runner.run(suite) if res.wasSuccessful(): self._success = True def requiredMemoryMB(self): return 2000 def validate(self): return self._success if __name__ == '__main__': unittest.main()
gpl-3.0
henrysher/yum
yum-updatesd.py
9
31277
#!/usr/bin/python -tt # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # (c)2006 Duke University, Red Hat, Inc. # Seth Vidal <skvidal@linux.duke.edu> # Jeremy Katz <katzj@redhat.com> #TODO: # - clean up config and work on man page for docs # - need to be able to cancel downloads. requires some work in urlgrabber # - what to do if we're asked to exit while updates are being applied? # - what to do with the lock around downloads/updates # since it takes me time every time to figure this out again, here's how to # queue a check with dbus-send. adjust appropriately for other methods # $ dbus-send --system --print-reply --type=method_call \ # --dest=edu.duke.linux.yum /Updatesd edu.duke.linux.yum.CheckNow """ Daemon to periodically check for updates to installed packages, and associated classes and methods. 
""" import os import sys import time import gzip import dbus import dbus.service import dbus.glib import gobject import smtplib import threading from optparse import OptionParser from email.mime.text import MIMEText import yum import yum.Errors import syslog from yum.config import BaseConfig, Option, IntOption, ListOption, BoolOption from yum.parser import ConfigPreProcessor from ConfigParser import ConfigParser, ParsingError from yum.constants import * from yum.update_md import UpdateMetadata # FIXME: is it really sane to use this from here? sys.path.append('/usr/share/yum-cli') import callback config_file = '/etc/yum/yum-updatesd.conf' initial_directory = os.getcwd() class UpdateEmitter(object): """Abstract class for implementing different types of emitters. """ def __init__(self): pass def updatesAvailable(self, updateInfo): """Emitted when there are updates available to be installed. If not doing the download here, then called immediately on finding new updates. If we do the download here, then called after the updates have been downloaded. :param updateInfo: a list of tuples of dictionaries. Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ pass def updatesDownloading(self, updateInfo): """Emitted to give feedback of update download starting. :param updateInfo: a list of tuples of dictionaries. Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ pass def updatesApplied(self, updateInfo): """Emitted on successful installation of updates. :param updateInfo: a list of tuples of dictionaries. 
Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ pass def updatesFailed(self, errmsgs): """Emitted when an update has failed to install. :param errmsgs: a list of error messages """ pass def checkFailed(self, error): """Emitted when checking for updates failed. :param error: an error message """ pass def setupFailed(self, error, translation_domain): """Emitted when plugin initialization failed. :param error: an error message :param translation_domain: the translation domain supplied by the plugin """ pass class SyslogUpdateEmitter(UpdateEmitter): """Emitter class to send messages to syslog.""" def __init__(self, syslog_facility, ident = "yum-updatesd", level = "WARN"): UpdateEmitter.__init__(self) syslog.openlog(ident, 0, self._facilityMap(syslog_facility)) self.level = level def updatesAvailable(self, updateInfo): """Emit a message stating that updates are available. :param updateInfo: a list of tuples of dictionaries. 
Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ num = len(updateInfo) level = self.level if num > 1: msg = "%d updates available" %(num,) elif num == 1: msg = "1 update available" else: msg = "No updates available" level = syslog.LOG_DEBUG syslog.syslog(self._levelMap(level), msg) def _levelMap(self, lvl): level_map = { "EMERG": syslog.LOG_EMERG, "ALERT": syslog.LOG_ALERT, "CRIT": syslog.LOG_CRIT, "ERR": syslog.LOG_ERR, "WARN": syslog.LOG_WARNING, "NOTICE": syslog.LOG_NOTICE, "INFO": syslog.LOG_INFO, "DEBUG": syslog.LOG_DEBUG } if type(lvl) == int: return lvl return level_map.get(lvl.upper(), syslog.LOG_INFO) def _facilityMap(self, facility): facility_map = { "KERN": syslog.LOG_KERN, "USER": syslog.LOG_USER, "MAIL": syslog.LOG_MAIL, "DAEMON": syslog.LOG_DAEMON, "AUTH": syslog.LOG_AUTH, "LPR": syslog.LOG_LPR, "NEWS": syslog.LOG_NEWS, "UUCP": syslog.LOG_UUCP, "CRON": syslog.LOG_CRON, "LOCAL0": syslog.LOG_LOCAL0, "LOCAL1": syslog.LOG_LOCAL1, "LOCAL2": syslog.LOG_LOCAL2, "LOCAL3": syslog.LOG_LOCAL3, "LOCAL4": syslog.LOG_LOCAL4, "LOCAL5": syslog.LOG_LOCAL5, "LOCAL6": syslog.LOG_LOCAL6, "LOCAL7": syslog.LOG_LOCAL7,} if type(facility) == int: return facility return facility_map.get(facility.upper(), syslog.LOG_DAEMON) class EmailUpdateEmitter(UpdateEmitter): """Emitter class to send messages via email.""" def __init__(self, sender, rcpt): UpdateEmitter.__init__(self) self.sender = sender self.rcpt = rcpt def updatesAvailable(self, updateInfo): """Emit a message stating that updates are available. :param updateInfo: a list of tuples of dictionaries. 
Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ num = len(updateInfo) if num < 1: return output = """ Hi, There are %d package updates available. Please run the system updater. Thank You, Your Computer """ % num msg = MIMEText(output) msg['Subject'] = "%d Updates Available" %(num,) msg['From'] = self.sender msg['To'] = ",".join(self.rcpt) s = smtplib.SMTP() s.connect() s.sendmail(self.sender, self.rcpt, msg.as_string()) s.close() class DbusUpdateEmitter(UpdateEmitter): """Emitter class to send messages to the dbus message system.""" def __init__(self): UpdateEmitter.__init__(self) bus = dbus.SystemBus() name = dbus.service.BusName('edu.duke.linux.yum', bus = bus) yum_dbus = YumDbusInterface(name) self.dbusintf = yum_dbus def updatesAvailable(self, updateInfo): """Emit a message stating that updates are available. :param updateInfo: a list of tuples of dictionaries. Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ num = len(updateInfo) msg = "%d" %(num,) if num > 0: self.dbusintf.UpdatesAvailableSignal(msg) else: self.dbusintf.NoUpdatesAvailableSignal(msg) def updatesFailed(self, errmsgs): """Emit a message stating that an update has failed to install. :param errmsgs: a list of error messages """ self.dbusintf.UpdatesFailedSignal(errmsgs) def updatesApplied(self, updinfo): """Emit a message stating that updates were installed successfully. :param updinfo: a list of tuples of dictionaries. 
Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ self.dbusintf.UpdatesAppliedSignal(updinfo) def checkFailed(self, error): """Emit a message stating that checking for updates failed. :param error: an error message """ self.dbusintf.CheckFailedSignal(error) def setupFailed(self, error, translation_domain): """Emit a message stating that plugin initialization failed. :param error: an error message :param translation_domain: the translation domain supplied by the plugin """ self.dbusintf.SetupFailedSignal(error, translation_domain) class YumDbusInterface(dbus.service.Object): """Interface class for sending signals to the dbus.""" def __init__(self, bus_name, object_path='/UpdatesAvail'): dbus.service.Object.__init__(self, bus_name, object_path) @dbus.service.signal('edu.duke.linux.yum') def UpdatesAvailableSignal(self, message): """Send a signal stating that updates are available. :param message: the message to send in the signal """ pass @dbus.service.signal('edu.duke.linux.yum') def NoUpdatesAvailableSignal(self, message): """Send a signal stating that no updates are available. :param message: the message to send in the signal """ pass @dbus.service.signal('edu.duke.linux.yum') def UpdatesFailedSignal(self, errmsgs): """Send a signal stating that the update has failed. :param errmsgs: a list of error messages """ pass @dbus.service.signal('edu.duke.linux.yum') def UpdatesAppliedSignal(self, updinfo): """Send a signal stating that updates were applied successfully. :param updinfo: a list of tuples of dictionaries. 
Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ pass @dbus.service.signal('edu.duke.linux.yum') def CheckFailedSignal(self, message): """Send a signal stating that checking for updates failed. :param message: the message to send in the signal """ pass @dbus.service.signal('edu.duke.linux.yum') def SetupFailedSignal(self, message, translation_domain=""): """Send a signal stating that plugin initialization failed. :param message: the message to send in the signal :param translation_domain: the translation domain supplied by the plugin """ pass class UDConfig(BaseConfig): """Config format for the daemon""" run_interval = IntOption(3600) nonroot_workdir = Option("/var/tmp/yum-updatesd") emit_via = ListOption(['dbus', 'email', 'syslog']) email_to = ListOption(["root"]) email_from = Option("root") dbus_listener = BoolOption(True) do_update = BoolOption(False) do_download = BoolOption(False) do_download_deps = BoolOption(False) updaterefresh = IntOption(3600) syslog_facility = Option("DAEMON") syslog_level = Option("WARN") syslog_ident = Option("yum-updatesd") yum_config = Option("/etc/yum/yum.conf") class UpdateBuildTransactionThread(threading.Thread): """Class to build the update transaction in a new thread.""" def __init__(self, updd, name): self.updd = updd threading.Thread.__init__(self, name=name) def run(self): """Build the transaction, and download the packages to be updated. Finally, call self.processPkgs. This method must be provided by a subclass, and will determine what, if any, action is taken with the packages after they are downloaded. 
""" self.updd.tsInfo.makelists() try: (result, msgs) = self.updd.buildTransaction() except yum.Errors.RepoError, errmsg: # error downloading hdrs msgs = ["Error downloading headers"] self.updd.emitUpdateFailed(msgs) return dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.updd.tsInfo.getMembers())) self.updd.downloadPkgs(dlpkgs) self.processPkgs(dlpkgs) class UpdateDownloadThread(UpdateBuildTransactionThread): """Class to download the packages in the update in a new thread.""" def __init__(self, updd): UpdateBuildTransactionThread.__init__(self, updd, name="UpdateDownloadThread") def processPkgs(self, dlpkgs): """Output that there are updates available via the emitters, and release the yum locks. :param dlpkgs: unused """ self.updd.emitAvailable() self.updd.releaseLocks() class UpdateInstallThread(UpdateBuildTransactionThread): """Class to install updates in a new thread.""" def __init__(self, updd): UpdateBuildTransactionThread.__init__(self, updd, name="UpdateInstallThread") def failed(self, msgs): """Output that the update failed via the emitters. :param msgs: a list or error messages to emit """ self.updd.emitUpdateFailed(msgs) self.updd.releaseLocks() def success(self): """Output that the update has completed successfully via the emitters, and perform clean up. """ self.updd.emitUpdateApplied() self.updd.releaseLocks() self.updd.updateInfo = None self.updd.updateInfoTime = None def processPkgs(self, dlpkgs): """Apply the available updates. 
:param dlpkgs: a list of package objecs to update """ for po in dlpkgs: result, err = self.updd.sigCheckPkg(po) if result == 0: continue elif result == 1: try: self.updd.getKeyForPackage(po) except yum.Errors.YumBaseError, errmsg: self.failed([str(errmsg)]) del self.updd.ts self.updd.initActionTs() # make a new, blank ts to populate self.updd.populateTs(keepold=0) self.updd.ts.check() #required for ordering self.updd.ts.order() # order cb = callback.RPMInstallCallback(output = 0) cb.filelog = True cb.tsInfo = self.updd.tsInfo try: self.updd.runTransaction(cb=cb) except yum.Errors.YumBaseError, err: self.failed([str(err)]) self.success() class UpdatesDaemon(yum.YumBase): """Class to implement the update checking daemon.""" def __init__(self, opts): yum.YumBase.__init__(self) self.opts = opts self.didSetup = False self.emitters = [] if 'dbus' in self.opts.emit_via: self.emitters.append(DbusUpdateEmitter()) if 'email' in self.opts.emit_via: self.emitters.append(EmailUpdateEmitter(self.opts.email_from, self.opts.email_to)) if 'syslog' in self.opts.emit_via: self.emitters.append(SyslogUpdateEmitter(self.opts.syslog_facility, self.opts.syslog_ident, self.opts.syslog_level)) self.updateInfo = [] self.updateInfoTime = None def doSetup(self): """Perform set up, including setting up directories and parsing options. 
""" # if we are not root do the special subdir thing if os.geteuid() != 0: if not os.path.exists(self.opts.nonroot_workdir): os.makedirs(self.opts.nonroot_workdir) self.repos.setCacheDir(self.opts.nonroot_workdir) self.doConfigSetup(fn=self.opts.yum_config) def refreshUpdates(self): """Retrieve information about what updates are available.""" self.doLock() try: self.doRepoSetup() self.doSackSetup() self.updateCheckSetup() except Exception, e: syslog.syslog(syslog.LOG_WARNING, "error getting update info: %s" %(e,)) self.emitCheckFailed("%s" %(e,)) self.doUnlock() return False return True def populateUpdateMetadata(self): """Populate the metadata for the packages in the update.""" self.updateMetadata = UpdateMetadata() repos = [] for (new, old) in self.up.getUpdatesTuples(): pkg = self.getPackageObject(new) if pkg.repoid not in repos: repo = self.repos.getRepo(pkg.repoid) repos.append(repo.id) try: # grab the updateinfo.xml.gz from the repodata md = repo.retrieveMD('updateinfo') except Exception: # can't find any; silently move on continue md = gzip.open(md) self.updateMetadata.add(md) md.close() def populateUpdates(self): """Retrieve and set up information about the updates available for installed packages. 
""" def getDbusPackageDict(pkg): """Returns a dictionary corresponding to the package object in the form that we can send over the wire for dbus.""" pkgDict = { "name": pkg.name, "version": pkg.version, "release": pkg.release, "epoch": pkg.epoch, "arch": pkg.arch, "sourcerpm": pkg.sourcerpm, "summary": pkg.summary or "", } # check if any updateinfo is available md = self.updateMetadata.get_notice((pkg.name, pkg.ver, pkg.rel)) if md: # right now we only want to know if it is a security update pkgDict['type'] = md['type'] return pkgDict if self.up is None: # we're _only_ called after updates are setup return self.populateUpdateMetadata() self.updateInfo = [] for (new, old) in self.up.getUpdatesTuples(): n = getDbusPackageDict(self.getPackageObject(new)) o = getDbusPackageDict(self.rpmdb.searchPkgTuple(old)[0]) self.updateInfo.append((n, o)) if self.conf.obsoletes: for (obs, inst) in self.up.getObsoletesTuples(): n = getDbusPackageDict(self.getPackageObject(obs)) o = getDbusPackageDict(self.rpmdb.searchPkgTuple(inst)[0]) self.updateInfo.append((n, o)) self.updateInfoTime = time.time() def populateTsInfo(self): """Set up information about the update in the tsInfo object.""" # figure out the updates for (new, old) in self.up.getUpdatesTuples(): updating = self.getPackageObject(new) updated = self.rpmdb.searchPkgTuple(old)[0] self.tsInfo.addUpdate(updating, updated) # and the obsoletes if self.conf.obsoletes: for (obs, inst) in self.up.getObsoletesTuples(): obsoleting = self.getPackageObject(obs) installed = self.rpmdb.searchPkgTuple(inst)[0] self.tsInfo.addObsoleting(obsoleting, installed) self.tsInfo.addObsoleted(installed, obsoleting) def updatesCheck(self): """Check to see whether updates are available for any installed packages. If updates are available, install them, download them, or just emit a message, depending on what options are selected in the configuration file. 
:return: whether the daemon should continue looping """ if not self.didSetup: try: self.doSetup() except Exception, e: syslog.syslog(syslog.LOG_WARNING, "error initializing: %s" % e) if isinstance(e, yum.plugins.PluginYumExit): self.emitSetupFailed(e.value, e.translation_domain) else: # if we don't know where the string is from, then assume # it's not marked for translation (versus sending # gettext.textdomain() and assuming it's from the default # domain for this app) self.emitSetupFailed(str(e)) # Setup failed, let's restart and try again after the update # interval restart() else: self.didSetup = True try: if not self.refreshUpdates(): return except yum.Errors.LockError: return True # just pass for now try: self.populateTsInfo() self.populateUpdates() if self.opts.do_update: uit = UpdateInstallThread(self) uit.start() elif self.opts.do_download: self.emitDownloading() dl = UpdateDownloadThread(self) dl.start() else: # just notify about things being available self.emitAvailable() self.releaseLocks() except Exception, e: self.emitCheckFailed("%s" %(e,)) self.doUnlock() return True def getUpdateInfo(self): """Return information about the update. This may be previously cached information if the configured time interval between update retrievals has not yet elapsed, or there is an error in trying to retrieve the update information. :return: a list of tuples of dictionaries. Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ # if we have a cached copy, use it if self.updateInfoTime and (time.time() - self.updateInfoTime < self.opts.updaterefresh): return self.updateInfo # try to get the lock so we can update the info. fall back to # cached if available or try a few times. 
for i in range(10): try: self.doLock() break except yum.Errors.LockError: # if we can't get the lock, return what we have if we can if self.updateInfo: return self.updateInfo time.sleep(1) else: return [] try: self.updateCheckSetup() self.populateUpdates() self.releaseLocks() except: self.doUnlock() return self.updateInfo def updateCheckSetup(self): """Set up the transaction set, rpm database, and prepare to get updates. """ self.doTsSetup() self.doRpmDBSetup() self.doUpdateSetup() def releaseLocks(self): """Close the rpm database, and release the yum lock.""" self.closeRpmDB() self.doUnlock() def emitAvailable(self): """Emit a notice stating whether updates are available.""" map(lambda x: x.updatesAvailable(self.updateInfo), self.emitters) def emitDownloading(self): """Emit a notice stating that updates are downloading.""" map(lambda x: x.updatesDownloading(self.updateInfo), self.emitters) def emitUpdateApplied(self): """Emit a notice stating that automatic updates have been applied.""" map(lambda x: x.updatesApplied(self.updateInfo), self.emitters) def emitUpdateFailed(self, errmsgs): """Emit a notice stating that automatic updates failed.""" map(lambda x: x.updatesFailed(errmsgs), self.emitters) def emitCheckFailed(self, error): """Emit a notice stating that checking for updates failed.""" map(lambda x: x.checkFailed(error), self.emitters) def emitSetupFailed(self, error, translation_domain=""): """Emit a notice stating that checking for updates failed.""" map(lambda x: x.setupFailed(error, translation_domain), self.emitters) class YumDbusListener(dbus.service.Object): """Class to export methods that control the daemon over the dbus. """ def __init__(self, updd, bus_name, object_path='/Updatesd', allowshutdown = False): dbus.service.Object.__init__(self, bus_name, object_path) self.updd = updd self.allowshutdown = allowshutdown def doCheck(self): """Check whether updates are available. 
:return: False """ self.updd.updatesCheck() return False @dbus.service.method("edu.duke.linux.yum", in_signature="") def CheckNow(self): """Check whether updates are available. :return: a message stating that a check has been queued """ # make updating checking asynchronous since we discover whether # or not there are updates via a callback signal anyway gobject.idle_add(self.doCheck) return "check queued" @dbus.service.method("edu.duke.linux.yum", in_signature="") def ShutDown(self): """Shut down the daemon. :return: whether remote callers have permission to shut down the daemon """ if not self.allowshutdown: return False # we have to do this in a callback so that it doesn't get # sent back to the caller gobject.idle_add(shutDown) return True @dbus.service.method("edu.duke.linux.yum", in_signature="", out_signature="a(a{ss}a{ss})") def GetUpdateInfo(self): """Return information about the available updates :return: a list of tuples of dictionaries. Each dictionary contains information about a package, and each tuple specifies an available upgrade: the second dictionary in the tuple has information about a package that is currently installed, and the first dictionary has information what the package can be upgraded to """ # FIXME: should this be async? 
upds = self.updd.getUpdateInfo() return upds def shutDown(): """Shut down the daemon.""" sys.exit(0) def restart(): """Restart the daemon, reloading configuration files.""" os.chdir(initial_directory) os.execve(sys.argv[0], sys.argv, os.environ) def main(options = None): """Configure and start the daemon.""" # we'll be threading for downloads/updates gobject.threads_init() dbus.glib.threads_init() if options is None: parser = OptionParser() parser.add_option("-f", "--no-fork", action="store_true", default=False, dest="nofork") parser.add_option("-r", "--remote-shutdown", action="store_true", default=False, dest="remoteshutdown") (options, args) = parser.parse_args() if not options.nofork: if os.fork(): sys.exit() fd = os.open("/dev/null", os.O_RDWR) os.dup2(fd, 0) os.dup2(fd, 1) os.dup2(fd, 2) os.close(fd) confparser = ConfigParser() opts = UDConfig() if os.path.exists(config_file): confpp_obj = ConfigPreProcessor(config_file) try: confparser.readfp(confpp_obj) except ParsingError, e: print >> sys.stderr, "Error reading config file: %s" % e sys.exit(1) syslog.openlog("yum-updatesd", 0, syslog.LOG_DAEMON) opts.populate(confparser, 'main') updd = UpdatesDaemon(opts) if opts.dbus_listener: bus = dbus.SystemBus() name = dbus.service.BusName("edu.duke.linux.yum", bus=bus) YumDbusListener(updd, name, allowshutdown = options.remoteshutdown) run_interval_ms = opts.run_interval * 1000 # needs to be in ms gobject.timeout_add(run_interval_ms, updd.updatesCheck) mainloop = gobject.MainLoop() mainloop.run() if __name__ == "__main__": main()
gpl-2.0
bat-serjo/vivisect
PE/ordlookup/msvbvm60.py
11
16265
ord_names = { 100:'ThunRTMain', 101:'VBDllUnRegisterServer', 102:'VBDllCanUnloadNow', 103:'VBDllRegisterServer', 104:'VBDllGetClassObject', 105:'UserDllMain', 106:'DllRegisterServer', 107:'DllUnregisterServer', 108:'__vbaAryLock', 109:'__vbaBoolErrVar', 110:'__vbaRedimVar2', 111:'__vbaStrErrVarCopy', 112:'__vbaVarLateMemCallLd', 113:'__vbaVarLateMemCallLdRf', 114:'__vbaVarLateMemCallSt', 115:'__vbaVarLateMemSt', 116:'__vbaVarLateMemStAd', 117:'__vbaAryVarVarg', 118:'__vbaFpCDblR4', 119:'__vbaFpCDblR8', 120:'__vbaFpCSngR4', 121:'__vbaFpCSngR8', 122:'__vbaFpCmpCy', 123:'__vbaFpCy', 124:'__vbaFpI2', 125:'__vbaFpI4', 126:'__vbaFpR4', 127:'__vbaFpR8', 128:'__vbaFpUI1', 129:'__vbaFreeObj', 130:'__vbaFreeStr', 131:'__vbaFreeVar', 132:'__vbaFreeVarg', 133:'__vbaI2Abs', 134:'__vbaI2I4', 135:'__vbaI2Sgn', 136:'__vbaI4Abs', 137:'__vbaI4Sgn', 138:'__vbaStrCopy', 139:'__vbaStrMove', 140:'__vbaUI1I2', 141:'__vbaUI1I4', 142:'__vbaUI1Sgn', 143:'__vbaVarCopy', 144:'__vbaVarDup', 145:'__vbaVarMove', 146:'__vbaVarVargNofree', 147:'__vbaVarZero', 148:'__vbaVargParmRef', 149:'__vbaVargVar', 150:'__vbaVargVarCopy', 151:'__vbaVargVarMove', 152:'__vbaVargVarRef', 153:'DLLGetDocumentation', 154:'DllCanUnloadNow', 155:'DllGetClassObject', 156:'_CIatan', 157:'_CIcos', 158:'_CIexp', 159:'_CIlog', 160:'_CIsin', 161:'_CIsqrt', 162:'_CItan', 163:'__vbaAptOffset', 164:'__vbaAryConstruct2', 165:'__vbaAryConstruct', 166:'__vbaAryCopy', 167:'__vbaAryDestruct', 168:'__vbaAryMove', 169:'__vbaAryRebase1Var', 170:'__vbaAryRecCopy', 171:'__vbaAryRecMove', 172:'__vbaAryUnlock', 173:'__vbaAryVar', 174:'__vbaBoolStr', 175:'__vbaBoolVar', 176:'__vbaBoolVarNull', 177:'__vbaCVarAryUdt', 178:'__vbaCastObj', 179:'__vbaCastObjVar', 180:'__vbaCheckType', 181:'__vbaCheckTypeVar', 182:'__vbaChkstk', 183:'__vbaCopyBytes', 184:'__vbaCopyBytesZero', 185:'__vbaCyAbs', 186:'ProcCallEngine', 187:'DllFunctionCall', 188:'__vbaCyAdd', 189:'__vbaCyErrVar', 190:'CopyRecord', 191:'__vbaCyFix', 192:'__vbaCyForInit', 
193:'__vbaCyForNext', 194:'__vbaCyI2', 195:'TipGetAddressOfPredeclaredInstance', 196:'__vbaCyI4', 197:'__vbaCyInt', 198:'__vbaCyMul', 199:'MethCallEngine', 200:'__vbaCyMulI2', 201:'__vbaCySgn', 202:'__vbaCyStr', 203:'__vbaCySub', 204:'__vbaCyUI1', 205:'__vbaCyVar', 206:'__vbaDateR4', 207:'__vbaDateR8', 208:'__vbaDateStr', 209:'__vbaDateVar', 210:'__vbaDerefAry', 211:'__vbaDerefAry1', 212:'__vbaEnd', 213:'__vbaErase', 214:'__vbaEraseKeepData', 215:'__vbaEraseNoPop', 216:'__vbaError', 217:'__vbaErrorOverflow', 218:'__vbaExceptHandler', 219:'__vbaExitEachAry', 220:'TipSetOption', 221:'__vbaExitEachColl', 222:'__vbaExitEachVar', 223:'__vbaExitProc', 224:'__vbaFPException', 225:'__vbaFPFix', 226:'__vbaFPInt', 227:'TipUnloadProject', 228:'__vbaFailedFriend', 229:'__vbaFileClose', 230:'TipCreateInstanceProject2', 231:'EbResetProject', 232:'EbGetHandleOfExecutingProject', 233:'__vbaFileCloseAll', 234:'__vbaFileLock', 235:'__vbaFileOpen', 236:'__vbaFileSeek', 237:'__vbaFixstrConstruct', 238:'__vbaForEachAry', 239:'__vbaForEachCollAd', 240:'__vbaForEachCollObj', 241:'__vbaForEachCollVar', 242:'__vbaForEachVar', 243:'__vbaFreeObjList', 244:'__vbaFreeStrList', 245:'__vbaFreeVarList', 246:'__vbaGenerateBoundsError', 247:'__vbaGet3', 248:'__vbaGet4', 249:'__vbaGetFxStr3', 250:'__vbaGetFxStr4', 251:'__vbaGetOwner3', 252:'__vbaGetOwner4', 253:'__vbaGosub', 254:'__vbaGosubFree', 255:'__vbaGosubReturn', 256:'__vbaHresultCheck', 257:'__vbaHresultCheckNonvirt', 258:'__vbaHresultCheckObj', 259:'__vbaI2Cy', 260:'__vbaI2ErrVar', 261:'__vbaI2ForNextCheck', 262:'__vbaI2Str', 263:'__vbaI2Var', 264:'__vbaI4Cy', 265:'EbResetProjectNormal', 266:'TipUnloadInstance', 267:'__vbaI4ErrVar', 268:'EbLibraryLoad', 269:'EbLibraryUnload', 270:'__vbaI4ForNextCheck', 271:'EbLoadRunTime', 272:'__vbaI4Str', 273:'__vbaI4Var', 274:'EbCreateContext', 275:'EbDestroyContext', 276:'EbSetContextWorkerThread', 277:'__vbaInStr', 278:'__vbaInStrB', 279:'__vbaInStrVar', 280:'__vbaInStrVarB', 281:'__vbaInputFile', 
282:'__vbaLateIdCall', 283:'__vbaLateIdCallLd', 284:'EbGetErrorInfo', 285:'__vbaLateIdCallSt', 286:'__vbaLateIdNamedCall', 287:'__vbaLateIdNamedCallLd', 288:'__vbaLateIdNamedCallSt', 289:'__vbaLateIdNamedStAd', 290:'__vbaLateIdSt', 291:'__vbaLateIdStAd', 292:'__vbaLateMemCall', 293:'__vbaLateMemCallLd', 294:'__vbaLateMemCallSt', 295:'__vbaLateMemNamedCall', 296:'__vbaLateMemNamedCallLd', 297:'__vbaLateMemNamedCallSt', 298:'EbIsProjectOnStack', 299:'TipCreateInstanceEx', 300:'GetMem2', 301:'GetMem4', 302:'GetMem8', 303:'GetMemStr', 304:'GetMemVar', 305:'GetMemObj', 306:'PutMem2', 307:'PutMem4', 308:'PutMem8', 309:'PutMemStr', 310:'PutMemVar', 311:'PutMemObj', 312:'SetMemVar', 313:'SetMemObj', 314:'GetMemNewObj', 315:'PutMemNewObj', 316:'SetMemNewObj', 317:'GetMem1', 318:'PutMem1', 319:'GetMemEvent', 320:'PutMemEvent', 321:'SetMemEvent', 322:'__vbaLateMemNamedStAd', 323:'__vbaLateMemSt', 324:'__vbaLateMemStAd', 325:'__vbaLbound', 326:'__vbaLdZeroAry', 327:'__vbaLenBstr', 328:'__vbaLenBstrB', 329:'__vbaLenVar', 330:'__vbaLenVarB', 331:'__vbaLineInputStr', 332:'__vbaLineInputVar', 333:'__vbaLsetFixstr', 334:'__vbaLsetFixstrFree', 335:'__vbaMidStmtBstr', 336:'__vbaMidStmtBstrB', 337:'__vbaMidStmtVar', 338:'__vbaMidStmtVarB', 339:'__vbaNameFile', 340:'__vbaNew2', 341:'__vbaNew', 342:'__vbaNextEachAry', 343:'__vbaNextEachCollAd', 344:'__vbaNextEachCollObj', 345:'__vbaNextEachCollVar', 346:'__vbaNextEachVar', 347:'__vbaObjAddref', 348:'__vbaObjIs', 349:'__vbaObjSet', 350:'__vbaObjSetAddref', 351:'__vbaObjVar', 352:'__vbaOnError', 353:'__vbaOnGoCheck', 354:'__vbaPowerR8', 355:'__vbaPrintFile', 356:'__vbaPrintObj', 357:'__vbaPut3', 358:'__vbaPut4', 359:'__vbaPutFxStr3', 360:'__vbaPutFxStr4', 361:'__vbaPutOwner3', 362:'__vbaPutOwner4', 363:'__vbaR4Cy', 364:'__vbaR4ErrVar', 365:'__vbaR4ForNextCheck', 366:'__vbaR4Sgn', 367:'__vbaR4Str', 368:'__vbaR4Var', 369:'__vbaR8Cy', 370:'__vbaR8ErrVar', 371:'__vbaR8FixI2', 372:'__vbaR8FixI4', 373:'__vbaR8ForNextCheck', 374:'__vbaR8IntI2', 
375:'__vbaR8IntI4', 376:'__vbaR8Sgn', 377:'__vbaR8Str', 378:'__vbaR8Var', 379:'__vbaRaiseEvent', 380:'__vbaRecAnsiToUni', 381:'__vbaRecAssign', 382:'__vbaRecDestruct', 383:'__vbaRecDestructAnsi', 384:'__vbaRecUniToAnsi', 385:'__vbaRedim', 386:'__vbaRedimPreserve', 387:'__vbaRedimPreserveVar', 388:'__vbaRedimPreserveVar2', 389:'__vbaRedimVar', 390:'__vbaRefVarAry', 391:'__vbaResume', 392:'__vbaRsetFixstr', 393:'__vbaRsetFixstrFree', 394:'__vbaSetSystemError', 395:'__vbaStopExe', 396:'__vbaStr2Vec', 397:'__vbaStrAryToAnsi', 398:'__vbaStrAryToUnicode', 399:'__vbaStrBool', 400:'EVENT_SINK_QueryInterface', 401:'EVENT_SINK_AddRef', 402:'EVENT_SINK_Release', 403:'EVENT_SINK_GetIDsOfNames', 404:'EVENT_SINK_Invoke', 405:'__vbaStrCat', 406:'__vbaStrCmp', 407:'__vbaStrComp', 408:'__vbaStrCompVar', 409:'__vbaStrCy', 410:'BASIC_CLASS_QueryInterface', 411:'BASIC_CLASS_AddRef', 412:'BASIC_CLASS_Release', 413:'BASIC_CLASS_GetIDsOfNames', 414:'BASIC_CLASS_Invoke', 415:'__vbaStrDate', 416:'__vbaStrFixstr', 417:'__vbaStrI2', 418:'__vbaStrI4', 419:'__vbaStrLike', 420:'BASIC_DISPINTERFACE_GetTICount', 421:'BASIC_DISPINTERFACE_GetTypeInfo', 422:'__vbaStrR4', 423:'__vbaStrR8', 424:'__vbaStrTextCmp', 425:'__vbaStrTextLike', 426:'__vbaStrToAnsi', 427:'__vbaStrToUnicode', 428:'__vbaStrUI1', 429:'__vbaStrVarCopy', 430:'Zombie_QueryInterface', 431:'Zombie_AddRef', 432:'Zombie_Release', 433:'Zombie_GetTypeInfoCount', 434:'Zombie_GetTypeInfo', 435:'Zombie_GetIDsOfNames', 436:'Zombie_Invoke', 437:'__vbaStrVarMove', 438:'__vbaStrVarVal', 439:'__vbaUI1Cy', 440:'EVENT_SINK2_AddRef', 441:'EVENT_SINK2_Release', 442:'__vbaUI1ErrVar', 443:'__vbaUI1Str', 444:'__vbaUI1Var', 445:'__vbaUbound', 446:'__vbaUdtVar', 447:'__vbaUnkVar', 448:'__vbaVar2Vec', 449:'__vbaVarAbs', 450:'__vbaVarAdd', 451:'__vbaVarAnd', 452:'__vbaVarCat', 453:'__vbaVarCmpEq', 454:'__vbaVarCmpGe', 455:'__vbaVarCmpGt', 456:'__vbaVarCmpLe', 457:'__vbaVarCmpLt', 458:'__vbaVarCmpNe', 459:'__vbaVarDateVar', 460:'__vbaVarDiv', 
461:'__vbaVarEqv', 462:'__vbaVarErrI4', 463:'__vbaVarFix', 464:'__vbaVarForInit', 465:'__vbaVarForNext', 466:'__vbaVarIdiv', 467:'__vbaVarImp', 468:'__vbaVarIndexLoad', 469:'__vbaVarIndexLoadRef', 470:'__vbaVarIndexLoadRefLock', 471:'__vbaVarIndexStore', 472:'__vbaVarIndexStoreObj', 473:'__vbaVarInt', 474:'__vbaVarLike', 475:'__vbaVarLikeVar', 476:'__vbaVarMod', 477:'__vbaVarMul', 478:'__vbaVarNeg', 479:'__vbaVarNot', 480:'__vbaVarOr', 481:'__vbaVarPow', 482:'__vbaVarSetObj', 483:'__vbaVarSetObjAddref', 484:'__vbaVarSetUnk', 485:'__vbaVarSetUnkAddref', 486:'__vbaVarSetVar', 487:'__vbaVarSetVarAddref', 488:'__vbaVarSub', 489:'__vbaVarTextCmpEq', 490:'__vbaVarTextCmpGe', 491:'__vbaVarTextCmpGt', 492:'__vbaVarTextCmpLe', 493:'__vbaVarTextCmpLt', 494:'__vbaVarTextCmpNe', 495:'__vbaVarTextLike', 496:'__vbaVarTextLikeVar', 497:'__vbaVarTextTstEq', 498:'__vbaVarTextTstGe', 499:'__vbaVarTextTstGt', 500:'__vbaVarTextTstLe', 501:'__vbaVarTextTstLt', 502:'__vbaVarTextTstNe', 503:'__vbaVarTstEq', 504:'__vbaVarTstGe', 505:'__vbaVarTstGt', 506:'__vbaVarTstLe', 507:'__vbaVarTstLt', 508:'__vbaVarTstNe', 509:'__vbaVarXor', 510:'__vbaVargObj', 511:'__vbaVargObjAddref', 512:'rtcLeftBstr', 513:'rtcLeftVar', 514:'rtcRightBstr', 515:'rtcRightVar', 516:'rtcAnsiValueBstr', 517:'rtcLowerCaseBstr', 518:'rtcLowerCaseVar', 519:'rtcTrimBstr', 520:'rtcTrimVar', 521:'rtcLeftTrimBstr', 522:'rtcLeftTrimVar', 523:'rtcRightTrimBstr', 524:'rtcRightTrimVar', 525:'rtcSpaceBstr', 526:'rtcSpaceVar', 527:'rtcUpperCaseBstr', 528:'rtcUpperCaseVar', 529:'rtcKillFiles', 530:'rtcChangeDir', 531:'rtcMakeDir', 532:'rtcRemoveDir', 533:'rtcChangeDrive', 534:'rtcBeep', 535:'rtcGetTimer', 536:'rtcStrFromVar', 537:'rtcBstrFromAnsi', 538:'rtcPackDate', 539:'rtcPackTime', 540:'rtcGetDateValue', 541:'rtcGetTimeValue', 542:'rtcGetDayOfMonth', 543:'rtcGetHourOfDay', 544:'rtcGetMinuteOfHour', 545:'rtcGetMonthOfYear', 546:'rtcGetPresentDate', 547:'rtcGetSecondOfMinute', 548:'rtcSetDateVar', 549:'rtcSetDateBstr', 
550:'rtcSetTimeVar', 551:'rtcSetTimeBstr', 552:'rtcGetDayOfWeek', 553:'rtcGetYear', 554:'rtcFileReset', 555:'rtcFileAttributes', 556:'rtcIsArray', 557:'rtcIsDate', 558:'rtcIsEmpty', 559:'rtcIsError', 560:'rtcIsNull', 561:'rtcIsNumeric', 562:'rtcIsObject', 563:'rtcVarType', 564:'rtDecFromVar', 565:'rtcFileWidth', 566:'rtcInputCount', 567:'rtcInputCountVar', 568:'rtcFileSeek', 569:'rtcFileLocation', 570:'rtcFileLength', 571:'rtcEndOfFile', 572:'rtcHexBstrFromVar', 573:'rtcHexVarFromVar', 574:'rtcOctBstrFromVar', 575:'rtcOctVarFromVar', 576:'rtcFileCopy', 577:'rtcFileDateTime', 578:'rtcFileLen', 579:'rtcGetFileAttr', 580:'rtcSetFileAttr', 581:'rtcR8ValFromBstr', 582:'rtcSin', 583:'rtcCos', 584:'rtcTan', 585:'rtcAtn', 586:'rtcExp', 587:'rtcLog', 588:'rtcRgb', 589:'rtcQBColor', 590:'rtcMacId', 591:'rtcTypeName', 592:'rtcIsMissing', 593:'rtcRandomNext', 594:'rtcRandomize', 595:'rtcMsgBox', 596:'rtcInputBox', 597:'rtcAppActivate', 598:'rtcDoEvents', 599:'rtcSendKeys', 600:'rtcShell', 601:'rtcArray', 602:'__vbaVargUnk', 603:'__vbaVargUnkAddref', 604:'__vbaVerifyVarObj', 605:'rtcGetErl', 606:'rtcStringBstr', 607:'rtcStringVar', 608:'rtcVarBstrFromAnsi', 609:'rtcGetDateBstr', 610:'rtcGetDateVar', 611:'rtcGetTimeBstr', 612:'rtcGetTimeVar', 613:'rtcVarStrFromVar', 614:'rtcSqr', 615:'rtcIMEStatus', 616:'rtcLeftCharBstr', 617:'rtcLeftCharVar', 618:'rtcRightCharBstr', 619:'rtcRightCharVar', 620:'rtcInputCharCount', 621:'rtcInputCharCountVar', 622:'rtcStrConvVar', 623:'__vbaWriteFile', 624:'rtcGetHostLCID', 625:'rtcCreateObject', 626:'rtcGetObject', 627:'rtcAppleScript', 628:'rtcMidBstr', 629:'rtcMidVar', 630:'rtcInStr', 631:'rtcMidCharBstr', 632:'rtcMidCharVar', 633:'rtcInStrChar', 634:'rtBstrFromErrVar', 635:'rtBoolFromErrVar', 636:'rtCyFromErrVar', 637:'rtI2FromErrVar', 638:'rtI4FromErrVar', 639:'rtR4FromErrVar', 640:'rtR8FromErrVar', 641:'rtcDateFromVar', 642:'rtcVarFromVar', 643:'rtcCVErrFromVar', 644:'VarPtr', 645:'rtcDir', 646:'rtcCurrentDirBstr', 647:'rtcCurrentDir', 
648:'rtcFreeFile', 649:'rtcCompareBstr', 650:'rtcBstrFromFormatVar', 651:'rtcBstrFromError', 652:'rtcVarFromError', 653:'rtcLenCharVar', 654:'rtcLenVar', 655:'rtcFixVar', 656:'rtcAbsVar', 657:'rtcIntVar', 658:'rtcSgnVar', 659:'_adj_fdiv_m16i', 660:'rtcVarFromFormatVar', 661:'rtcDateAdd', 662:'rtcDateDiff', 663:'rtcDatePart', 664:'rtcPartition', 665:'rtcChoose', 666:'rtcEnvironVar', 667:'rtcEnvironBstr', 668:'rtcSwitch', 669:'rtcCommandBstr', 670:'rtcCommandVar', 671:'rtcSLN', 672:'rtcSYD', 673:'rtcDDB', 674:'rtcIPMT', 675:'rtcPPMT', 676:'rtcPMT', 677:'rtcPV', 678:'rtcFV', 679:'rtcNPer', 680:'rtcRate', 681:'rtcImmediateIf', 682:'rtcIRR', 683:'rtcMIRR', 684:'rtcNPV', 685:'rtcErrObj', 686:'rtUI1FromErrVar', 687:'rtcVarDateFromVar', 688:'_adj_fdiv_m32', 689:'rtcGetSetting', 690:'rtcSaveSetting', 691:'rtcDeleteSetting', 692:'rtcGetAllSettings', 693:'rtcByteValueBstr', 694:'rtcBstrFromByte', 695:'rtcVarBstrFromByte', 696:'rtcCharValueBstr', 697:'rtcBstrFromChar', 698:'rtcVarBstrFromChar', 699:'rtcSetCurrentCalendar', 700:'rtcGetCurrentCalendar', 701:'_adj_fdiv_m32i', 702:'rtcFormatNumber', 703:'rtcFormatCurrency', 704:'rtcFormatPercent', 705:'rtcFormatDateTime', 706:'rtcWeekdayName', 707:'rtcMonthName', 708:'rtcFilter', 709:'rtcInStrRev', 710:'rtcJoin', 711:'rtcSplit', 712:'rtcReplace', 713:'rtcStrReverse', 714:'rtcRound', 715:'rtcCallByName', 716:'rtcCreateObject2', 717:'rtcStrConvVar2', 718:'_adj_fdiv_m64', 719:'_adj_fdiv_r', 720:'_adj_fdivr_m16i', 721:'_adj_fdivr_m32', 722:'_adj_fdivr_m32i', 723:'_adj_fdivr_m64', 724:'_adj_fpatan', 725:'_adj_fprem', 726:'_adj_fprem1', 727:'_adj_fptan', 728:'_allmul', 999:'TipInvokeMethod2', 1016:'TipInvokeMethod', 1024:'IID_IVbaHost', 1025:'EbGetObjConnectionCounts', 2000:'CreateIExprSrvObj', 2010:'EbGetVBAObject', }
apache-2.0
kevinmel2000/sl4a
python-build/python-libs/xmpppy/xmpp/filetransfer.py
212
10157
## filetransfer.py
##
## Copyright (C) 2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.

# $Id: filetransfer.py,v 1.6 2004/12/25 20:06:59 snakeru Exp $

"""
This module contains IBB class that is the simple implementation of JEP-0047.
Note that this is just a transport for data. You have to negotiate data transfer before
(via StreamInitiation most probably). Unfortunately SI is not implemented yet.
"""

from protocol import *
from dispatcher import PlugIn
import base64


class IBB(PlugIn):
    """ IBB used to transfer small-sized data chunk over estabilished xmpp connection.
        Data is split into small blocks (by default 3000 bytes each), encoded as base 64
        and sent to another entity that compiles these blocks back into the data chunk.
        This is very inefficiend but should work under any circumstances. Note that
        using IBB normally should be the last resort.
    """

    def __init__(self):
        """ Initialise internal variables. """
        PlugIn.__init__(self)
        self.DBG_LINE = 'ibb'
        self._exported_methods = [self.OpenStream]
        # Per-stream bookkeeping: sid -> {'direction','block-size','fp','seq','syn_id'}.
        # 'direction' is prefixed '<' for incoming, '>' for outgoing, and '|>' for an
        # outgoing stream the peer has not acknowledged ("committed") yet.
        self._streams = {}
        # AMP rules attached to every data message so that delivery to offline
        # storage or to a different resource raises an error instead of silently
        # corrupting the stream.
        self._ampnode = Node(NS_AMP + ' amp', payload=[
            Node('rule', {'condition': 'deliver-at', 'value': 'stored', 'action': 'error'}),
            Node('rule', {'condition': 'match-resource', 'value': 'exact', 'action': 'error'})])

    def plugin(self, owner):
        """ Register handlers for receiving incoming datastreams. Used internally. """
        self._owner.RegisterHandlerOnce('iq', self.StreamOpenReplyHandler)  # Move to StreamOpen and specify stanza id
        self._owner.RegisterHandler('iq', self.IqHandler, ns=NS_IBB)
        self._owner.RegisterHandler('message', self.ReceiveHandler, ns=NS_IBB)

    def IqHandler(self, conn, stanza):
        """ Handles streams state change. Used internally. """
        typ = stanza.getType()
        self.DEBUG('IqHandler called typ->%s' % typ, 'info')
        if typ == 'set' and stanza.getTag('open', namespace=NS_IBB):
            self.StreamOpenHandler(conn, stanza)
        elif typ == 'set' and stanza.getTag('close', namespace=NS_IBB):
            self.StreamCloseHandler(conn, stanza)
        elif typ == 'result':
            # NOTE(review): StreamCommitHandler is not defined on this class; this
            # branch would raise AttributeError if ever reached -- confirm whether
            # it is dead code (results normally consumed by RegisterHandlerOnce).
            self.StreamCommitHandler(conn, stanza)
        elif typ == 'error':
            self.StreamOpenReplyHandler(conn, stanza)
        else:
            conn.send(Error(stanza, ERR_BAD_REQUEST))
        raise NodeProcessed

    def StreamOpenHandler(self, conn, stanza):
        """ Handles opening of new incoming stream. Used internally.

            Expected stanza shape:
            <iq type='set' from='romeo@montague.net/orchard'
                to='juliet@capulet.com/balcony' id='inband_1'>
              <open sid='mySID' block-size='4096'
                    xmlns='http://jabber.org/protocol/ibb'/>
            </iq>
        """
        err = None
        sid, blocksize = stanza.getTagAttr('open', 'sid'), stanza.getTagAttr('open', 'block-size')
        self.DEBUG('StreamOpenHandler called sid->%s blocksize->%s' % (sid, blocksize), 'info')
        try:
            blocksize = int(blocksize)
        except (TypeError, ValueError):  # was a bare except: -- narrowed to what int() raises
            err = ERR_BAD_REQUEST
        if not sid or not blocksize:
            err = ERR_BAD_REQUEST
        elif sid in self._streams:  # redundant .keys() call dropped
            err = ERR_UNEXPECTED_REQUEST
        if err:
            rep = Error(stanza, err)
        else:
            self.DEBUG("Opening stream: id %s, block-size %s" % (sid, blocksize), 'info')
            rep = Protocol('iq', stanza.getFrom(), 'result', stanza.getTo(), {'id': stanza.getID()})
            # FIXME(security): 'sid' comes from the remote peer and is interpolated
            # into a filesystem path -- a sid containing '../' escapes /tmp.
            self._streams[sid] = {'direction': '<' + str(stanza.getFrom()),
                                  'block-size': blocksize,
                                  'fp': open('/tmp/xmpp_file_' + sid, 'w'),
                                  'seq': 0,
                                  'syn_id': stanza.getID()}
        conn.send(rep)

    def OpenStream(self, sid, to, fp, blocksize=3000):
        """ Start new stream. You should provide stream id 'sid', the endpoind jid
            'to', the file object containing info for send 'fp'. Also the desired
            blocksize can be specified.
            Take into account that recommended stanza size is 4k and IBB uses
            base64 encoding that increases size of data by 1/3."""
        if sid in self._streams:
            return
        if not JID(to).getResource():
            return
        self._streams[sid] = {'direction': '|>' + to, 'block-size': blocksize, 'fp': fp, 'seq': 0}
        self._owner.RegisterCycleHandler(self.SendHandler)
        syn = Protocol('iq', to, 'set',
                       payload=[Node(NS_IBB + ' open', {'sid': sid, 'block-size': blocksize})])
        self._owner.send(syn)
        self._streams[sid]['syn_id'] = syn.getID()
        return self._streams[sid]

    def SendHandler(self, conn):
        """ Send next portion of data if it is time to do it. Used internally. """
        self.DEBUG('SendHandler called', 'info')
        # Iterate over a snapshot of the keys: entries are deleted inside the loop,
        # which is unsafe on a live dict view (Python 3) -- list() works on both 2 and 3.
        for sid in list(self._streams):
            stream = self._streams[sid]
            if stream['direction'][:2] == '|>':
                cont = 1  # stream not committed by the peer yet -- skip this cycle
            elif stream['direction'][0] == '>':
                chunk = stream['fp'].read(stream['block-size'])
                if chunk:
                    datanode = Node(NS_IBB + ' data', {'sid': sid, 'seq': stream['seq']},
                                    base64.encodestring(chunk))
                    # seq wraps at 2**16 per JEP-0047
                    stream['seq'] += 1
                    if stream['seq'] == 65536:
                        stream['seq'] = 0
                    conn.send(Protocol('message', stream['direction'][1:],
                                       payload=[datanode, self._ampnode]))
                else:
                    # EOF reached: notify the other side about stream closing,
                    # notify the local user about sucessfull send,
                    # delete the local stream.
                    conn.send(Protocol('iq', stream['direction'][1:], 'set',
                                       payload=[Node(NS_IBB + ' close', {'sid': sid})]))
                    conn.Event(self.DBG_LINE, 'SUCCESSFULL SEND', stream)
                    del self._streams[sid]
                    self._owner.UnregisterCycleHandler(self.SendHandler)

    # Example of a data message as produced above:
    # <message from='romeo@montague.net/orchard' to='juliet@capulet.com/balcony' id='msg1'>
    #   <data xmlns='http://jabber.org/protocol/ibb' sid='mySID' seq='0'>
    #     qANQR1DBwU4DX7jmYZnncmUQB/9KuKBddzQH+tZ1ZywKK0yHKnq57kWq+RFtQdCJ
    #     ...
    #   </data>
    #   <amp xmlns='http://jabber.org/protocol/amp'>
    #     <rule condition='deliver-at' value='stored' action='error'/>
    #     <rule condition='match-resource' value='exact' action='error'/>
    #   </amp>
    # </message>

    def ReceiveHandler(self, conn, stanza):
        """ Receive next portion of incoming datastream and store it write
            it to temporary file. Used internally.
        """
        sid, seq, data = (stanza.getTagAttr('data', 'sid'),
                          stanza.getTagAttr('data', 'seq'),
                          stanza.getTagData('data'))
        self.DEBUG('ReceiveHandler called sid->%s seq->%s' % (sid, seq), 'info')
        try:
            seq = int(seq)
            data = base64.decodestring(data)
        except Exception:  # malformed seq or broken base64 -> treated as empty chunk below
            seq = ''
            data = ''
        err = None
        if sid not in self._streams:
            err = ERR_ITEM_NOT_FOUND
        else:
            stream = self._streams[sid]
            if not data:
                err = ERR_BAD_REQUEST
            elif seq != stream['seq']:  # '<>' replaced by the equivalent '!='
                err = ERR_UNEXPECTED_REQUEST
            else:
                self.DEBUG('Successfull receive sid->%s %s+%s bytes'
                           % (sid, stream['fp'].tell(), len(data)), 'ok')
                stream['seq'] += 1
                stream['fp'].write(data)
        if err:
            self.DEBUG('Error on receive: %s' % err, 'error')
            conn.send(Error(Iq(to=stanza.getFrom(), frm=stanza.getTo(),
                               payload=[Node(NS_IBB + ' close')]), err, reply=0))

    def StreamCloseHandler(self, conn, stanza):
        """ Handle stream closure due to all data transmitted.
            Raise xmpppy event specifying successfull data receive. """
        sid = stanza.getTagAttr('close', 'sid')
        self.DEBUG('StreamCloseHandler called sid->%s' % sid, 'info')
        if sid in self._streams:
            conn.send(stanza.buildReply('result'))
            conn.Event(self.DBG_LINE, 'SUCCESSFULL RECEIVE', self._streams[sid])
            del self._streams[sid]
        else:
            conn.send(Error(stanza, ERR_ITEM_NOT_FOUND))

    def StreamBrokenHandler(self, conn, stanza):
        """ Handle stream closure due to all some error while receiving data.
            Raise xmpppy event specifying unsuccessfull data receive.
        """
        syn_id = stanza.getID()
        self.DEBUG('StreamBrokenHandler called syn_id->%s' % syn_id, 'info')
        for sid in list(self._streams):  # snapshot: entries deleted while iterating
            stream = self._streams[sid]
            if stream['syn_id'] == syn_id:
                if stream['direction'][0] == '<':
                    conn.Event(self.DBG_LINE, 'ERROR ON RECEIVE', stream)
                else:
                    conn.Event(self.DBG_LINE, 'ERROR ON SEND', stream)
                del self._streams[sid]

    def StreamOpenReplyHandler(self, conn, stanza):
        """ Handle remote side reply about is it agree or not to receive our
            datastream. Used internally. Raises xmpppy event specfiying if
            the data transfer is agreed upon."""
        syn_id = stanza.getID()
        self.DEBUG('StreamOpenReplyHandler called syn_id->%s' % syn_id, 'info')
        for sid in list(self._streams):  # snapshot: entries may be deleted while iterating
            stream = self._streams[sid]
            if stream['syn_id'] == syn_id:
                if stanza.getType() == 'error':
                    if stream['direction'][0] == '<':
                        conn.Event(self.DBG_LINE, 'ERROR ON RECEIVE', stream)
                    else:
                        conn.Event(self.DBG_LINE, 'ERROR ON SEND', stream)
                    del self._streams[sid]
                elif stanza.getType() == 'result':
                    if stream['direction'][0] == '|':
                        # strip the "uncommitted" marker: sending may now start
                        stream['direction'] = stream['direction'][1:]
                        conn.Event(self.DBG_LINE, 'STREAM COMMITTED', stream)
                    else:
                        conn.send(Error(stanza, ERR_UNEXPECTED_REQUEST))
apache-2.0
OpenCobolIDE/OpenCobolIDE
open_cobol_ide/extlibs/pyqode/core/_designer_plugins/__init__.py
7
1408
""" This packages contains the various qt designer plugins """ from pyqode.qt import QtDesigner class WidgetPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin): """ Base class for writing a designer plugins. To write a plugin, inherit from this class and define implement at least: - klass() - objectName() """ def __init__(self, parent=None): super(WidgetPlugin, self).__init__(parent=parent) self.initialized = False print(self.name(), self.includeFile(), self.objectName()) def klass(self): """ Returns the classname of the widget """ raise NotImplementedError() def initialize(self, form_editor): self.initialized = True def isInitialized(self): return self.initialized def isContainer(self): return False def icon(self): return None def domXml(self): return ('<widget class="%s" name="%s">\n</widget>\n' % (self.name(), self.objectName())) def group(self): return 'pyQode' def objectName(self): return self.name() def includeFile(self): return self.klass().__module__ def name(self): return self.klass().__name__ def toolTip(self): return '' def whatsThis(self): return '' def createWidget(self, parent): return self.klass()(parent)
gpl-3.0
cpollard1001/FreeCAD_sf_master
src/Mod/Arch/ArchFrame.py
14
8636
#***************************************************************************
#*                                                                         *
#*   Copyright (c) 2013                                                    *
#*   Yorik van Havre <yorik@uncreated.net>                                 *
#*                                                                         *
#*   This program is free software; you can redistribute it and/or modify  *
#*   it under the terms of the GNU Lesser General Public License (LGPL)    *
#*   as published by the Free Software Foundation; either version 2 of     *
#*   the License, or (at your option) any later version.                   *
#*   for detail see the LICENCE text file.                                 *
#*                                                                         *
#*   This program is distributed in the hope that it will be useful,       *
#*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
#*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
#*   GNU Library General Public License for more details.                  *
#*                                                                         *
#*   You should have received a copy of the GNU Library General Public     *
#*   License along with this program; if not, write to the Free Software   *
#*   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  *
#*   USA                                                                   *
#*                                                                         *
#***************************************************************************

import FreeCAD,Draft,ArchComponent,DraftVecUtils,ArchCommands
from FreeCAD import Vector
if FreeCAD.GuiUp:
    import FreeCADGui
    from PySide import QtCore, QtGui
    from DraftTools import translate
else:
    # Headless fallback: no translation available, return the text untouched.
    def translate(ctxt,txt):
        return txt

__title__="FreeCAD Arch Frame"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"

# Possible roles for frames
Roles = ['Covering','Member','Railing','Shading Device','Tendon']


def makeFrame(baseobj,profile,name=translate("Arch","Frame")):
    """makeFrame(baseobj,profile,[name]): creates a frame object from a base sketch
    (or any other object containing wires) and a profile object (an extrudable 2D
    object containing faces or closed wires)"""
    obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
    obj.Label = translate("Arch",name)
    _Frame(obj)
    if FreeCAD.GuiUp:
        _ViewProviderFrame(obj.ViewObject)
    if baseobj:
        obj.Base = baseobj
    if profile:
        obj.Profile = profile
        if FreeCAD.GuiUp:
            # hide the 2D profile: only the extruded frame should be visible
            profile.ViewObject.hide()
    return obj


class _CommandFrame:
    "the Arch Frame command definition"

    def GetResources(self):
        """Return the GUI resources (icon, menu text, shortcut, tooltip)."""
        return {'Pixmap': 'Arch_Frame',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Arch_Frame","Frame"),
                'Accel': "F, R",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Arch_Frame","Creates a frame object from a planar 2D object and a profile")}

    def IsActive(self):
        """Command is available whenever a document is open."""
        # fix: 'not x is None' rewritten as the idiomatic 'x is not None'
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        """Create a frame from the two selected objects (base, then profile)."""
        s = FreeCADGui.Selection.getSelection()
        if len(s) == 2:
            FreeCAD.ActiveDocument.openTransaction(translate("Arch","Create Frame"))
            FreeCADGui.addModule("Arch")
            FreeCADGui.doCommand("Arch.makeFrame(FreeCAD.ActiveDocument."+s[0].Name+",FreeCAD.ActiveDocument."+s[1].Name+")")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()


class _Frame(ArchComponent.Component):
    "A parametric frame object"

    def __init__(self,obj):
        ArchComponent.Component.__init__(self,obj)
        obj.addProperty("App::PropertyLink","Profile","Arch","The profile used to build this frame")
        obj.addProperty("App::PropertyBool","Align","Arch","Specifies if the profile must be aligned with the extrusion wires")
        obj.addProperty("App::PropertyVectorDistance","Offset","Arch","An offset vector between the base sketch and the frame")
        obj.addProperty("App::PropertyInteger","BasePoint","Arch","Crossing point of the path on the profile.")
        obj.addProperty("App::PropertyAngle","Rotation","Arch","The rotation of the profile around its extrusion axis")
        self.Type = "Frame"
        obj.Role = Roles

    def execute(self,obj):
        """Rebuild obj.Shape by sweeping the profile along each edge of the base.

        If the base already contains solids, its shape is reused directly;
        otherwise the profile is translated/rotated onto each base edge and
        extruded along it, and the results are compounded.
        """
        if self.clone(obj):
            return
        # nothing to do without a wire-bearing base object
        if not obj.Base:
            return
        if not obj.Base.Shape:
            return
        if not obj.Base.Shape.Wires:
            return

        pl = obj.Placement
        if obj.Base.Shape.Solids:
            # base is already solid: adopt it as-is, re-applying our placement
            obj.Shape = obj.Base.Shape.copy()
            if not pl.isNull():
                obj.Placement = obj.Shape.Placement.multiply(pl)
        else:
            # the profile must be a 2D object made of faces or closed wires
            if not obj.Profile:
                return
            if not obj.Profile.isDerivedFrom("Part::Part2DObject"):
                return
            if not obj.Profile.Shape:
                return
            if not obj.Profile.Shape.Wires:
                return
            if not obj.Profile.Shape.Faces:
                for w in obj.Profile.Shape.Wires:
                    if not w.isClosed():
                        return
            import DraftGeomUtils, Part, math
            baseprofile = obj.Profile.Shape.copy()
            if not baseprofile.Faces:
                # build faces from the closed wires so the extrusion is solid
                f = []
                for w in baseprofile.Wires:
                    f.append(Part.Face(w))
                if len(f) == 1:
                    baseprofile = f[0]
                else:
                    baseprofile = Part.makeCompound(f)
            shapes = []
            # NOTE(review): 'normal' was computed but never used; kept disabled.
            #normal = DraftGeomUtils.getNormal(obj.Base.Shape)
            #for wire in obj.Base.Shape.Wires:
            for e in obj.Base.Shape.Edges:
                #e = wire.Edges[0]
                bvec = DraftGeomUtils.vec(e)
                bpoint = e.Vertexes[0].Point
                profile = baseprofile.copy()
                #basepoint = profile.Placement.Base
                if hasattr(obj,"BasePoint"):
                    # candidate crossing points: center of mass, then midpoint and
                    # endpoint of each profile edge, indexed by obj.BasePoint
                    edges = Part.__sortEdges__(profile.Edges)
                    basepointliste = [profile.CenterOfMass]
                    for edge in edges:
                        basepointliste.append(DraftGeomUtils.findMidpoint(edge))
                        basepointliste.append(edge.Vertexes[-1].Point)
                    try:
                        basepoint = basepointliste[obj.BasePoint]
                    except IndexError:
                        FreeCAD.Console.PrintMessage(translate("Arch","Crossing point not found in profile.\n"))
                        basepoint = basepointliste[0]
                else:
                    basepoint = profile.CenterOfMass
                # move the chosen crossing point onto the edge start
                profile.translate(bpoint.sub(basepoint))
                if obj.Align:
                    # rotate the profile so its normal matches the edge direction
                    axis = profile.Placement.Rotation.multVec(FreeCAD.Vector(0,0,1))
                    angle = bvec.getAngle(axis)
                    if round(angle,Draft.precision()) != 0:
                        # a pi rotation is a no-op for the extrusion, skip it
                        if round(angle,Draft.precision()) != round(math.pi,Draft.precision()):
                            rotaxis = axis.cross(bvec)
                            profile.rotate(DraftVecUtils.tup(bpoint), DraftVecUtils.tup(rotaxis), math.degrees(angle))
                if obj.Rotation:
                    # user-specified twist around the extrusion axis
                    profile.rotate(DraftVecUtils.tup(bpoint), DraftVecUtils.tup(FreeCAD.Vector(bvec).normalize()), obj.Rotation)
                #profile = wire.makePipeShell([profile],True,False,2) TODO buggy
                profile = profile.extrude(bvec)
                if obj.Offset:
                    if not DraftVecUtils.isNull(obj.Offset):
                        profile.translate(obj.Offset)
                shapes.append(profile)
            if shapes:
                obj.Shape = Part.makeCompound(shapes)
                obj.Placement = pl


class _ViewProviderFrame(ArchComponent.ViewProviderComponent):
    "A View Provider for the Frame object"

    def __init__(self,vobj):
        ArchComponent.ViewProviderComponent.__init__(self,vobj)

    def getIcon(self):
        """Return the tree-view icon for frame objects."""
        import Arch_rc
        return ":/icons/Arch_Frame_Tree.svg"

    def claimChildren(self):
        """Group the profile object under the frame in the model tree."""
        p = []
        if hasattr(self,"Object"):
            if self.Object.Profile:
                p = [self.Object.Profile]
        return ArchComponent.ViewProviderComponent.claimChildren(self)+p


if FreeCAD.GuiUp:
    FreeCADGui.addCommand('Arch_Frame',_CommandFrame())
lgpl-2.1
dataxu/ansible
lib/ansible/module_utils/facts/hardware/dragonfly.py
232
1090
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware


class DragonFlyHardwareCollector(HardwareCollector):
    """Hardware fact collector for the DragonFly platform.

    There is no DragonFly-specific hardware fact class, so this collector
    simply reuses the FreeBSD one.
    """

    _fact_class = FreeBSDHardware
    _platform = 'DragonFly'
gpl-3.0
ManageIQ/integration_tests
cfme/tests/cloud/test_cloud_timelines.py
2
15406
import fauxfactory import pytest from wrapanapi.exceptions import NotFoundError from cfme import test_requirements from cfme.base.ui import ServerDiagnosticsView from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.control.explorer.policies import VMControlPolicy from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.blockers import GH from cfme.utils.log import logger from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.tier(2), # Only one prov out of the 2 is taken, if not supplying --use-provider=complete pytest.mark.provider([AzureProvider, EC2Provider], required_flags=['timelines', 'events']), pytest.mark.usefixtures('setup_provider', 'provider'), pytest.mark.meta(blockers=[ GH("ManageIQ/manageiq-providers-amazon:620", unblock=lambda provider: not provider.one_of(EC2Provider)) ]), test_requirements.timelines, test_requirements.events, ] @pytest.fixture(scope="function") def mark_vm_as_appliance(create_vm, appliance): # set diagnostics vm relations_view = navigate_to(create_vm, 'EditManagementEngineRelationship', wait_for_view=0) relations_view.form.server.select_by_visible_text( "{name} ({sid})".format( name=appliance.server.name, sid=appliance.server.sid ) ) relations_view.form.save_button.click() @pytest.fixture(scope='function') def control_policy(appliance, create_vm): action = appliance.collections.actions.create(fauxfactory.gen_alpha(), "Tag", dict(tag=("My Company Tags", "Environment", "Development"))) policy = appliance.collections.policies.create(VMControlPolicy, fauxfactory.gen_alpha()) policy.assign_events("VM Power Off") policy.assign_actions_to_event("VM Power Off", action) profile = appliance.collections.policy_profiles.create(fauxfactory.gen_alpha(), policies=[policy]) yield create_vm.assign_policy_profiles(profile.description) for obj in [profile, policy, action]: if 
obj.exists: obj.delete() @pytest.fixture(scope='function') def azone(create_vm, appliance): zone_id = create_vm.rest_api_entity.availability_zone_id rest_zones = create_vm.appliance.rest_api.collections.availability_zones zone_name = next(zone.name for zone in rest_zones if zone.id == zone_id) inst_zone = appliance.collections.cloud_av_zones.instantiate(name=zone_name, provider=create_vm.provider) return inst_zone class InstEvent: ACTIONS = { 'create': { 'tl_event': ('AWS_EC2_Instance_CREATE', 'virtualMachines_write_EndRequest'), 'tl_category': 'Creation/Addition', 'db_event_type': ('AWS_EC2_Instance_CREATE', 'virtualMachines_write_EndRequest'), 'emit_cmd': '_create_vm' }, 'start': { 'tl_event': ( 'AWS_API_CALL_StartInstances', 'AWS_EC2_Instance_running', 'virtualMachines_start_EndRequest' ), 'tl_category': 'Power Activity', 'db_event_type': ( 'AWS_EC2_Instance_running', 'virtualMachines_start_EndRequest'), 'emit_cmd': '_power_on' }, 'stop': { 'tl_event': ( 'AWS_API_CALL_StopInstances', 'AWS_EC2_Instance_stopped', 'virtualMachines_deallocate_EndRequest' ), 'tl_category': 'Power Activity', 'db_event_type': ('AWS_EC2_Instance_stopped', 'virtualMachines_deallocate_EndRequest'), 'emit_cmd': '_power_off' }, 'rename': { 'tl_event': 'AWS_EC2_Instance_CREATE', 'tl_category': 'Creation/Addition', 'db_event_type': 'AWS_EC2_Instance_CREATE', 'emit_cmd': '_rename_vm' }, 'delete': { 'tl_event': ( 'virtualMachines_delete_EndRequest', 'AWS_EC2_Instance_DELETE', 'AWS_API_CALL_TerminateInstances', ), 'tl_category': 'Deletion/Removal', 'db_event_type': ( 'virtualMachines_delete_EndRequest', 'AWS_API_CALL_TerminateInstances' ), 'emit_cmd': '_delete_vm' }, 'policy': { 'tl_event': ('vm_poweroff',), 'tl_category': 'VM Operation', 'emit_cmd': '_power_off' }, } def __init__(self, inst, event): self.inst = inst self.event = event self.__dict__.update(self.ACTIONS[self.event]) def emit(self): try: emit_action = getattr(self, self.emit_cmd) emit_action() except AttributeError: raise 
AttributeError('{} is not a valid key in ACTION. self: {}'.format(self.event, self.__dict__)) def _create_vm(self): if not self.inst.exists_on_provider: self.inst.create_on_provider(allow_skip="default", find_in_cfme=True) else: logger.info('%r already exists on provider', self.inst.name) def _power_on(self): return self.inst.mgmt.start() def _power_off(self): return self.inst.mgmt.stop() def _power_off_power_on(self): self.inst.mgmt.stop() return self.inst.mgmt.start() def _restart(self): return self.inst.mgmt.restart() def _rename_vm(self): logger.info('%r will be renamed', self.inst.name) new_name = f"{self.inst.name}-renamed" self.inst.mgmt.rename(new_name) self.inst.name = new_name self.inst.mgmt.restart() self.inst.provider.refresh_provider_relationships() self.inst.wait_to_appear() return self.inst.name def _delete_vm(self): try: logger.info("attempting to delete vm %s", self.inst.name) self.inst.mgmt.cleanup() except NotFoundError: logger.info("can't delete vm %r, does not exist", self.inst.name) pass def _check_timelines(self, target, policy_events): """Verify that the event is present in the timeline Args: target: A entity where a Timeline is present (Instance, Availability zone, Provider...) policy_events: switch between the management event timeline and the policy timeline. Returns: The length of the array containing the event found on the Timeline of the target. """ def _get_timeline_events(target, policy_events): """Navigate to the timeline of the target and select the management timeline or the policy timeline. Returns an array of the found events. 
""" timelines_view = navigate_to(target, 'Timelines', wait_for_view=20, force=True) if isinstance(timelines_view, ServerDiagnosticsView): timelines_view = timelines_view.timelines timeline_filter = timelines_view.filter if policy_events: logger.info('Will search in Policy event timelines') timelines_view.filter.event_type.select_by_visible_text('Policy Events') timeline_filter.policy_event_category.select_by_visible_text(self.tl_category) timeline_filter.policy_event_status.fill('Both') else: if timelines_view.browser.product_version < "5.10": timeline_filter.detailed_events.fill(True) for selected_option in timeline_filter.event_category.all_selected_options: timeline_filter.event_category.select_by_visible_text(selected_option) timeline_filter.event_category.select_by_visible_text(self.tl_category) timeline_filter.time_position.select_by_visible_text('centered') timeline_filter.apply.click() logger.info('Searching for event type: %r in timeline category: %r', self.event, self.tl_category) return timelines_view.chart.get_events(self.tl_category) events_list = _get_timeline_events(target, policy_events) logger.debug('events_list: %r', str(events_list)) if not events_list: self.inst.provider.refresh_provider_relationships() logger.warning('Event list of %r is empty!', target) found_events = [] for evt in events_list: try: if not policy_events: if evt.source_instance in self.inst.name and evt.event_type in self.tl_event: found_events.append(evt) break else: if evt.event_type in self.tl_event and evt.target in self.inst.name: found_events.append(evt) break except AttributeError as err: logger.warning('Issue with TimelinesEvent: %r .Faulty event: %r', str(err), str(evt)) continue logger.info('found events on %r: %s', target, "\n".join([repr(e) for e in found_events])) return len(found_events) def catch_in_timelines(self, soft_assert, targets, policy_events=False): for target in targets: try: wait_for(self._check_timelines, [target, policy_events], timeout='15m', 
fail_condition=0) except TimedOutError: soft_assert(False, '0 occurrence of {evt} found on the timeline of {tgt}'.format( evt=self.event, tgt=target)) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_create_event(create_vm, soft_assert, azone): """ Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider, azone) event = 'create' inst_event = InstEvent(create_vm, event) logger.info('Will generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='9m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_policy_event(create_vm, control_policy, soft_assert): """ Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ event = 'policy' # accordions on azone and provider's page are not displayed in 5.10 if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider) inst_event = InstEvent(create_vm, event) logger.info('Will generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='9m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets, policy_events=True) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_stop_event(create_vm, soft_assert, azone): """ Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ # accordions on azone and provider's page are not displayed in 5.10 if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider, azone) event = 'stop' inst_event = InstEvent(create_vm, event) logger.info('Will 
generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='7m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_start_event(create_vm, soft_assert, azone): """ Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ # accordions on azone and provider's page are not displayed in 5.10 if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider, azone) event = 'start' inst_event = InstEvent(create_vm, 'start') logger.info('Will generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='7m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_diagnostic(create_vm, mark_vm_as_appliance, soft_assert): """Check Configuration/diagnostic/timelines. 
Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ event = 'create' targets = (create_vm.appliance.server,) inst_event = InstEvent(create_vm, event) logger.info('Will generate event %r on machine %r', event, create_vm.name) inst_event.catch_in_timelines(soft_assert, targets) @pytest.mark.provider([EC2Provider], scope='function') @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_rename_event(create_vm, soft_assert, azone): """ Metadata: test_flag: timelines, events Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ event = 'rename' # accordions on azone and provider's page are not displayed in 5.10 if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider, azone) inst_event = InstEvent(create_vm, event) logger.info('Will generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='12m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets) @pytest.mark.meta(automates=[1730819]) @pytest.mark.parametrize('create_vm', ['small_template'], indirect=True) def test_cloud_timeline_delete_event(create_vm, soft_assert, azone): """ Metadata: test_flag: timelines, events Bugzilla: 1730819 Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Events """ event = 'delete' # accordions on azone and provider's page are not displayed in 5.10 if BZ(1670550).blocks: targets = (create_vm, ) else: targets = (create_vm, create_vm.provider, azone) inst_event = InstEvent(create_vm, event) logger.info('Will generate event %r on machine %r', event, create_vm.name) wait_for(inst_event.emit, timeout='9m', message=f'Event {event} did timeout') inst_event.catch_in_timelines(soft_assert, targets)
gpl-2.0
lexyan/SickBeard
lib/hachoir_parser/misc/ole2.py
90
14227
""" Microsoft Office documents parser. Informations: * wordole.c of AntiWord program (v0.35) Copyright (C) 1998-2003 A.J. van Os Released under GNU GPL http://www.winfield.demon.nl/ * File gsf-infile-msole.c of libgsf library (v1.14.0) Copyright (C) 2002-2004 Jody Goldberg (jody@gnome.org) Released under GNU LGPL 2.1 http://freshmeat.net/projects/libgsf/ * PDF from AAF Association Copyright (C) 2004 AAF Association Copyright (C) 1991-2003 Microsoft Corporation http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf Author: Victor Stinner Creation: 2006-04-23 """ from lib.hachoir_parser import HachoirParser from lib.hachoir_core.field import ( FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet, UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum, Bytes, RawBytes, NullBytes, String) from lib.hachoir_core.text_handler import filesizeHandler from lib.hachoir_core.endian import LITTLE_ENDIAN from lib.hachoir_parser.common.win32 import GUID from lib.hachoir_parser.misc.msoffice import CustomFragment, OfficeRootEntry, PROPERTY_NAME from lib.hachoir_parser.misc.word_doc import WordDocumentParser from lib.hachoir_parser.misc.msoffice_summary import SummaryParser MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes MAX_BIG_BLOCK_LOG2 = 14 # 64 kB # Number of items in DIFAT NB_DIFAT = 109 class SECT(UInt32): UNUSED = 0xFFFFFFFF # -1 END_OF_CHAIN = 0xFFFFFFFE # -2 BFAT_SECTOR = 0xFFFFFFFD # -3 DIFAT_SECTOR = 0xFFFFFFFC # -4 SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR)) special_value_name = { UNUSED: "unused", END_OF_CHAIN: "end of a chain", BFAT_SECTOR: "BFAT sector (in a FAT)", DIFAT_SECTOR: "DIFAT sector (in a FAT)", } def __init__(self, parent, name, description=None): UInt32.__init__(self, parent, name, description) def createDisplay(self): val = self.value return SECT.special_value_name.get(val, str(val)) class Property(FieldSet): TYPE_ROOT = 5 TYPE_NAME = { 1: "storage", 2: "stream", 3: "ILockBytes", 4: "IPropertyStorage", 5: "root" } 
DECORATOR_NAME = { 0: "red", 1: "black", } static_size = 128 * 8 def createFields(self): bytes = self.stream.readBytes(self.absolute_address, 4) if bytes == "\0R\0\0": charset = "UTF-16-BE" else: charset = "UTF-16-LE" yield String(self, "name", 64, charset=charset, truncate="\0") yield UInt16(self, "namelen", "Length of the name") yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME) yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME) yield SECT(self, "left") yield SECT(self, "right") yield SECT(self, "child", "Child node (valid for storage and root types)") yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)") yield NullBytes(self, "flags", 4, "User flags") yield TimestampWin64(self, "creation", "Creation timestamp(valid for storage and root types)") yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)") yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)") if self["/header/bb_shift"].value == 9: yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream and root types)")) yield NullBytes(self, "padding", 4) else: yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)")) def createDescription(self): name = self["name"].display size = self["size"].display return "Property: %s (%s)" % (name, size) class DIFat(SeekableFieldSet): def __init__(self, parent, name, db_start, db_count, description=None): SeekableFieldSet.__init__(self, parent, name, description) self.start=db_start self.count=db_count def createFields(self): for index in xrange(NB_DIFAT): yield SECT(self, "index[%u]" % index) for index in xrange(self.count): # this is relative to real DIFAT start self.seekBit(NB_DIFAT * SECT.static_size+self.parent.sector_size*(self.start+index)) for sect_index in xrange(NB_DIFAT*(index+1),NB_DIFAT*(index+2)): yield SECT(self, "index[%u]" % sect_index) class Header(FieldSet): 
static_size = 68 * 8 def createFields(self): yield GUID(self, "clsid", "16 bytes GUID used by some apps") yield UInt16(self, "ver_min", "Minor version") yield UInt16(self, "ver_maj", "Minor version") yield Bytes(self, "endian", 2, "Endian (0xFFFE for Intel)") yield UInt16(self, "bb_shift", "Log, base 2, of the big block size") yield UInt16(self, "sb_shift", "Log, base 2, of the small block size") yield NullBytes(self, "reserved[]", 6, "(reserved)") yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)") yield UInt32(self, "bb_count", "Number of Big Block Depot blocks") yield SECT(self, "bb_start", "Root start block") yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)") yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)") yield SECT(self, "sb_start", "Small Block Depot start block") yield UInt32(self, "sb_count") yield SECT(self, "db_start", "First block of DIFAT") yield UInt32(self, "db_count", "Number of SECTs in DIFAT") # Header (ole_id, header, difat) size in bytes HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size class SectFat(FieldSet): def __init__(self, parent, name, start, count, description=None): FieldSet.__init__(self, parent, name, description, size=count*32) self.count = count self.start = start def createFields(self): for i in xrange(self.start, self.start + self.count): yield SECT(self, "index[%u]" % i) class OLE2_File(HachoirParser, RootSeekableFieldSet): PARSER_TAGS = { "id": "ole2", "category": "misc", "file_ext": ( "doc", "dot", # Microsoft Word "ppt", "ppz", "pps", "pot", # Microsoft Powerpoint "xls", "xla", # Microsoft Excel "msi", # Windows installer ), "mime": ( u"application/msword", u"application/msexcel", u"application/mspowerpoint", ), "min_size": 512*8, "description": "Microsoft Office document", "magic": (("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),), } endian = LITTLE_ENDIAN def __init__(self, stream, 
**args): RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) HachoirParser.__init__(self, stream, **args) def validate(self): if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1": return "Invalid magic" if self["header/ver_maj"].value not in (3, 4): return "Unknown major version (%s)" % self["header/ver_maj"].value if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"): return "Unknown endian (%s)" % self["header/endian"].raw_display if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2): return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value if self["header/bb_shift"].value < self["header/sb_shift"].value: return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" \ % (self["header/sb_shift"].value, self["header/bb_shift"].value) return True def createFields(self): # Signature yield Bytes(self, "ole_id", 8, "OLE object signature") header = Header(self, "header") yield header # Configure values self.sector_size = (8 << header["bb_shift"].value) self.fat_count = header["bb_count"].value self.items_per_bbfat = self.sector_size / SECT.static_size self.ss_size = (8 << header["sb_shift"].value) self.items_per_ssfat = self.items_per_bbfat # Read DIFAT (one level of indirection) yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT") # Read FAT (one level of indirection) for field in self.readBFAT(): yield field # Read SFAT for field in self.readSFAT(): yield field # Read properties chain = self.getChain(self["header/bb_start"].value) prop_per_sector = self.sector_size // Property.static_size self.properties = [] for block in chain: self.seekBlock(block) for index in xrange(prop_per_sector): property = Property(self, "property[]") yield property self.properties.append(property) # Parse first property for index, property in enumerate(self.properties): if index == 0: name = "root" else: try: name = 
PROPERTY_NAME[property["name"].value] except LookupError: name = property.name+"content" for field in self.parseProperty(property, name): yield field def parseProperty(self, property, name_prefix): if not property["size"].value: return if property.name != "property[0]" \ and (property["size"].value < self["header/threshold"].value): # Field is stored in the ministream, skip it return name = "%s[]" % name_prefix first = None previous = None size = 0 fragment_group = None chain = self.getChain(property["start"].value) while True: try: block = chain.next() contiguous = False if not first: first = block contiguous = True if previous and block == (previous+1): contiguous = True if contiguous: previous = block size += self.sector_size continue except StopIteration: block = None if first is None: break self.seekBlock(first) desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1) desc += " of %s bytes" % (self.sector_size // 8) if name_prefix in set(("root", "summary", "doc_summary", "word_doc")): if name_prefix == "root": parser = OfficeRootEntry elif name_prefix == "word_doc": parser = WordDocumentParser else: parser = SummaryParser field = CustomFragment(self, name, size, parser, desc, fragment_group) yield field if not fragment_group: fragment_group = field.group else: yield RawBytes(self, name, size//8, desc) if block is None: break first = block previous = block size = self.sector_size def getChain(self, start, use_sfat=False): if use_sfat: fat = self.ss_fat items_per_fat = self.items_per_ssfat err_prefix = "SFAT chain" else: fat = self.bb_fat items_per_fat = self.items_per_bbfat err_prefix = "BFAT chain" block = start block_set = set() previous = block while block != SECT.END_OF_CHAIN: if block in SECT.SPECIALS: raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous)) if block in block_set: raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block)) block_set.add(block) yield block previous = 
block index = block // items_per_fat try: block = fat[index]["index[%u]" % block].value except LookupError: break def readBFAT(self): self.bb_fat = [] start = 0 count = self.items_per_bbfat for index, block in enumerate(self.array("difat/index")): block = block.value if block == SECT.UNUSED: break desc = "FAT %u/%u at block %u" % \ (1+index, self["header/bb_count"].value, block) self.seekBlock(block) field = SectFat(self, "bbfat[]", start, count, desc) yield field self.bb_fat.append(field) start += count def readSFAT(self): chain = self.getChain(self["header/sb_start"].value) start = 0 self.ss_fat = [] count = self.items_per_ssfat for index, block in enumerate(chain): self.seekBlock(block) field = SectFat(self, "sfat[]", \ start, count, \ "SFAT %u/%u at block %u" % \ (1+index, self["header/sb_count"].value, block)) yield field self.ss_fat.append(field) start += count def createContentSize(self): max_block = 0 for fat in self.array("bbfat"): for entry in fat: block = entry.value if block not in SECT.SPECIALS: max_block = max(block, max_block) if max_block in SECT.SPECIALS: return None else: return HEADER_SIZE + (max_block+1) * self.sector_size def seekBlock(self, block): self.seekBit(HEADER_SIZE + block * self.sector_size)
gpl-3.0
andnovar/networkx
networkx/tests/benchmark.py
22
7440
from timeit import Timer # This is gratefully modeled after the benchmarks found in # the numpy svn repository. http://svn.scipy.org/svn/numpy/trunk class Benchmark(object): """ Benchmark a method or simple bit of code using different Graph classes. If the test code is the same for each graph class, then you can set it during instantiation through the argument test_string. The argument test_string can also be a tuple of test code and setup code. The code is entered as a string valid for use with the timeit module. Example: >>> b=Benchmark(['Graph','XGraph']) >>> b['Graph']=('G.add_nodes_from(nlist)','nlist=range(100)') >>> b.run() """ def __init__(self,graph_classes,title='',test_string=None,runs=3,reps=1000): self.runs = runs self.reps = reps self.title = title self.class_tests = dict((gc,'') for gc in graph_classes) # set up the test string if it is the same for all classes. if test_string is not None: if isinstance(test_string,tuple): self['all']=test_string else: self['all']=(test_string,'') def __setitem__(self,graph_class,some_strs): """ Set a simple bit of code and setup string for the test. Use this for cases where the code differs from one class to another. 
""" test_str, setup_str = some_strs if graph_class == 'all': graph_class = self.class_tests.keys() elif not isinstance(graph_class,list): graph_class = [graph_class] for GC in graph_class: # setup_string='import networkx as NX\nG=NX.%s.%s()\n'%(GC.lower(),GC) \ # + setup_str setup_string='import networkx as NX\nG=NX.%s()\n'%(GC,) \ + setup_str self.class_tests[GC] = Timer(test_str, setup_string) def run(self): """Run the benchmark for each class and print results.""" column_len = max(len(G) for G in self.class_tests) print('='*72) if self.title: print("%s: %s runs, %s reps"% (self.title,self.runs,self.reps)) print('='*72) times=[] for GC,timer in self.class_tests.items(): name = GC.ljust(column_len) try: # t=sum(timer.repeat(self.runs,self.reps))/self.runs t=min(timer.repeat(self.runs,self.reps)) # print "%s: %s" % (name, timer.repeat(self.runs,self.reps)) times.append((t,name)) except Exception as e: print("%s: Failed to benchmark (%s)." % (name,e)) times.sort() tmin=times[0][0] for t,name in times: print("%s: %5.2f %s" % (name, t/tmin*100.,t)) print('-'*72) print() if __name__ == "__main__": # set up for all routines: classes=['Graph','MultiGraph','DiGraph','MultiDiGraph'] # classes=['Graph','MultiGraph','DiGraph','MultiDiGraph', # 'SpecialGraph','SpecialDiGraph','SpecialMultiGraph','SpecialMultiDiGraph'] # classes=['Graph','SpecialGraph'] all_tests=['add_nodes','add_edges','remove_nodes','remove_edges',\ 'neighbors','edges','degree','dijkstra','shortest path',\ 'subgraph','edgedata_subgraph','laplacian'] # Choose which tests to run tests=all_tests # tests=['edges','laplacian'] #tests=all_tests[-1:] N=100 if 'add_nodes' in tests: title='Benchmark: Adding nodes' test_string=('G.add_nodes_from(nlist)','nlist=range(%i)'%N) b=Benchmark(classes,title,test_string,runs=3,reps=1000) b.run() if 'add_edges' in tests: title='Benchmark: Adding edges' setup='elist=[(i,i+3) for i in range(%s-3)]\nG.add_nodes_from(range(%i))'%(N,N) test_string=('G.add_edges_from(elist)',setup) 
b=Benchmark(classes,title,test_string,runs=3,reps=1000) b.run() if 'remove_nodes' in tests: title='Benchmark: Adding and Deleting nodes' setup='nlist=range(%i)'%N test_string=('G.add_nodes_from(nlist)\nG.remove_nodes_from(nlist)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=1000) b.run() if 'remove_edges' in tests: title='Benchmark: Adding and Deleting edges' setup='elist=[(i,i+3) for i in range(%s-3)]'%N test_string=('G.add_edges_from(elist)\nG.remove_edges_from(elist)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=1000) b.run() if 'neighbors' in tests: N=500 p=0.3 title='Benchmark: reporting neighbors' setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('for n in G:\n for nbr in G.neighbors(n):\n pass',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'edges' in tests: N=500 p=0.3 title='Benchmark: reporting edges' setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('for n in G:\n for e in G.edges(n):\n pass',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'degree' in tests: N=500 p=0.3 title='Benchmark: reporting degree' setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('for d in G.degree():\n pass',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'dijkstra' in tests: N=500 p=0.3 title='dijkstra single source shortest path' setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('p=NX.single_source_dijkstra(G,i)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'shortest path' in tests: N=500 p=0.3 title='single source shortest path' setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('p=NX.single_source_shortest_path(G,i)',setup) 
b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'subgraph' in tests: N=500 p=0.3 title='subgraph method' setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('G.subgraph(nlist)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'edgedata_subgraph' in tests: N=500 p=0.3 title='subgraph method with edge data present' setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v,dict(hi=2)),(v,u,dict(hi=2))])'%(N,p) test_string=('G.subgraph(nlist)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=10) b.run() if 'laplacian' in tests: N=500 p=0.3 title='creation of laplacian matrix' setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges():\n G.add_edges_from([(u,v),(v,u)])'%(N,p) test_string=('NX.laplacian_matrix(G)',setup) b=Benchmark(classes,title,test_string,runs=3,reps=1) b.run()
bsd-3-clause
ericpre/hyperspy
hyperspy/drawing/_markers/vertical_line_segment.py
2
3486
# -*- coding: utf-8 -*- # Copyright 2007-2021 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import matplotlib.pyplot as plt from hyperspy.drawing.marker import MarkerBase class VerticalLineSegment(MarkerBase): """Vertical line segment marker that can be added to the signal figure Parameters ---------- x : array or float The position of line segment in x. If float, the marker is fixed. If array, the marker will be updated when navigating. The array should have the same dimensions in the navigation axes. y1 : array or float The position of the start of the line segment in x. see x1 arguments y2 : array or float The position of the start of the line segment in y. see x1 arguments kwargs : Keywords argument of axvline valid properties (i.e. recognized by mpl.plot). 
Example ------- >>> im = hs.signals.Signal2D(np.zeros((100, 100))) >>> m = hs.plot.markers.vertical_line_segment( >>> x=20, y1=30, y2=70, linewidth=4, color='red', linestyle='dotted') >>> im.add_marker(m) Add a marker permanently to a marker >>> im = hs.signals.Signal2D(np.zeros((60, 60))) >>> m = hs.plot.markers.vertical_line_segment(x=10, y1=20, y2=50) >>> im.add_marker(m, permanent=True) """ def __init__(self, x, y1, y2, **kwargs): MarkerBase.__init__(self) lp = {'color': 'black', 'linewidth': 1} self.marker_properties = lp self.set_data(x1=x, y1=y1, y2=y2) self.set_marker_properties(**kwargs) self.name = 'vertical_line_segment' def __repr__(self): string = "<marker.{}, {} (x={},y1={},y2={},color={})>".format( self.__class__.__name__, self.name, self.get_data_position('x1'), self.get_data_position('y1'), self.get_data_position('y2'), self.marker_properties['color'], ) return(string) def update(self): if self.auto_update is False: return self._update_segment() def _plot_marker(self): self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties) self._update_segment() def _update_segment(self): segments = self.marker.get_segments() segments[0][0, 0] = self.get_data_position('x1') segments[0][1, 0] = segments[0][0, 0] if self.get_data_position('y1') is None: segments[0][0, 1] = plt.getp(self.marker.axes, 'ylim')[0] else: segments[0][0, 1] = self.get_data_position('y1') if self.get_data_position('y2') is None: segments[0][1, 1] = plt.getp(self.marker.axes, 'ylim')[1] else: segments[0][1, 1] = self.get_data_position('y2') self.marker.set_segments(segments)
gpl-3.0
dominikl/openmicroscopy
components/tools/OmeroWeb/omeroweb/webclient/controller/share.py
14
7631
#!/usr/bin/env python # -*- coding: utf-8 -*- # # # # Copyright (c) 2008-2011 University of Dundee. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008. # # Version: 1.0 # import datetime import time from omero.rtypes import rtime from webclient.controller import BaseController class BaseShare(BaseController): shares = None shSize = None ownShares = None oshSize = 0 memberShares = None mshSize = 0 share = None imageInShare = None imgSize = 0 membersInShare = None comments = None cmSize = None def __init__(self, conn, share_id=None, **kw): BaseController.__init__(self, conn) if share_id is not None: self.share = self.conn.getShare(share_id) if self.share is None: raise AttributeError( "We are sorry, but that share either does not exist, or" " if it does, you have not been invited to see it." " Contact the user you think might own this share for" " more information.") if self.share._obj is None: raise AttributeError( "We are sorry, but that share either does not exist, or" " if it does, you have not been invited to see it." " Contact the user you think might own this share for" " more information.") if (self.share is not None and not self.share.active and not self.share.isOwned()): raise AttributeError( "%s is not active and cannot be visible. 
Please contact" " the user you think might own this share for more" " information." % self.share.getShareType()) def obj_type(self): """ Same as BaseContainer. Used to create identifier E.g. share-123 in right-hand panel """ return self.share.getShareType().lower() def obj_id(self): """ Same as BaseContainer. Used to create identifier E.g. share-123 in right-hand panel """ return self.share.getId() def createShare(self, host, images, message, members, enable, expiration=None): expiration_date = None if expiration is not None: d1 = datetime.datetime.strptime( expiration+" 23:59:59", "%Y-%m-%d %H:%M:%S") expiration_date = long( time.mktime(d1.timetuple()) + 1e-6 * d1.microsecond) * 1000 image_objects = list(self.conn.getObjects("Image", images)) member_objects = list(self.conn.getObjects("Experimenter", members)) return self.conn.createShare( host, image_objects, message, member_objects, enable, expiration_date) def createDiscussion(self, host, message, members, enable, expiration=None): expiration_date = None if expiration is not None: d1 = datetime.datetime.strptime( expiration+" 23:59:59", "%Y-%m-%d %H:%M:%S") expiration_date = rtime(long( time.mktime(d1.timetuple()) + 1e-6 * d1.microsecond) * 1000) member_objects = list(self.conn.getObjects("Experimenter", members)) return self.conn.createShare( host, [], message, member_objects, enable, expiration_date) def updateShareOrDiscussion(self, host, message, members, enable, expiration=None): expiration_date = None if expiration is not None: d1 = datetime.datetime.strptime( expiration+" 23:59:59", "%Y-%m-%d %H:%M:%S") expiration_date = long(time.mktime(d1.timetuple()) + 1e-6 * d1.microsecond) * 1000 old_groups = [m._obj for m in self.conn.getAllMembers(self.share.id)] new_groups = [e._obj for e in self.conn.getObjects("Experimenter", members)] add_mem = list() rm_mem = list() # remove for ogr in old_groups: flag = False for ngr in new_groups: if ngr.id.val == ogr.id.val: flag = True if not flag: rm_mem.append(ogr) # 
add for ngr in new_groups: flag = False for ogr in old_groups: if ogr.id.val == ngr.id.val: flag = True if not flag: add_mem.append(ngr) return self.conn.updateShareOrDiscussion( host, self.share.id, message, add_mem, rm_mem, enable, expiration_date) def addComment(self, host, comment): return self.conn.addComment(host, self.share.id, comment) def getShares(self): sh_list = list(self.conn.getOwnShares()) sh_list.extend(list(self.conn.getMemberShares())) sh_list.sort(key=lambda x: x.id, reverse=True) sh_list_with_counters = list() sh_ids = [sh.id for sh in sh_list] if len(sh_ids) > 0: sh_annotation_counter = self.conn.getCommentCount(sh_ids) for sh in sh_list: sh.annotation_counter = sh_annotation_counter.get(sh.id) sh_list_with_counters.append(sh) self.shares = sh_list_with_counters self.shSize = len(self.shares) def getComments(self, share_id): self.comments = list(self.conn.getComments(share_id)) self.comments.sort(key=lambda x: x.creationEventDate(), reverse=True) self.cmSize = len(self.comments) def removeImage(self, image_id): self.conn.removeImage(self.share.id, image_id) def getMembers(self, share_id): self.membersInShare = [m.id for m in self.conn.getAllMembers(share_id)] def getAllUsers(self, share_id): self.allInShare = list(self.conn.getAllMembers(share_id)) # list(self.conn.getAllUsers(share_id)) def loadShareContent(self): content = self.conn.getContents(self.share.id) imageInShare = list() # The content images can be dead links and this is indicated # by the getContents returning these: # omero.gateway.BlitzObjectWrapper(self,None) # These need to be appended like regular images to ensure that # broken links get properly displayed. 
If there is a need to handle # other content types, then this must be addressed by more special # caseing or by updating getContent for ex in content: imageInShare.append(ex) imageInShare.sort( # Sort deleted items to the end of the list key=lambda x: hasattr(x, 'getName') and x.getName() or "~") self.containers = {'images': imageInShare} self.c_size = len(imageInShare)
gpl-2.0
leiferikb/bitpop
src/content/test/gpu/gpu_tests/memory.py
1
3224
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import memory_expectations from telemetry import test from telemetry.page import page_test from telemetry.core.timeline import counter from telemetry.core.timeline import model MEMORY_LIMIT_MB = 256 SINGLE_TAB_LIMIT_MB = 128 WIGGLE_ROOM_MB = 4 test_harness_script = r""" var domAutomationController = {}; domAutomationController._finished = false; domAutomationController.send = function(msg) { // This should wait until all effects of memory management complete. // We will need to wait until all // 1. pending commits from the main thread to the impl thread in the // compositor complete (for visible compositors). // 2. allocations that the renderer's impl thread will make due to the // compositor and WebGL are completed. // 3. pending GpuMemoryManager::Manage() calls to manage are made. // 4. renderers' OnMemoryAllocationChanged callbacks in response to // manager are made. // Each step in this sequence can cause trigger the next (as a 1-2-3-4-1 // cycle), so we will need to pump this cycle until it stabilizes. // Pump the cycle 8 times (in principle it could take an infinite number // of iterations to settle). 
var rafCount = 0; var totalRafCount = 8; function pumpRAF() { if (rafCount == totalRafCount) { domAutomationController._finished = true; return; } ++rafCount; window.requestAnimationFrame(pumpRAF); } pumpRAF(); } window.domAutomationController = domAutomationController; window.addEventListener("load", function() { useGpuMemory(%d); }, false); """ % MEMORY_LIMIT_MB class _MemoryValidator(page_test.PageTest): def ValidatePage(self, page, tab, results): timeline_data = tab.browser.StopTracing() timeline_model = model.TimelineModel(timeline_data) for process in timeline_model.GetAllProcesses(): if 'gpu.GpuMemoryUsage' in process.counters: counter = process.GetCounter('gpu', 'GpuMemoryUsage') mb_used = counter.samples[-1] / 1048576 if mb_used + WIGGLE_ROOM_MB < SINGLE_TAB_LIMIT_MB: raise page_test.Failure('Memory allocation too low') if mb_used - WIGGLE_ROOM_MB > MEMORY_LIMIT_MB: raise page_test.Failure('Memory allocation too high') def CustomizeBrowserOptions(self, options): options.AppendExtraBrowserArgs('--enable-logging') options.AppendExtraBrowserArgs( '--force-gpu-mem-available-mb=%s' % MEMORY_LIMIT_MB) def WillNavigateToPage(self, page, tab): custom_categories = ['webkit.console', 'gpu'] tab.browser.StartTracing(','.join(custom_categories), 60) class Memory(test.Test): """Tests GPU memory limits""" test = _MemoryValidator page_set = 'page_sets/memory_tests.py' def CreateExpectations(self, page_set): return memory_expectations.MemoryExpectations() def CreatePageSet(self, options): page_set = super(Memory, self).CreatePageSet(options) for page in page_set.pages: page.script_to_evaluate_on_commit = test_harness_script return page_set
gpl-3.0
sacnayak/ssnayak-byte-1
lib/jinja2/exceptions.py
977
4428
# -*- coding: utf-8 -*- """ jinja2.exceptions ~~~~~~~~~~~~~~~~~ Jinja exceptions. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2._compat import imap, text_type, PY2, implements_to_string class TemplateError(Exception): """Baseclass for all template errors.""" if PY2: def __init__(self, message=None): if message is not None: message = text_type(message).encode('utf-8') Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message.decode('utf-8', 'replace') def __unicode__(self): return self.message or u'' else: def __init__(self, message=None): Exception.__init__(self, message) @property def message(self): if self.args: message = self.args[0] if message is not None: return message @implements_to_string class TemplateNotFound(IOError, LookupError, TemplateError): """Raised if a template does not exist.""" # looks weird, but removes the warning descriptor that just # bogusly warns us about message being deprecated message = None def __init__(self, name, message=None): IOError.__init__(self) if message is None: message = name self.message = message self.name = name self.templates = [name] def __str__(self): return self.message class TemplatesNotFound(TemplateNotFound): """Like :class:`TemplateNotFound` but raised if multiple templates are selected. This is a subclass of :class:`TemplateNotFound` exception, so just catching the base exception will catch both. .. 
versionadded:: 2.2 """ def __init__(self, names=(), message=None): if message is None: message = u'none of the templates given were found: ' + \ u', '.join(imap(text_type, names)) TemplateNotFound.__init__(self, names and names[-1] or None, message) self.templates = list(names) @implements_to_string class TemplateSyntaxError(TemplateError): """Raised to tell the user that there is a problem with the template.""" def __init__(self, message, lineno, name=None, filename=None): TemplateError.__init__(self, message) self.lineno = lineno self.name = name self.filename = filename self.source = None # this is set to True if the debug.translate_syntax_error # function translated the syntax error into a new traceback self.translated = False def __str__(self): # for translated errors we only return the message if self.translated: return self.message # otherwise attach some stuff location = 'line %d' % self.lineno name = self.filename or self.name if name: location = 'File "%s", %s' % (name, location) lines = [self.message, ' ' + location] # if the source is set, add the line to the output if self.source is not None: try: line = self.source.splitlines()[self.lineno - 1] except IndexError: line = None if line: lines.append(' ' + line.strip()) return u'\n'.join(lines) class TemplateAssertionError(TemplateSyntaxError): """Like a template syntax error, but covers cases where something in the template caused an error at compile time that wasn't necessarily caused by a syntax error. However it's a direct subclass of :exc:`TemplateSyntaxError` and has the same attributes. """ class TemplateRuntimeError(TemplateError): """A generic runtime error in the template engine. Under some situations Jinja may raise this exception. """ class UndefinedError(TemplateRuntimeError): """Raised if a template tries to operate on :class:`Undefined`.""" class SecurityError(TemplateRuntimeError): """Raised if a template tries to do something insecure if the sandbox is enabled. 
""" class FilterArgumentError(TemplateRuntimeError): """This error is raised if a filter was called with inappropriate arguments """
apache-2.0
joomel1/phantomjs
src/breakpad/src/third_party/protobuf/protobuf/python/stubout.py
671
4940
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is used for testing. The original is at: # http://code.google.com/p/pymox/ class StubOutForTesting: """Sample Usage: You want os.path.exists() to always return true during testing. stubs = StubOutForTesting() stubs.Set(os.path, 'exists', lambda x: 1) ... stubs.UnsetAll() The above changes os.path.exists into a lambda that returns 1. Once the ... part of the code finishes, the UnsetAll() looks up the old value of os.path.exists and restores it. """ def __init__(self): self.cache = [] self.stubs = [] def __del__(self): self.SmartUnsetAll() self.UnsetAll() def SmartSet(self, obj, attr_name, new_attr): """Replace obj.attr_name with new_attr. This method is smart and works at the module, class, and instance level while preserving proper inheritance. It will not stub out C types however unless that has been explicitly allowed by the type. This method supports the case where attr_name is a staticmethod or a classmethod of obj. Notes: - If obj is an instance, then it is its class that will actually be stubbed. Note that the method Set() does not do that: if obj is an instance, it (and not its class) will be stubbed. - The stubbing is using the builtin getattr and setattr. So, the __get__ and __set__ will be called when stubbing (TODO: A better idea would probably be to manipulate obj.__dict__ instead of getattr() and setattr()). 
Raises AttributeError if the attribute cannot be found. """ if (inspect.ismodule(obj) or (not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))): orig_obj = obj orig_attr = getattr(obj, attr_name) else: if not inspect.isclass(obj): mro = list(inspect.getmro(obj.__class__)) else: mro = list(inspect.getmro(obj)) mro.reverse() orig_attr = None for cls in mro: try: orig_obj = cls orig_attr = getattr(obj, attr_name) except AttributeError: continue if orig_attr is None: raise AttributeError("Attribute not found.") # Calling getattr() on a staticmethod transforms it to a 'normal' function. # We need to ensure that we put it back as a staticmethod. old_attribute = obj.__dict__.get(attr_name) if old_attribute is not None and isinstance(old_attribute, staticmethod): orig_attr = staticmethod(orig_attr) self.stubs.append((orig_obj, attr_name, orig_attr)) setattr(orig_obj, attr_name, new_attr) def SmartUnsetAll(self): """Reverses all the SmartSet() calls, restoring things to their original definition. Its okay to call SmartUnsetAll() repeatedly, as later calls have no effect if no SmartSet() calls have been made. """ self.stubs.reverse() for args in self.stubs: setattr(*args) self.stubs = [] def Set(self, parent, child_name, new_child): """Replace child_name's old definition with new_child, in the context of the given parent. The parent could be a module when the child is a function at module scope. Or the parent could be a class when a class' method is being replaced. The named child is set to new_child, while the prior definition is saved away for later, when UnsetAll() is called. This method supports the case where child_name is a staticmethod or a classmethod of parent. 
""" old_child = getattr(parent, child_name) old_attribute = parent.__dict__.get(child_name) if old_attribute is not None and isinstance(old_attribute, staticmethod): old_child = staticmethod(old_child) self.cache.append((parent, old_child, child_name)) setattr(parent, child_name, new_child) def UnsetAll(self): """Reverses all the Set() calls, restoring things to their original definition. Its okay to call UnsetAll() repeatedly, as later calls have no effect if no Set() calls have been made. """ # Undo calls to Set() in reverse order, in case Set() was called on the # same arguments repeatedly (want the original call to be last one undone) self.cache.reverse() for (parent, old_child, child_name) in self.cache: setattr(parent, child_name, old_child) self.cache = []
bsd-3-clause
ns950/calibre
src/calibre/gui2/actions/toc_edit.py
14
5583
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' __docformat__ = 'restructuredtext en' from collections import OrderedDict from PyQt5.Qt import (QTimer, QDialog, QGridLayout, QCheckBox, QLabel, QDialogButtonBox, QIcon) from calibre.gui2 import error_dialog, gprefs from calibre.gui2.actions import InterfaceAction SUPPORTED = {'EPUB', 'AZW3'} class ChooseFormat(QDialog): # {{{ def __init__(self, formats, parent=None): QDialog.__init__(self, parent) self.setWindowTitle(_('Choose format to edit')) self.setWindowIcon(QIcon(I('dialog_question.png'))) l = self.l = QGridLayout() self.setLayout(l) la = self.la = QLabel(_('Choose which format you want to edit:')) formats = sorted(formats) l.addWidget(la, 0, 0, 1, -1) self.buttons = [] for i, f in enumerate(formats): b = QCheckBox('&' + f, self) l.addWidget(b, 1, i) self.buttons.append(b) self.formats = gprefs.get('edit_toc_last_selected_formats', ['EPUB',]) bb = self.bb = QDialogButtonBox( QDialogButtonBox.Ok|QDialogButtonBox.Cancel) bb.addButton(_('&All formats'), bb.ActionRole).clicked.connect(self.do_all) bb.accepted.connect(self.accept) bb.rejected.connect(self.reject) l.addWidget(bb, l.rowCount(), 0, 1, -1) self.resize(self.sizeHint()) self.finished.connect(lambda code:gprefs.set('edit_toc_last_selected_formats', list(self.formats))) def do_all(self): for b in self.buttons: b.setChecked(True) self.accept() @dynamic_property def formats(self): def fget(self): for b in self.buttons: if b.isChecked(): yield unicode(b.text())[1:] def fset(self, formats): formats = {x.upper() for x in formats} for b in self.buttons: b.setChecked(b.text()[1:] in formats) return property(fget=fget, fset=fset) # }}} class ToCEditAction(InterfaceAction): name = 'Edit ToC' action_spec = (_('Edit ToC'), 'toc.png', _('Edit the Table of Contents 
in your books'), _('K')) dont_add_to = frozenset(['context-menu-device']) action_type = 'current' accepts_drops = True def accept_enter_event(self, event, mime_data): if mime_data.hasFormat("application/calibre+from_library"): return True return False def accept_drag_move_event(self, event, mime_data): if mime_data.hasFormat("application/calibre+from_library"): return True return False def drop_event(self, event, mime_data): mime = 'application/calibre+from_library' if mime_data.hasFormat(mime): self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split())) QTimer.singleShot(1, self.do_drop) return True return False def do_drop(self): book_id_map = self.get_supported_books(self.dropped_ids) del self.dropped_ids if book_id_map: self.do_edit(book_id_map) def genesis(self): self.qaction.triggered.connect(self.edit_books) def get_supported_books(self, book_ids): db = self.gui.library_view.model().db supported = set(SUPPORTED) ans = [(x, set((db.formats(x, index_is_id=True) or '').split(',')) .intersection(supported)) for x in book_ids] ans = [x for x in ans if x[1]] if not ans: error_dialog(self.gui, _('Cannot edit ToC'), _('Editing Table of Contents is only supported for books in the %s' ' formats. 
Convert to one of those formats before polishing.') %_(' or ').join(sorted(supported)), show=True) ans = OrderedDict(ans) return ans def get_books_for_editing(self): rows = [r.row() for r in self.gui.library_view.selectionModel().selectedRows()] if not rows or len(rows) == 0: d = error_dialog(self.gui, _('Cannot edit ToC'), _('No books selected')) d.exec_() return None db = self.gui.current_db ans = (db.id(r) for r in rows) return self.get_supported_books(ans) def do_edit(self, book_id_map): for book_id, fmts in book_id_map.iteritems(): if len(fmts) > 1: d = ChooseFormat(fmts, self.gui) if d.exec_() != d.Accepted: return fmts = d.formats for fmt in fmts: self.do_one(book_id, fmt) def do_one(self, book_id, fmt): from calibre.gui2.toc.main import TOCEditor db = self.gui.current_db path = db.format(book_id, fmt, index_is_id=True, as_path=True) title = db.title(book_id, index_is_id=True) + ' [%s]'%fmt d = TOCEditor(path, title=title, parent=self.gui) d.start() if d.exec_() == d.Accepted: with open(path, 'rb') as f: db.add_format(book_id, fmt, f, index_is_id=True) def edit_books(self): book_id_map = self.get_books_for_editing() if not book_id_map: return self.do_edit(book_id_map)
gpl-3.0
cogeorg/black_rhino
examples/Georg2012/production/networkx/readwrite/tests/test_pajek.py
13
2581
#!/usr/bin/env python """ Pajek tests """ from nose.tools import assert_equal from networkx import * import os,tempfile from io import open class TestPajek(object): def setUp(self): self.data="""*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180""" self.G=nx.MultiDiGraph() self.G.add_nodes_from(['A1', 'Bb', 'C', 'D2']) self.G.add_edges_from([('A1', 'A1'), ('A1', 'Bb'), ('A1', 'C'), ('Bb', 'A1'),('C', 'C'), ('C', 'D2'), ('D2', 'Bb')]) self.G.graph['name']='Tralala' (self.fd,self.fname)=tempfile.mkstemp() fh=open(self.fname,'wb') fh.write(self.data.encode('UTF-8')) fh.close() def tearDown(self): os.close(self.fd) os.unlink(self.fname) def test_parse_pajek_simple(self): # Example without node positions or shape data="""*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1""" G=parse_pajek(data) assert_equal(sorted(G.nodes()), ['1', '2']) assert_equal(sorted(G.edges()), [('1', '2'), ('1', '2')]) def test_parse_pajek(self): G=parse_pajek(self.data) assert_equal(sorted(G.nodes()), ['A1', 'Bb', 'C', 'D2']) assert_equal(sorted(G.edges()), [('A1', 'A1'), ('A1', 'Bb'), ('A1', 'C'), ('Bb', 'A1'), ('C', 'C'), ('C', 'D2'), ('D2', 'Bb')]) def test_read_pajek(self): G=parse_pajek(self.data) Gin=read_pajek(self.fname) 
assert_equal(sorted(G.nodes()), sorted(Gin.nodes())) assert_equal(sorted(G.edges()), sorted(Gin.edges())) assert_equal(self.G.graph,Gin.graph) for n in G.node: assert_equal(G.node[n],Gin.node[n])
gpl-3.0
malkoto1/just_cook
SQLAlchemy-1.0.4/examples/dogpile_caching/local_session_caching.py
30
3372
"""local_session_caching.py Grok everything so far ? This example creates a new dogpile.cache backend that will persist data in a dictionary which is local to the current session. remove() the session and the cache is gone. Create a new Dogpile cache backend that will store cached data local to the current Session. This is an advanced example which assumes familiarity with the basic operation of CachingQuery. """ from dogpile.cache.api import CacheBackend, NO_VALUE from dogpile.cache.region import register_backend class ScopedSessionBackend(CacheBackend): """A dogpile backend which will cache objects locally on the current session. When used with the query_cache system, the effect is that the objects in the cache are the same as that within the session - the merge() is a formality that doesn't actually create a second instance. This makes it safe to use for updates of data from an identity perspective (still not ideal for deletes though). When the session is removed, the cache is gone too, so the cache is automatically disposed upon session.remove(). """ def __init__(self, arguments): self.scoped_session = arguments['scoped_session'] def get(self, key): return self._cache_dictionary.get(key, NO_VALUE) def set(self, key, value): self._cache_dictionary[key] = value def delete(self, key): self._cache_dictionary.pop(key, None) @property def _cache_dictionary(self): """Return the cache dictionary linked to the current Session.""" sess = self.scoped_session() try: cache_dict = sess._cache_dictionary except AttributeError: sess._cache_dictionary = cache_dict = {} return cache_dict register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend") if __name__ == '__main__': from .environment import Session, regions from .caching_query import FromCache from dogpile.cache import make_region # set up a region based on the ScopedSessionBackend, # pointing to the scoped_session declared in the example # environment. 
regions['local_session'] = make_region().configure( 'sqlalchemy.session', arguments={ "scoped_session": Session } ) from .model import Person # query to load Person by name, with criterion # of "person 10" q = Session.query(Person).\ options(FromCache("local_session")).\ filter(Person.name == "person 10") # load from DB person10 = q.one() # next call, the query is cached. person10 = q.one() # clear out the Session. The "_cache_dictionary" dictionary # disappears with it. Session.remove() # query calls from DB again person10 = q.one() # identity is preserved - person10 is the *same* object that's # ultimately inside the cache. So it is safe to manipulate # the not-queried-for attributes of objects when using such a # cache without the need to invalidate - however, any change # that would change the results of a cached query, such as # inserts, deletes, or modification to attributes that are # part of query criterion, still require careful invalidation. cache, key = q._get_cache_plus_key() assert person10 is cache.get(key)[0]
gpl-2.0
RayHightower/wireshark
tools/ftsanity.py
14
3132
#!/usr/bin/env python """ Check the sanity of field definitions in Wireshark. """ # # Gilbert Ramirez <gram [AT] alumni.rice.edu> # # Wireshark - Network traffic analyzer # By Gerald Combs <gerald@wireshark.org> # Copyright 1998 Gerald Combs # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import sys try: from optparse import OptionParser except ImportError: sys.exit("Need python 2.3.") try: import commands except ImportError: sys.exit("Need to run on Unix.") errors = 0 class Proto: """Data for a protocol.""" def __init__(self, line): data = line.split("\t") assert len(data) == 3 assert data[0] == "P" self.name = data[1] self.abbrev = data[2] class Field: """Data for a field.""" def __init__(self, line): data = line.split("\t") assert len(data) == 8 assert data[0] == "F" self.name = data[1] self.abbrev = data[2] self.ftype = data[3] self.parent = data[4] self.blurb = data[5] self.base = data[6] self.bitmask = int(data[7],0) def gather_data(tshark): """Calls tshark and gathers data.""" cmd = "%s -G fields3" % (tshark,) (status, output) = commands.getstatusoutput(cmd) if status != 0: sys.exit("Failed: " + cmd) lines = output.split("\n") protos = [Proto(x) for x in lines if x[0] == "P"] fields = [Field(x) for x in lines if x[0] == "F"] return protos, fields def check_fields(fields): """Looks for problems in field 
definitions.""" global errors for field in fields: if field.bitmask != 0: if field.ftype.find("FT_UINT") != 0 and \ field.ftype.find("FT_INT") != 0 and \ field.ftype != "FT_BOOLEAN": print "%s has a bitmask 0x%x but is type %s" % \ (field.abbrev, field.bitmask, field.ftype) errors += 1 def run(tshark): """Run the tests.""" global errors protos, fields = gather_data(tshark) check_fields(fields) if errors > 0: sys.exit("%d errors found" % (errors,)) else: print "Success." def main(): """Parse the command-line.""" usage = "%prog tshark" parser = OptionParser(usage=usage) (options, args) = parser.parse_args() if len(args) != 1: parser.error("Need location of tshark.") run(args[0]) if __name__ == "__main__": main()
gpl-2.0
relue2718/diff-scraper
diffscraper/libdiffscraper/template.py
1
18300
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Template extraction via invariant-token matching across multiple documents.

Given several documents presumed to come from the same template, the code
tokenizes each, finds tokens shared by all documents ("invariants"), and
uses them to split every document into alternating template (invariant)
segments and data (variant) segments.

Author: Seunghyun Yoo (shyoo1st@cs.ucla.edu)
"""
import bisect
import pickle
import enum

from . import selector, util, tokenizer, tree


class TokenType(enum.Enum):
    # Token value differs across documents (page-specific data).
    VARIANT = 0
    # Token value is shared by all documents but occurs more than once.
    NOT_UNIQUE_INVARIANT = 1
    # Token value is shared by all documents and occurs exactly once each.
    UNIQUE_INVARIANT = 2


def expand_segment(pivots, tokens_of, tentative_decision, rightward):
    """
    This function tries to expand a segment by looking at adjacent tokens
    from pivot points. If the adjacent tokens have the same value, they can
    be regarded as invariant tokens (they would get promoted).

    :param pivots: tuple of per-document token indices to start expanding from
    :param tokens_of: list (one entry per document) of token lists
    :param tentative_decision: per-document lists of TokenType, mutated in place
    :param rightward: True to walk towards higher indices, False towards lower
    :return: None -- promotions are recorded in ``tentative_decision``
    """
    current_pivots = pivots
    is_valid_pivots = True
    while is_valid_pivots:
        # util.get_next_line / get_prev_line presumably shift every
        # per-document index by +/-1 -- TODO confirm against util module.
        if rightward:
            current_pivots = util.get_next_line(current_pivots)
        else:
            current_pivots = util.get_prev_line(current_pivots)
        # First of all, pivot points should be in the valid range.
        for doc_index in range(len(tokens_of)):
            if not util.in_range(current_pivots[doc_index], 0, len(tokens_of[doc_index])):
                is_valid_pivots = False
                break
        if is_valid_pivots:
            # In order to expand the segment, every token must have the same value.
            temp_token = None
            is_identical = True
            for doc_index in range(len(tokens_of)):
                current_token = tokens_of[doc_index][current_pivots[doc_index]]
                if temp_token is None:
                    temp_token = current_token
                else:
                    if temp_token != current_token:
                        is_identical = False
                        break
            # Let's stop at the moment that invariant tokens are found:
            # only still-VARIANT positions may be promoted here.
            is_all_variant = True
            for doc_index in range(len(tokens_of)):
                current_type = tentative_decision[doc_index][current_pivots[doc_index]]
                if current_type != TokenType.VARIANT:
                    is_all_variant = False
                    break
            if is_identical and is_all_variant:
                for doc_index in range(len(tokens_of)):
                    # Type promotion
                    tentative_decision[doc_index][current_pivots[doc_index]] = TokenType.NOT_UNIQUE_INVARIANT
            else:
                break


def find_next_candidates(tokens_of, tokens_with_loc, current_line):
    """
    To find the next candidates from the current line.

    :param tokens_of: list (one entry per document) of token lists
    :param tokens_with_loc: an associative array containing key-value pairs
                            of each token hash and its locations per document
    :param current_line: tuple of the current per-document token indices
    :return: list of unique tuples; each tuple gives, for every document, the
             next position (>= current index) of a token shared by all docs
    """
    candidates = []
    for doc_index, tokens in enumerate(tokens_of):
        if current_line[doc_index] < len(tokens):
            token_current = tokens[current_line[doc_index]]
            hash_current = util.compute_hash(token_current)
            is_data_token = False
            for another_doc_index, _ in enumerate(tokens_of):
                if util.compute_freq(tokens_with_loc[hash_current][another_doc_index], current_line[another_doc_index]) == 0:
                    is_data_token = True
                    break
            if is_data_token:
                # Ignoring a data token (variant) as it doesn't appear in other documents.
                continue
            invariant_token = list(current_line)
            for another_doc_index, _ in enumerate(tokens_of):
                locs = tokens_with_loc[hash_current][another_doc_index]
                # There must be at least one element at/after the current
                # index (guaranteed by the compute_freq check above --
                # TODO confirm compute_freq counts occurrences >= index).
                next_token_idx = bisect.bisect_left(locs, current_line[another_doc_index])
                next_token_pos = locs[next_token_idx]
                invariant_token[another_doc_index] = next_token_pos
            tuple_invariant_token = tuple(invariant_token)
            if tuple_invariant_token not in candidates:
                candidates.append(tuple_invariant_token)
    return candidates


def find_unique_invariants(tokens_of, tokens_with_loc, current_line):
    """
    In order to find the candidate tree quickly.

    Performs a BFS from ``current_line``, chaining unique invariant tokens
    into an n-ary tree rooted at "<root>", then returns every root-to-leaf
    path (each path is one candidate sequence of unique invariants).

    :param tokens_of:
    :param tokens_with_loc:
    :param current_line: starting per-document indices (usually all zeros)
    :return: list of candidate paths; each path is a list of index tuples
    """
    candidate_tree = tree.nary_tree()
    # the root node
    candidate_tree.set_value("<root>")
    node_cache = dict()
    working_set = list()
    working_set.append((current_line, "<root>"))
    node_cache["<root>"] = candidate_tree
    while working_set:
        current_line, origin_line = working_set[0]
        working_set.pop(0)
        # current_line must be in the range of documents
        is_out_of_range = False
        for doc_index, tokens in enumerate(tokens_of):
            if current_line[doc_index] >= len(tokens):
                is_out_of_range = True
                break
        if is_out_of_range:
            continue
        is_detected = False
        candidates = find_next_candidates(tokens_of, tokens_with_loc, current_line)
        for candidate in candidates:
            freq = []
            for doc_index, token_index in enumerate(candidate):
                token_hash = util.compute_hash(tokens_of[doc_index][token_index])
                token_freq = util.compute_freq(tokens_with_loc[token_hash][doc_index], token_index)
                freq.append(token_freq)
            if all(map(lambda x:x == 1, freq)):
                # only for unique invariant tokens
                if candidate not in node_cache:
                    # do not recompute the path that was already searched
                    new_branch = tree.nary_tree()
                    new_branch.set_value(candidate)
                    node_cache[candidate] = new_branch
                    is_detected = True
                    working_set.append((util.get_next_line(candidate), candidate))
                    node_cache[origin_line].insert(node_cache[candidate])
        if not is_detected:
            # No unique invariant reachable from here: slide the window
            # forward one position and keep the same tree attachment point.
            working_set.append((util.get_next_line(current_line), origin_line))

    # Getting the longest path: enumerate all root-to-leaf paths via BFS.
    working_set = list()
    working_set.append((candidate_tree, list()))
    current_tree = None
    temp = list()
    while working_set:
        current_tree, current_path = working_set[0]
        working_set.pop(0)
        if current_tree.get_value() != "<root>":
            current_path.append(current_tree.get_value())
        if not current_tree.get_children():
            temp.append(current_path)
        else:
            for child in current_tree.get_children():
                working_set.append((child, list(current_path)))
    return temp


def compute_tokens_with_loc(tokens_of):
    """
    To make an associative array containing a pair of a token hash and its
    locations per document. (cache)

    :param tokens_of: list (one entry per document) of token lists
    :return: dict mapping token hash -> list of per-document sorted index lists
    """
    tokens_with_loc = {}
    for doc_index, tokens in enumerate(tokens_of):
        for token_index, token in enumerate(tokens):
            token_hash = util.compute_hash(token)
            if token_hash not in tokens_with_loc:
                tokens_with_loc[token_hash] = util.make_empty_array(len(tokens_of))
            tokens_with_loc[token_hash][doc_index].append(token_index)
    return tokens_with_loc


def invariant_matching_algorithm(documents):
    """
    Matching segments by referring to unique invariant tokens.
    This algorithm can be applied to documents recursively.

    :param documents: list of raw HTML strings
    :return: a pair of invariant segment text and tentative decisions,
             which are for debug purpose.
    """
    num_of_docs = len(documents)
    # Tokenize raw documents
    tokens_of = []
    for doc_index, raw_html in enumerate(documents):
        tokens = tokenizer.Tokenizer.tokenize("html", raw_html)
        tokens_of.append(tokens)
    # Cache each token's locations
    tokens_with_loc = compute_tokens_with_loc(tokens_of)
    # Search unique invariant tokens and construct a candidate tree
    candidates = find_unique_invariants(tokens_of, tokens_with_loc, (0,) * num_of_docs)
    # Choose the best one from the candidate tree (almost optimal):
    # the longest chain of unique invariants.
    max_length_of = 0
    best_candidate = None
    for candidate in candidates:
        if max_length_of < len(candidate):
            max_length_of = len(candidate)
            best_candidate = candidate
    tentative_decision = util.make_empty_array(num_of_docs)
    for doc_index, tokens in enumerate(tokens_of):
        tentative_decision[doc_index] = [TokenType.VARIANT] * len(tokens)
    if best_candidate is None:
        return [], tentative_decision
    # Mark the chosen pivots, then grow each pivot left and right over
    # identical adjacent tokens.
    for c in best_candidate:
        for doc_index, loc in enumerate(c):
            tentative_decision[doc_index][loc] = TokenType.NOT_UNIQUE_INVARIANT
        expand_segment(c, tokens_of, tentative_decision, True)
        expand_segment(c, tokens_of, tentative_decision, False)
    for c in best_candidate:
        for doc_index, loc in enumerate(c):
            tentative_decision[doc_index][loc] = TokenType.UNIQUE_INVARIANT
    # Segmentation: walk all documents in lockstep, skipping variant runs
    # and joining contiguous invariant runs into segment strings.
    is_searching = True
    invariant_tokens = list()
    invariant_segments_text = list()
    current_loc = [0] * num_of_docs
    while is_searching:
        for doc_index in range(num_of_docs):
            while tentative_decision[doc_index][current_loc[doc_index]] == TokenType.VARIANT:
                if util.in_range(current_loc[doc_index], 0, len(tokens_of[doc_index]) - 1):
                    current_loc[doc_index] += 1  # Skipping variant tokens (they can't be a part of the template)
                else:
                    is_searching = False
                    break
        while is_searching:
            is_invariant = True
            for doc_index in range(num_of_docs):
                if tentative_decision[doc_index][current_loc[doc_index]] == TokenType.VARIANT:
                    is_invariant = False
                    break
            if is_invariant:
                # All documents agree here; doc 0's token is representative.
                invariant_tokens.append(tokens_of[0][current_loc[0]])
                for doc_index in range(num_of_docs):
                    current_loc[doc_index] += 1
                is_in_range = True
                for doc_index in range(num_of_docs):
                    if not util.in_range(current_loc[doc_index], 0, len(tokens_of[doc_index])):
                        is_in_range = False
                        break
                if not is_in_range:
                    is_searching = False
                    break
            else:
                # Variant hit: flush the accumulated invariant run as one segment.
                invariant_segments_text.append("".join(invariant_tokens))
                invariant_tokens = list()
                break
    if len(invariant_tokens) > 0:
        invariant_segments_text.append("".join(invariant_tokens))
    # __print_decision(tentative_decision)
    return invariant_segments_text, tentative_decision


def __print_decision(tentative_decision):
    # Debug helper: one colored character per token (red=variant,
    # blue=non-unique invariant, blue 'i'=unique invariant).
    print("Decision")
    for doc_index, decisions in enumerate(tentative_decision):
        print("Doc {}:".format(doc_index), end="")
        for decision in decisions:
            if decision == TokenType.VARIANT:
                print ("\033[41m\033[1;37m.\033[0m", end="")
            elif decision == TokenType.NOT_UNIQUE_INVARIANT:
                print ("\033[44m\033[37m.\033[0m", end="")
            elif decision == TokenType.UNIQUE_INVARIANT:
                print ("\033[44m\033[1;37mi\033[0m", end="")
        print("")


def generate(documents, prev_text = []):
    """
    To get the template recursively.

    NOTE(review): ``prev_text`` has a mutable default, but it is only read
    (compared), never mutated, so the shared-default pitfall does not bite here.

    :param documents: list of raw documents
    :param prev_text: invariant segments found at the previous recursion level
    :return: flattened list of invariant segment strings, or None for no input
    """
    if not documents:
        return None
    text, _ = invariant_matching_algorithm(documents)
    # NOTE(review): extract() may return None when an invariant segment is
    # missing from a document; that would make zip() silently drop data --
    # TODO confirm inputs always contain every invariant segment.
    data_segments = list(map(lambda x: extract(text, x), documents))
    data = [list(i) for i in zip(*data_segments)]
    final_text = []
    for seg_index in range(len(data)):
        if prev_text != text:
            data_len = list(map(lambda x:len(x), data[seg_index]))
            if 0 not in data_len:
                # Recurse into the data segments to find nested structure.
                text_sub = generate(data[seg_index], text)
                if len(text_sub) > 0:
                    final_text.extend(text_sub)
            else:
                pass
        if seg_index < len(text):
            final_text.append(text[seg_index])
    return final_text


def extract(invariant_segments_text, document):
    """
    To get data segments by removing invariant segments from the original
    document.

    :param invariant_segments_text: ordered list of invariant segment strings
    :param document: the original document string
    :return: list of len(invariant_segments_text) + 1 data segment strings,
             or None if any invariant segment is not found in order
    """
    cur_segment_offset = 0
    prev_segment_offset = 0
    data_segments = []
    for index, invariant_segment in enumerate(invariant_segments_text):
        cur_segment_offset = document.find(invariant_segment, cur_segment_offset)
        if cur_segment_offset == -1:
            # The invariant segment MUST be found in the document.
            return None
        else:
            data_segments.append(document[prev_segment_offset:cur_segment_offset])
            cur_segment_offset += len(invariant_segment)
            prev_segment_offset = cur_segment_offset
    data_segments.append(document[cur_segment_offset:len(document)])
    return data_segments


def reconstruct(invariant_segments, data_segments):
    """Interleave data and invariant segments back into the original document.

    Returns None unless len(data_segments) == len(invariant_segments) + 1
    (the shape produced by extract()).
    """
    buffer = ""
    if len(data_segments) == len(invariant_segments) + 1:
        for index in range(len(invariant_segments)):
            buffer += (data_segments[index] + invariant_segments[index])
        buffer += data_segments[-1]
        return buffer
    else:
        return None


def select(features, combined_predicates, offset):
    # Thin wrapper over selector.selector_impl; shifts the selected index
    # by ``offset``, or returns None on failure.
    status_code, selected_index = selector.selector_impl(features, combined_predicates)
    if status_code == selector.SelectorStatus.SUCCESS:
        return selected_index + offset
    else:
        return None


def serialize_object(template_object):
    return pickle.dumps(template_object)


def deserialize_object(serialized):
    # NOTE(review): pickle.loads on untrusted input allows arbitrary code
    # execution -- confirm ``serialized`` only ever comes from a trusted source.
    return pickle.loads(serialized)


def make_template_object(invariant_segments=None, merkle_root=None):
    # Plain-dict record for a template: invariant segments + Merkle root.
    template_object = {"inv_seg": invariant_segments, "mk_root":merkle_root}
    return template_object


def make_data_object(data_segments=None, template_merkle_root=None, data_merkle_root=None, original_hash=None):
    # Plain-dict record for a document's data segments and integrity hashes.
    data_object = {"data_seg": data_segments, "mk_root_template": template_merkle_root, "mk_root_data": data_merkle_root, "original_hash": original_hash}
    return data_object

# (Removed: a large block of commented-out experimental code for repeated-
# pattern detection, garbled by extraction; it had no runtime effect.)
gpl-3.0
PokemonGoF/PokemonGo-Bot-Desktop
build/pywin/Lib/encodings/zlib_codec.py
58
3048
""" Python 'zlib_codec' Codec - zlib compression encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Written by Marc-Andre Lemburg (mal@lemburg.com). """ import codecs import zlib # this codec needs the optional zlib module ! ### Codec APIs def zlib_encode(input,errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = zlib.compress(input) return (output, len(input)) def zlib_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. 
""" assert errors == 'strict' output = zlib.decompress(input) return (output, len(input)) class Codec(codecs.Codec): def encode(self, input, errors='strict'): return zlib_encode(input, errors) def decode(self, input, errors='strict'): return zlib_decode(input, errors) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.compressobj = zlib.compressobj() def encode(self, input, final=False): if final: c = self.compressobj.compress(input) return c + self.compressobj.flush() else: return self.compressobj.compress(input) def reset(self): self.compressobj = zlib.compressobj() class IncrementalDecoder(codecs.IncrementalDecoder): def __init__(self, errors='strict'): assert errors == 'strict' self.errors = errors self.decompressobj = zlib.decompressobj() def decode(self, input, final=False): if final: c = self.decompressobj.decompress(input) return c + self.decompressobj.flush() else: return self.decompressobj.decompress(input) def reset(self): self.decompressobj = zlib.decompressobj() class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='zlib', encode=zlib_encode, decode=zlib_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, _is_text_encoding=False, )
mit
bryanbrazil/xbmc
tools/EventClients/lib/python/ps3/keymaps.py
245
2329
# -*- coding: utf-8 -*- # Copyright (C) 2008-2013 Team XBMC # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # PS3 Remote and Controller Keymaps keymap_remote = { "16": 'power' ,#EJECT "64": None ,#AUDIO "65": None ,#ANGLE "63": 'subtitle' ,#SUBTITLE "0f": None ,#CLEAR "28": None ,#TIME "00": 'one' ,#1 "01": 'two' ,#2 "02": 'three' ,#3 "03": 'four' ,#4 "04": 'five' ,#5 "05": 'six' ,#6 "06": 'seven' ,#7 "07": 'eight' ,#8 "08": 'nine' ,#9 "09": 'zero' ,#0 "81": 'mytv' ,#RED "82": 'mymusic' ,#GREEN "80": 'mypictures' ,#BLUE "83": 'myvideo' ,#YELLOW "70": 'display' ,#DISPLAY "1a": None ,#TOP MENU "40": 'menu' ,#POP UP/MENU "0e": None ,#RETURN "5c": 'menu' ,#OPTIONS/TRIANGLE "5d": 'back' ,#BACK/CIRCLE "5e": 'info' ,#X "5f": 'title' ,#VIEW/SQUARE "54": 'up' ,#UP "55": 'right' ,#RIGHT "56": 'down' ,#DOWN "57": 'left' ,#LEFT "0b": 'select' ,#ENTER "5a": 'volumeplus' ,#L1 "58": 'volumeminus' ,#L2 "51": 'Mute' ,#L3 "5b": 'pageplus' ,#R1 "59": 'pageminus' ,#R2 "52": None ,#R3 "43": None ,#PLAYSTATION "50": None ,#SELECT "53": None ,#START "33": 'reverse' ,#<-SCAN "34": 'forward' ,# SCAN-> "30": 'skipminus' ,#PREV "31": 'skipplus' ,#NEXT "60": None ,#<-SLOW/STEP "61": None ,# SLOW/STEP-> "32": 'play' ,#PLAY "38": 'stop' ,#STOP "39": 'pause' ,#PAUSE }
gpl-2.0
mattxlee/base64_cpp
googletest/googletest/test/gtest_xml_test_utils.py
364
8872
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test utilities for gtest_xml_output"""

# NOTE(review): Python 2 era code -- uses the deprecated unittest aliases
# assertEquals/assert_ throughout; kept as-is for compatibility with the
# project's gtest_test_utils harness.

__author__ = 'eefacm@gmail.com (Sean Mcafee)'

import re
from xml.dom import minidom, Node

import gtest_test_utils

GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'


class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """

  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:

    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """

    # CDATA nodes are compared by value only; elements are compared
    # structurally below.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return

    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)

    # Attributes must match pairwise by name and value.
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node.attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
            actual_node.tagName, expected_attributes.keys(),
            actual_attributes.keys()))
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          'expected attribute %s not found in element %s' %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(
          expected_attr.value, actual_attr.value,
          ' values of attribute %s in element %s differ: %s vs %s' %
          (expected_attr.name, actual_node.tagName,
           expected_attr.value, actual_attr.value))

    # Children are matched by ID (see _GetChildren), not by position.
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        'number of child elements differ in element ' + actual_node.tagName)
    for child_id, child in expected_children.items():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])

  # Maps each known element tag to the attribute whose value identifies a
  # child uniquely among its siblings.
  identifying_attribute = {
      'testsuites': 'name',
      'testsuite': 'name',
      'testcase': 'name',
      'failure': 'message',
      }

  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """

    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     'Encountered unknown element <%s>' % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        # Merge all text/CDATA content into one synthetic CDATA node
        # keyed 'detail' so textual output compares as a unit.
        if 'detail' not in children:
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children['detail'] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children['detail'].nodeValue += child.nodeValue
      else:
        self.fail('Encountered unexpected node type %d' % child.nodeType)
    return children

  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.

    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "timestamp" attribute of <testsuites> elements is replaced with a
       single asterisk, if it contains a valid ISO8601 datetime value.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it sn non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line info reported in the first line of the "message"
       attribute and CDATA section of <failure> elements is replaced with the
       file's basename and a single asterisk for the line number.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """

    if element.tagName == 'testsuites':
      timestamp = element.getAttributeNode('timestamp')
      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
                               '*', timestamp.value)
    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
      time = element.getAttributeNode('time')
      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
      type_param = element.getAttributeNode('type_param')
      if type_param and type_param.value:
        type_param.value = '*'
    elif element.tagName == 'failure':
      source_line_pat = r'^.*[/\\](.*:)\d+\n'
      # Replaces the source line information with a normalized form.
      message = element.getAttributeNode('message')
      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Replaces the source line information with a normalized form.
          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*', '', cdata)
    # Recurse into child elements so the whole tree is normalized.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
mit
nickjhay/cgt
cgt/tests/test_scalars.py
19
2226
# NOTE(review): Python 2 module (print statements, it.izip, dict.iteritems);
# depends on the cgt computation-graph package.
import cgt, numpy as np, numpy.random as nr, itertools as it
from cgt import core, utils
from cgt.numeric_diff import numeric_grad
from cgt.tests import across_configs

# When True, dumps expression trees and a summary table while testing.
DISPLAY=False

@across_configs
def test_scalars():
    """Check cgt's symbolic gradients of every elementwise unary/binary op
    against numeric finite-difference gradients on scalar inputs.
    """
    np.random.seed(0)
    x = cgt.scalar('x')
    y = cgt.scalar('y')
    z = cgt.scalar('z')
    vars = [x,y,z] #pylint: disable=W0622
    # Inputs in [1, 2) keep ops like log/sqrt/div well-defined.
    vals = nr.rand(len(vars))+1
    PROB2RESULT = {}

    # Iterate over every registered elementwise op, tagging each with the
    # Op class used to instantiate it.
    for ((key,_), cls) in it.chain(
            it.izip(core.UNARY_INFO.items(),it.repeat(core.ElwiseUnary)),
            it.izip(core.BINARY_INFO.items(),it.repeat(core.ElwiseBinary))
            ):
        if key == "conj":
            # conj has no meaningful real-valued gradient test here.
            print "skipping conj"
            continue
        utils.colorprint(utils.Color.YELLOW, "Testing %s\n"%key)
        if cls == core.ElwiseUnary:
            n_in = 1
            op = cls(key)
        else:
            n_in = 2
            op = cls(key, (True,True))
        inputvars = vars[0:n_in]
        inputvals = vals[0:n_in]
        out = core.Result(op, inputvars)
        f = cgt.function(inputvars, out)
        try:
            grads = cgt.grad(out, inputvars)
        except core.NonDifferentiable:
            print "nondiff"
            continue
        if DISPLAY:
            print "Function:"
            cgt.print_tree(out)
            print "Gradient original:"
            cgt.print_tree(grads)
            print "Gradient simplified:"
        grads_simple = core.simplify(grads)
        if DISPLAY: cgt.print_tree(grads_simple)
        gradf = cgt.function(inputvars, grads)
        # Finite-difference step and comparison tolerance depend on the
        # configured floating-point precision.
        eps = {"single":1e-4,"double":1e-9}[cgt.get_precision()]
        nugrad = numeric_grad(lambda li: f(*li), inputvals,eps=eps) #pylint: disable=W0640
        cgtgrad = gradf(*inputvals)
        np.testing.assert_almost_equal(nugrad,cgtgrad,decimal={"single":3,"double":6}[cgt.get_precision()])
        # Record the simplified gradient graph size for the summary table.
        grad_count = core.count_nodes(grads_simple)
        PROB2RESULT[key] = {}
        PROB2RESULT[key]["grad"] = grad_count

    if DISPLAY:
        from thirdparty.tabulate import tabulate
        print tabulate([[key,val["grad"]] for (key,val) in PROB2RESULT.iteritems()],headers=["funcname","gradcount"])

if __name__ == "__main__":
    import nose
    nose.runmodule()
mit
zhuxiaohao/linux
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
1935
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

import errno, os

# Linux futex(2) operation constants.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC    = 1000000000

def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Whole-second part of a nanosecond count (integer division)."""
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'.

    Bug fix: the original assigned ``str = "..." % (...),`` -- the trailing
    comma made the result a 1-tuple instead of a string (and the name
    shadowed the builtin ``str``).  Now returns the formatted string directly.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))

def add_stats(dict, key, value):
    """Fold *value* into dict[key], stored as (min, max, avg, count).

    Bug fix: the running mean was previously ``(avg + value) / 2``, which
    over-weights recent samples; it is now a true arithmetic mean.  Also
    replaces the Python-2-only ``dict.has_key`` with ``in``.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean * count + value) / (count + 1)
        dict[key] = (lo, hi, mean, count + 1)

def clear_term():
    """Clear the terminal via ANSI escape codes (home cursor + erase)."""
    print("\x1b[H\x1b[2J")

audit_package_warned = False

# The audit bindings are optional; without them syscall_name() falls back
# to printing the raw syscall number.
try:
    import audit
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except Exception:
        # Older audit bindings lack MACH_ARMEB; best-effort only.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except Exception:
    # ImportError (no audit bindings) or KeyError (unknown machine name).
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names")

def syscall_name(id):
    """Map a syscall number to its name via audit; fall back to the number."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # audit/machine_id unavailable or unknown id.
        return str(id)

def strerror(nr):
    """Symbolic errno name for *nr* (sign-insensitive), or a fallback string."""
    try:
        return errno.errorcode[abs(nr)]
    except Exception:
        return "Unknown %d errno" % nr
gpl-2.0
lizardsystem/lizard-map
lizard_map/migrations/0011_auto__add_field_workspacestorage_extent_is_set.py
2
14603
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'WorkspaceStorage.extent_is_set' db.add_column('lizard_map_workspacestorage', 'extent_is_set', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'WorkspaceStorage.extent_is_set' db.delete_column('lizard_map_workspacestorage', 'extent_is_set') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'lizard_map.backgroundmap': { 'Meta': {'ordering': "('index',)", 'object_name': 'BackgroundMap'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'google_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}), 'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_single_tile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'layer_names': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 
'layer_type': ('django.db.models.fields.IntegerField', [], {}), 'layer_url': ('django.db.models.fields.CharField', [], {'default': "'http://tile.openstreetmap.nl/tiles/${z}/${x}/${y}.png'", 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, 'lizard_map.collageedit': { 'Meta': {'object_name': 'CollageEdit'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'lizard_map.collageedititem': { 'Meta': {'ordering': "('name',)", 'object_name': 'CollageEditItem'}, 'adapter_class': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'adapter_layer_json': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'aggregation_period': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'boundary_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'collage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collage_items'", 'to': "orm['lizard_map.CollageEdit']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'index': ('django.db.models.fields.IntegerField', [], {'default': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'percentile_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'restrict_to_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'visible': ('django.db.models.fields.BooleanField', [], {'default': 
'True'}) }, 'lizard_map.legend': { 'Meta': {'object_name': 'Legend'}, 'default_color': ('lizard_map.fields.ColorField', [], {'max_length': '8'}), 'descriptor': ('django.db.models.fields.CharField', [], {'max_length': '80'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_color': ('lizard_map.fields.ColorField', [], {'max_length': '8'}), 'max_value': ('django.db.models.fields.FloatField', [], {'default': '100'}), 'min_color': ('lizard_map.fields.ColorField', [], {'max_length': '8'}), 'min_value': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'steps': ('django.db.models.fields.IntegerField', [], {'default': '10'}), 'too_high_color': ('lizard_map.fields.ColorField', [], {'max_length': '8'}), 'too_low_color': ('lizard_map.fields.ColorField', [], {'max_length': '8'}) }, 'lizard_map.legendpoint': { 'Meta': {'object_name': 'LegendPoint', '_ormbases': ['lizard_map.Legend']}, 'icon': ('django.db.models.fields.CharField', [], {'default': "'empty.png'", 'max_length': '80'}), 'legend_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_map.Legend']", 'unique': 'True', 'primary_key': 'True'}), 'mask': ('django.db.models.fields.CharField', [], {'default': "'empty_mask.png'", 'max_length': '80', 'null': 'True', 'blank': 'True'}) }, 'lizard_map.setting': { 'Meta': {'object_name': 'Setting'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'lizard_map.workspaceedit': { 'Meta': {'object_name': 'WorkspaceEdit'}, 'absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'background_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_map.BackgroundMap']", 'null': 'True', 'blank': 'True'}), 'custom_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'dt': 
('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'dt_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'dt_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'td': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'td_end': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'td_start': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'x_max': ('django.db.models.fields.FloatField', [], {'default': '1254790'}), 'x_min': ('django.db.models.fields.FloatField', [], {'default': '-14675'}), 'y_max': ('django.db.models.fields.FloatField', [], {'default': '6964942'}), 'y_min': ('django.db.models.fields.FloatField', [], {'default': '6668977'}) }, 'lizard_map.workspaceedititem': { 'Meta': {'ordering': "('index', 'visible', 'name')", 'object_name': 'WorkspaceEditItem'}, 'adapter_class': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'adapter_layer_json': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index': ('django.db.models.fields.IntegerField', [], {'default': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'workspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workspace_items'", 'to': 
"orm['lizard_map.WorkspaceEdit']"}) }, 'lizard_map.workspacestorage': { 'Meta': {'object_name': 'WorkspaceStorage'}, 'absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'background_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_map.BackgroundMap']", 'null': 'True', 'blank': 'True'}), 'custom_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'dt_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'dt_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'extent_is_set': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'secret_slug': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}), 'td': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'td_end': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'td_start': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'x_max': ('django.db.models.fields.FloatField', [], {'default': '1254790'}), 'x_min': ('django.db.models.fields.FloatField', [], {'default': '-14675'}), 'y_max': ('django.db.models.fields.FloatField', [], {'default': '6964942'}), 'y_min': ('django.db.models.fields.FloatField', [], {'default': '6668977'}) }, 'lizard_map.workspacestorageitem': { 'Meta': {'ordering': "('index', 'visible', 'name')", 'object_name': 'WorkspaceStorageItem'}, 'adapter_class': 
('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'adapter_layer_json': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index': ('django.db.models.fields.IntegerField', [], {'default': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'workspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workspace_items'", 'to': "orm['lizard_map.WorkspaceStorage']"}) } } complete_apps = ['lizard_map']
lgpl-3.0
towerjoo/DjangoNotes
Django-1.5.1/tests/regressiontests/introspection/tests.py
44
7272
from __future__ import absolute_import, unicode_literals from functools import update_wrapper from django.db import connection from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature from django.utils import six, unittest from .models import Reporter, Article if connection.vendor == 'oracle': expectedFailureOnOracle = unittest.expectedFailure else: expectedFailureOnOracle = lambda f: f # The introspection module is optional, so methods tested here might raise # NotImplementedError. This is perfectly acceptable behavior for the backend # in question, but the tests need to handle this without failing. Ideally we'd # skip these tests, but until #4788 is done we'll just ignore them. # # The easiest way to accomplish this is to decorate every test case with a # wrapper that ignores the exception. # # The metaclass is just for fun. def ignore_not_implemented(func): def _inner(*args, **kwargs): try: return func(*args, **kwargs) except NotImplementedError: return None update_wrapper(_inner, func) return _inner class IgnoreNotimplementedError(type): def __new__(cls, name, bases, attrs): for k, v in attrs.items(): if k.startswith('test'): attrs[k] = ignore_not_implemented(v) return type.__new__(cls, name, bases, attrs) class IntrospectionTests(six.with_metaclass(IgnoreNotimplementedError, TestCase)): def test_table_names(self): tl = connection.introspection.table_names() self.assertEqual(tl, sorted(tl)) self.assertTrue(Reporter._meta.db_table in tl, "'%s' isn't in table_list()." % Reporter._meta.db_table) self.assertTrue(Article._meta.db_table in tl, "'%s' isn't in table_list()." 
% Article._meta.db_table) def test_django_table_names(self): cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names() cursor.execute("DROP TABLE django_ixn_test_table;") self.assertTrue('django_ixn_testcase_table' not in tl, "django_table_names() returned a non-Django table") def test_django_table_names_retval_type(self): # Ticket #15216 cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names(only_existing=True) self.assertIs(type(tl), list) tl = connection.introspection.django_table_names(only_existing=False) self.assertIs(type(tl), list) def test_installed_models(self): tables = [Article._meta.db_table, Reporter._meta.db_table] models = connection.introspection.installed_models(tables) self.assertEqual(models, set([Article, Reporter])) def test_sequence_list(self): sequences = connection.introspection.sequence_list() expected = {'table': Reporter._meta.db_table, 'column': 'id'} self.assertTrue(expected in sequences, 'Reporter sequence not found in sequence_list()') def test_get_table_description_names(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual([r[0] for r in desc], [f.column for f in Reporter._meta.fields]) def test_get_table_description_types(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [datatype(r[1], r) for r in desc], ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField'] ) # The following test fails on Oracle due to #17202 (can't correctly # inspect the length of character columns). 
@expectedFailureOnOracle def test_get_table_description_col_lengths(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[3] for r in desc if datatype(r[1], r) == 'CharField'], [30, 30, 75] ) # Oracle forces null=True under the hood in some cases (see # https://docs.djangoproject.com/en/dev/ref/databases/#null-and-empty-strings) # so its idea about null_ok in cursor.description is different from ours. @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_get_table_description_nullable(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[6] for r in desc], [False, False, False, False, True] ) # Regression test for #9991 - 'real' types in postgres @skipUnlessDBFeature('has_real_datatype') def test_postgresql_real_type(self): cursor = connection.cursor() cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table') cursor.execute('DROP TABLE django_ixn_real_test_table;') self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField') def test_get_relations(self): cursor = connection.cursor() relations = connection.introspection.get_relations(cursor, Article._meta.db_table) # Older versions of MySQL don't have the chops to report on this stuff, # so just skip it if no relations come back. If they do, though, we # should test that the response is correct. 
if relations: # That's {field_index: (field_index_other_table, other_table)} self.assertEqual(relations, {3: (0, Reporter._meta.db_table)}) def test_get_key_columns(self): cursor = connection.cursor() key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table) self.assertEqual(key_columns, [('reporter_id', Reporter._meta.db_table, 'id')]) def test_get_primary_key_column(self): cursor = connection.cursor() primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table) self.assertEqual(primary_key_column, 'id') def test_get_indexes(self): cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table) self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False}) def test_get_indexes_multicol(self): """ Test that multicolumn indexes are not included in the introspection results. """ cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table) self.assertNotIn('first_name', indexes) self.assertIn('id', indexes) def datatype(dbtype, description): """Helper to convert a data type into a string.""" dt = connection.introspection.get_field_type(dbtype, description) if type(dt) is tuple: return dt[0] else: return dt
mit
pbs/django-filer
filer/south_migrations/0009_auto__add_field_folderpermission_can_edit_new__add_field_folderpermiss.py
49
11635
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'FolderPermission.can_edit_new' db.add_column('filer_folderpermission', 'can_edit_new', self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True), keep_default=False) # Adding field 'FolderPermission.can_read_new' db.add_column('filer_folderpermission', 'can_read_new', self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True), keep_default=False) # Adding field 'FolderPermission.can_add_children_new' db.add_column('filer_folderpermission', 'can_add_children_new', self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'FolderPermission.can_edit_new' db.delete_column('filer_folderpermission', 'can_edit_new') # Deleting field 'FolderPermission.can_read_new' db.delete_column('filer_folderpermission', 'can_read_new') # Deleting field 'FolderPermission.can_add_children_new' db.delete_column('filer_folderpermission', 'can_add_children_new') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'filer.clipboard': { 'Meta': {'object_name': 'Clipboard'}, 
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"}) }, 'filer.clipboarditem': { 'Meta': {'object_name': 'ClipboardItem'}, 'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}), 'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'filer.file': { 'Meta': {'object_name': 'File'}, '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}), 'polymorphic_ctype': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folder': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folderpermission': { 'Meta': {'object_name': 'FolderPermission'}, 'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_add_children_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_edit_new': 
('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_read_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'filer.image': { 'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']}, '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}), 'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'must_always_publish_copyright': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['filer']
bsd-3-clause
lehmannro/translate
storage/placeables/base.py
1
2891
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2008-2009 Zuza Software Foundation # # This file is part of the Translate Toolkit. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """ Contains base placeable classes with names based on XLIFF placeables. See the XLIFF standard for more information about what the names mean. """ from translate.storage.placeables.strelem import StringElem from translate.storage.placeables.interfaces import * __all__ = ['Bpt', 'Ept', 'Ph', 'It', 'G', 'Bx', 'Ex', 'X', 'Sub', 'to_base_placeables'] # Basic placeable types. 
class Bpt(MaskingPlaceable, PairedDelimiter): has_content = True class Ept(MaskingPlaceable, PairedDelimiter): has_content = True class Ph(MaskingPlaceable): has_content = True istranslatable = False class It(MaskingPlaceable, Delimiter): has_content = True class G(ReplacementPlaceable): has_content = True class Bx(ReplacementPlaceable, PairedDelimiter): has_content = False istranslatable = False def __init__(self, id=None, xid=None, **kwargs): # kwargs is ignored ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs) class Ex(ReplacementPlaceable, PairedDelimiter): has_content = False istranslatable = False def __init__(self, id=None, xid=None, **kwargs): # kwargs is ignored ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs) class X(ReplacementPlaceable, Delimiter): has_content = False iseditable = False isfragile = True istranslatable = False def __init__(self, id=None, xid=None, **kwargs): ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs) class Sub(SubflowPlaceable): has_content = True def to_base_placeables(tree): if not isinstance(tree, StringElem): return tree base_class = [klass for klass in tree.__class__.__bases__ \ if klass in [Bpt, Ept, Ph, It, G, Bx, Ex, X, Sub]] if not base_class: base_class = tree.__class__ else: base_class = base_class[0] newtree = base_class() newtree.id = tree.id newtree.rid = tree.rid newtree.xid = tree.xid newtree.sub = [] for subtree in tree.sub: newtree.sub.append(to_base_placeables(subtree)) return newtree
gpl-2.0
paulproteus/django
django/contrib/localflavor/tr/forms.py
2
3532
""" TR-specific Form helpers """ from __future__ import absolute_import, unicode_literals import re from django.contrib.localflavor.tr.tr_provinces import PROVINCE_CHOICES from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, Select, CharField from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ phone_digits_re = re.compile(r'^(\+90|0)? ?(([1-9]\d{2})|\([1-9]\d{2}\)) ?([2-9]\d{2} ?\d{2} ?\d{2})$') class TRPostalCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a postal code in the format XXXXX.'), } def __init__(self, max_length=5, min_length=5, *args, **kwargs): super(TRPostalCodeField, self).__init__(r'^\d{5}$', max_length, min_length, *args, **kwargs) def clean(self, value): value = super(TRPostalCodeField, self).clean(value) if value in EMPTY_VALUES: return '' if len(value) != 5: raise ValidationError(self.error_messages['invalid']) province_code = int(value[:2]) if province_code == 0 or province_code > 81: raise ValidationError(self.error_messages['invalid']) return value class TRPhoneNumberField(CharField): default_error_messages = { 'invalid': _('Phone numbers must be in 0XXX XXX XXXX format.'), } def clean(self, value): super(TRPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return '' value = re.sub('(\(|\)|\s+)', '', smart_unicode(value)) m = phone_digits_re.search(value) if m: return '%s%s' % (m.group(2), m.group(4)) raise ValidationError(self.error_messages['invalid']) class TRIdentificationNumberField(Field): """ A Turkey Identification Number number. See: http://tr.wikipedia.org/wiki/T%C3%BCrkiye_Cumhuriyeti_Kimlik_Numaras%C4%B1 Checks the following rules to determine whether the number is valid: * The number is 11-digits. * First digit is not 0. 
* Conforms to the following two formula: (sum(1st, 3rd, 5th, 7th, 9th)*7 - sum(2nd,4th,6th,8th)) % 10 = 10th digit sum(1st to 10th) % 10 = 11th digit """ default_error_messages = { 'invalid': _('Enter a valid Turkish Identification number.'), 'not_11': _('Turkish Identification number must be 11 digits.'), } def clean(self, value): super(TRIdentificationNumberField, self).clean(value) if value in EMPTY_VALUES: return '' if len(value) != 11: raise ValidationError(self.error_messages['not_11']) if not re.match(r'^\d{11}$', value): raise ValidationError(self.error_messages['invalid']) if int(value[0]) == 0: raise ValidationError(self.error_messages['invalid']) chksum = (sum([int(value[i]) for i in range(0, 9, 2)]) * 7 - sum([int(value[i]) for i in range(1, 9, 2)])) % 10 if chksum != int(value[9]) or \ (sum([int(value[i]) for i in range(10)]) % 10) != int(value[10]): raise ValidationError(self.error_messages['invalid']) return value class TRProvinceSelect(Select): """ A Select widget that uses a list of provinces in Turkey as its choices. """ def __init__(self, attrs=None): super(TRProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
bsd-3-clause
rusucosmin/courses
ubb/lftc/lab5/SLRParser.py
1
12884
import sys grammars = open("grammar.txt") G = {} C = {} start = "" terminals = [] nonterminals = [] symbols = [] def parse_grammar(): global G, start, terminals, nonterminals, symbols for line in grammars: line = " ".join(line.split()) if line == '\n': break head = line[:line.index("->")].strip() prods = [l.strip().split(' ') for l in ''.join(line[line.index("->") + 2:]).split('|')] if not start: start = head + "'" G[start] = [[head]] nonterminals.append(start) if head not in G: G[head] = [] if head not in nonterminals: nonterminals.append(head) for prod in prods: G[head].append(prod) for char in prod: if not char.isupper() and char != '^' and char not in terminals: terminals.append(char) elif char.isupper() and char not in nonterminals: nonterminals.append(char) G[char] = [] symbols = terminals + nonterminals first_seen = [] def FIRST(X): global first_seen first = [] first_seen.append(X) if X in terminals: # CASE 1 first.append(X) elif X in nonterminals: for prods in G[X]: # CASE 2 if prods[0] in terminals and prods[0] not in first: first.append(prods[0]) elif '^' in prods and '^' not in first: first.append('^') else: # CASE 3 found_null = False for nonterm in prods: found_null = False if nonterm not in first_seen: for terms in FIRST(nonterm): if terms == '^': found_null = True elif terms not in first: first.append(terms) if not found_null: break if found_null: first.append('^') for Gprods in G[X]: if X in Gprods and Gprods.index(X) + 1 < len(Gprods): for terms in FIRST(Gprods[Gprods.index(X) + 1]): if terms not in first: first.append(terms) first_seen.remove(X) return first follow_seen = [] def FOLLOW(A): global follow_seen follow = [] follow_seen.append(A) if A == start: # CASE 1 follow.append('$') for heads in G.keys(): for prods in G[heads]: follow_head = False if A in prods: next_symbol_pos = prods.index(A) + 1 if next_symbol_pos < len(prods): # CASE 2 for terms in FIRST(prods[next_symbol_pos]): if terms != '^': if terms not in follow: follow.append(terms) 
else: # CASE 3 follow_head = True else: # CASE 3 follow_head = True if follow_head and heads not in follow_seen: for terms in FOLLOW(heads): if terms not in follow: follow.append(terms) follow_seen.remove(A) return follow def closure(I): J = I while True: item_len = len(J) + sum(len(v) for v in J.itervalues()) for heads in J.keys(): for prods in J[heads]: dot_pos = prods.index('.') if dot_pos + 1 < len(prods): prod_after_dot = prods[dot_pos + 1] if prod_after_dot in nonterminals: for prod in G[prod_after_dot]: if prod == ['^']: item = ["."] else: item = ["."] + prod if prod_after_dot not in J.keys(): J[prod_after_dot] = [item] elif item not in J[prod_after_dot]: J[prod_after_dot].append(item) if item_len == len(J) + sum(len(v) for v in J.itervalues()): return J def GOTO(I, X): goto = {} for heads in I.keys(): for prods in I[heads]: for i in range(len(prods) - 1): if "." == prods[i] and X == prods[i + 1]: temp_prods = prods[:] temp_prods[i], temp_prods[i + 1] = temp_prods[i + 1], temp_prods[i] prod_closure = closure({heads: [temp_prods]}) for keys in prod_closure: if keys not in goto.keys(): goto[keys] = prod_closure[keys] elif prod_closure[keys] not in goto[keys]: for prod in prod_closure[keys]: goto[keys].append(prod) return goto def items(): global C i = 1 C = {'I0': closure({start: [['.'] + G[start][0]]})} while True: item_len = len(C) + sum(len(v) for v in C.itervalues()) for I in C.keys(): for X in symbols: if GOTO(C[I], X) and GOTO(C[I], X) not in C.values(): C['I' + str(i)] = GOTO(C[I], X) i += 1 if item_len == len(C) + sum(len(v) for v in C.itervalues()): return def ACTION(i, a): for heads in C['I' + str(i)]: for prods in C['I' + str(i)][heads]: for j in range(len(prods) - 1): if prods[j] == '.' 
and prods[j + 1] == a: for k in range(len(C)): if GOTO(C['I' + str(i)], a) == C['I' + str(k)]: if a in terminals: if "r" in parse_table[i][terminals.index(a)]: print "ERROR: Shift-Reduce Conflict at State " + str(i) + ", Symbol " + str(terminals.index(a)) exit(1) parse_table[i][terminals.index(a)] = "s" + str(k) else: parse_table[i][len(terminals) + nonterminals.index(a)] = str(k) return "s" + str(k) for heads in C['I' + str(i)]: if heads != start: for prods in C['I' + str(i)][heads]: if prods[-1] == '.': k = 0 for head in G.keys(): for Gprods in G[head]: if head == heads and (Gprods == prods[:-1] or (Gprods == ['^'] and prods == ['.'])) and (a in terminals or a == '$'): for terms in FOLLOW(heads): if terms == '$': index = len(terminals) else: index = terminals.index(terms) if "s" in parse_table[i][index]: print "ERROR: Shift-Reduce Conflict at State " + str(i) + ", Symbol " + str(terms) exit(1) elif parse_table[i][index] and parse_table[i][index] != "r" + str(k): print "ERROR: Reduce-Reduce Conflict at State " + str(i) + ", Symbol " + str(terms) exit(1) parse_table[i][index] = "r" + str(k) return "r" + str(k) k += 1 if start in C['I' + str(i)] and G[start][0] + ['.'] in C['I' + str(i)][start]: parse_table[i][len(terminals)] = "acc" return "acc" return "" def print_info(): print "GRAMMAR:" for head in G.keys(): if head == start: continue print "{:>{width}} ->".format(head, width=len(max(G.keys(), key=len))), num_prods = 0 for prods in G[head]: if num_prods > 0: print "|", for prod in prods: print prod, num_prods += 1 print print "\nAUGMENTED GRAMMAR:" i = 0 for head in G.keys(): for prods in G[head]: print "{:>{width}}:".format(str(i), width=len(str(sum(len(v) for v in G.itervalues()) - 1))), print "{:>{width}} ->".format(head, width=len(max(G.keys(), key=len))), for prod in prods: print prod, print i += 1 print "\nTERMINALS :", terminals print "NONTERMINALS:", nonterminals print "SYMBOLS :", symbols print "\nFIRST:" for head in G: print "{:>{width}} 
=".format(head, width=len(max(G.keys(), key=len))), print "{", num_terms = 0 for terms in FIRST(head): if num_terms > 0: print ", ", print terms, num_terms += 1 print "}" print "\nFOLLOW:" for head in G: print "{:>{width}} =".format(head, width=len(max(G.keys(), key=len))), print "{", num_terms = 0 for terms in FOLLOW(head): if num_terms > 0: print ", ", print terms, num_terms += 1 print "}" print "\nITEMS:" for i in range(len(C)): print 'I' + str(i) + ':' for keys in C['I' + str(i)]: for prods in C['I' + str(i)][keys]: print "{:>{width}} ->".format(keys, width=len(max(G.keys(), key=len))), for prod in prods: print prod, print print for i in range(len(parse_table)): for j in symbols: ACTION(i, j) print "PARSING TABLE:" print "+" + "--------+" * (len(terminals) + len(nonterminals) + 1) print "|{:^8}|".format('STATE'), for terms in terminals: print "{:^7}|".format(terms), print "{:^7}|".format("$"), for nonterms in nonterminals: if nonterms == start: continue print "{:^7}|".format(nonterms), print "\n+" + "--------+" * (len(terminals) + len(nonterminals) + 1) for i in range(len(parse_table)): print "|{:^8}|".format(i), for j in range(len(parse_table[i]) - 1): print "{:^7}|".format(parse_table[i][j]), print print "+" + "--------+" * (len(terminals) + len(nonterminals) + 1) def process_input(get_input): to_parse = " ".join((get_input + " $").split()).split(" ") pointer = 0 stack = ['0'] print "\n+--------+----------------------------+----------------------------+-----------+" print "|{:^8}|{:^28}|{:^28}|{:^11}|".format("STEP", "STACK", "INPUT", "ACTION") print "+--------+----------------------------+----------------------------+-----------+" step = 1 while True: curr_symbol = to_parse[pointer] top_stack = int(stack[-1]) stack_content = "" input_content = "" print "|{:^8}|".format(step), for i in stack: stack_content += i print "{:27}|".format(stack_content), i = pointer while i < len(to_parse): input_content += to_parse[i] i += 1 print "{:>26} | 
".format(input_content), step += 1 get_action = ACTION(top_stack, curr_symbol) if "s" in get_action: print "{:^9}|".format(get_action) stack.append(curr_symbol) stack.append(get_action[1:]) pointer += 1 elif "r" in get_action: print "{:^9}|".format(get_action) i = 0 for head in G.keys(): for prods in G[head]: if i == int(get_action[1:]): if prods != '^': for j in range(2 * len(prods)): stack.pop() state = stack[-1] stack.append(head) stack.append(parse_table[int(state)][len(terminals) + nonterminals.index(head)]) i += 1 elif get_action == "acc": print "{:^9}|".format("ACCEPTED") break else: print "ERROR: Unrecognized symbol", curr_symbol, "|" break print "+--------+----------------------------+----------------------------+-----------+" if __name__ == "__main__": parse_grammar() items() parse_table = [["" for c in range(len(terminals) + len(nonterminals) + 1)] for r in range(len(C))] print_info() process_input(sys.argv[1])
mit
ShassAro/ShassAro
DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/base.py
61
3466
import sys from ctypes.util import find_library from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db.backends.sqlite3.base import (Database, DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper) from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations from django.contrib.gis.db.backends.spatialite.schema import SpatialiteSchemaEditor from django.utils import six class DatabaseWrapper(SQLiteDatabaseWrapper): def __init__(self, *args, **kwargs): # Before we get too far, make sure pysqlite 2.5+ is installed. if Database.version_info < (2, 5, 0): raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are ' 'compatible with SpatiaLite and GeoDjango.') # Trying to find the location of the SpatiaLite library. # Here we are figuring out the path to the SpatiaLite library # (`libspatialite`). If it's not in the system library path (e.g., it # cannot be found by `ctypes.util.find_library`), then it may be set # manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting. self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH', find_library('spatialite')) if not self.spatialite_lib: raise ImproperlyConfigured('Unable to locate the SpatiaLite library. ' 'Make sure it is in your library path, or set ' 'SPATIALITE_LIBRARY_PATH in your settings.' 
) super(DatabaseWrapper, self).__init__(*args, **kwargs) self.ops = SpatiaLiteOperations(self) self.client = SpatiaLiteClient(self) self.creation = SpatiaLiteCreation(self) self.introspection = SpatiaLiteIntrospection(self) def schema_editor(self, *args, **kwargs): "Returns a new instance of this backend's SchemaEditor" return SpatialiteSchemaEditor(self, *args, **kwargs) def get_new_connection(self, conn_params): conn = super(DatabaseWrapper, self).get_new_connection(conn_params) # Enabling extension loading on the SQLite connection. try: conn.enable_load_extension(True) except AttributeError: raise ImproperlyConfigured( 'The pysqlite library does not support C extension loading. ' 'Both SQLite and pysqlite must be configured to allow ' 'the loading of extensions to use SpatiaLite.') # Loading the SpatiaLite library extension on the connection, and returning # the created cursor. cur = conn.cursor(factory=SQLiteCursorWrapper) try: cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,)) except Exception as msg: new_msg = ( 'Unable to load the SpatiaLite library extension ' '"%s" because: %s') % (self.spatialite_lib, msg) six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2]) cur.close() return conn
gpl-2.0
vijayendrabvs/ssl-neutron
neutron/tests/unit/vmware/db/test_nsx_db.py
13
3893
# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron import context from neutron.db import api as db from neutron.db import models_v2 from neutron.openstack.common.db import exception as d_exc from neutron.plugins.vmware.dbexts import db as nsx_db from neutron.plugins.vmware.dbexts import models from neutron.tests import base class NsxDBTestCase(base.BaseTestCase): def setUp(self): super(NsxDBTestCase, self).setUp() db.configure_db() self.ctx = context.get_admin_context() self.addCleanup(db.clear_db) def _setup_neutron_network_and_port(self, network_id, port_id): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id=network_id)) port = models_v2.Port(id=port_id, network_id=network_id, mac_address='foo_mac_address', admin_state_up=True, status='ACTIVE', device_id='', device_owner='') self.ctx.session.add(port) def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self): neutron_net_id = 'foo_neutron_network_id' neutron_port_id = 'foo_neutron_port_id' nsx_port_id = 'foo_nsx_port_id' nsx_switch_id = 'foo_nsx_switch_id' self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id) nsx_db.add_neutron_nsx_port_mapping( self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id) # Call the method twice to trigger a db duplicate constraint error nsx_db.add_neutron_nsx_port_mapping( self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id) result = 
(self.ctx.session.query(models.NeutronNsxPortMapping). filter_by(neutron_id=neutron_port_id).one()) self.assertEqual(nsx_port_id, result.nsx_port_id) self.assertEqual(neutron_port_id, result.neutron_id) def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self): neutron_net_id = 'foo_neutron_network_id' neutron_port_id = 'foo_neutron_port_id' nsx_port_id_1 = 'foo_nsx_port_id_1' nsx_port_id_2 = 'foo_nsx_port_id_2' nsx_switch_id = 'foo_nsx_switch_id' self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id) nsx_db.add_neutron_nsx_port_mapping( self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1) # Call the method twice to trigger a db duplicate constraint error, # this time with a different nsx port id! self.assertRaises(d_exc.DBDuplicateEntry, nsx_db.add_neutron_nsx_port_mapping, self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_2) def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self): neutron_port_id = 'foo_neutron_port_id' nsx_port_id = 'foo_nsx_port_id' nsx_switch_id = 'foo_nsx_switch_id' self.assertRaises(d_exc.DBError, nsx_db.add_neutron_nsx_port_mapping, self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
apache-2.0
varunarya10/boto
tests/unit/beanstalk/test_exception.py
114
2085
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. # All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.beanstalk.exception import simple from tests.compat import unittest class FakeError(object): def __init__(self, code, status, reason, body): self.code = code self.status = status self.reason = reason self.body = body class TestExceptions(unittest.TestCase): def test_exception_class_names(self): # Create exception from class name error = FakeError('TooManyApplications', 400, 'foo', 'bar') exception = simple(error) self.assertEqual(exception.__class__.__name__, 'TooManyApplications') # Create exception from class name + 'Exception' as seen from the # live service today error = FakeError('TooManyApplicationsException', 400, 'foo', 'bar') exception = simple(error) self.assertEqual(exception.__class__.__name__, 'TooManyApplications') # Make sure message body is present self.assertEqual(exception.message, 'bar')
mit
rrrene/django
django/contrib/humanize/templatetags/humanize.py
526
9442
# -*- encoding: utf-8 -*- from __future__ import unicode_literals import re from datetime import date, datetime from decimal import Decimal from django import template from django.conf import settings from django.template import defaultfilters from django.utils.encoding import force_text from django.utils.formats import number_format from django.utils.safestring import mark_safe from django.utils.timezone import is_aware, utc from django.utils.translation import pgettext, ugettext as _, ungettext register = template.Library() @register.filter(is_safe=True) def ordinal(value): """ Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd', 3 is '3rd', etc. Works for any integer. """ try: value = int(value) except (TypeError, ValueError): return value suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th')) if value % 100 in (11, 12, 13): # special case return mark_safe("%d%s" % (value, suffixes[0])) # Mark value safe so i18n does not break with <sup> or <sub> see #19988 return mark_safe("%d%s" % (value, suffixes[value % 10])) @register.filter(is_safe=True) def intcomma(value, use_l10n=True): """ Converts an integer to a string containing commas every three digits. For example, 3000 becomes '3,000' and 45000 becomes '45,000'. 
""" if settings.USE_L10N and use_l10n: try: if not isinstance(value, (float, Decimal)): value = int(value) except (TypeError, ValueError): return intcomma(value, False) else: return number_format(value, force_grouping=True) orig = force_text(value) new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig) if orig == new: return new else: return intcomma(new, use_l10n) # A tuple of standard large number to their converters intword_converters = ( (6, lambda number: ( ungettext('%(value).1f million', '%(value).1f million', number), ungettext('%(value)s million', '%(value)s million', number), )), (9, lambda number: ( ungettext('%(value).1f billion', '%(value).1f billion', number), ungettext('%(value)s billion', '%(value)s billion', number), )), (12, lambda number: ( ungettext('%(value).1f trillion', '%(value).1f trillion', number), ungettext('%(value)s trillion', '%(value)s trillion', number), )), (15, lambda number: ( ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number), ungettext('%(value)s quadrillion', '%(value)s quadrillion', number), )), (18, lambda number: ( ungettext('%(value).1f quintillion', '%(value).1f quintillion', number), ungettext('%(value)s quintillion', '%(value)s quintillion', number), )), (21, lambda number: ( ungettext('%(value).1f sextillion', '%(value).1f sextillion', number), ungettext('%(value)s sextillion', '%(value)s sextillion', number), )), (24, lambda number: ( ungettext('%(value).1f septillion', '%(value).1f septillion', number), ungettext('%(value)s septillion', '%(value)s septillion', number), )), (27, lambda number: ( ungettext('%(value).1f octillion', '%(value).1f octillion', number), ungettext('%(value)s octillion', '%(value)s octillion', number), )), (30, lambda number: ( ungettext('%(value).1f nonillion', '%(value).1f nonillion', number), ungettext('%(value)s nonillion', '%(value)s nonillion', number), )), (33, lambda number: ( ungettext('%(value).1f decillion', '%(value).1f decillion', number), ungettext('%(value)s 
decillion', '%(value)s decillion', number), )), (100, lambda number: ( ungettext('%(value).1f googol', '%(value).1f googol', number), ungettext('%(value)s googol', '%(value)s googol', number), )), ) @register.filter(is_safe=False) def intword(value): """ Converts a large integer to a friendly text representation. Works best for numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'. """ try: value = int(value) except (TypeError, ValueError): return value if value < 1000000: return value def _check_for_i18n(value, float_formatted, string_formatted): """ Use the i18n enabled defaultfilters.floatformat if possible """ if settings.USE_L10N: value = defaultfilters.floatformat(value, 1) template = string_formatted else: template = float_formatted return template % {'value': value} for exponent, converters in intword_converters: large_number = 10 ** exponent if value < large_number * 1000: new_value = value / float(large_number) return _check_for_i18n(new_value, *converters(new_value)) return value @register.filter(is_safe=True) def apnumber(value): """ For numbers 1-9, returns the number spelled out. Otherwise, returns the number. This follows Associated Press style. """ try: value = int(value) except (TypeError, ValueError): return value if not 0 < value < 10: return value return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'), _('seven'), _('eight'), _('nine'))[value - 1] # Perform the comparison in the default time zone when USE_TZ = True # (unless a specific time zone has been applied with the |timezone filter). @register.filter(expects_localtime=True) def naturalday(value, arg=None): """ For date values that are tomorrow, today or yesterday compared to present day returns representing string. Otherwise, returns a string formatted according to settings.DATE_FORMAT. 
""" try: tzinfo = getattr(value, 'tzinfo', None) value = date(value.year, value.month, value.day) except AttributeError: # Passed value wasn't a date object return value except ValueError: # Date arguments out of range return value today = datetime.now(tzinfo).date() delta = value - today if delta.days == 0: return _('today') elif delta.days == 1: return _('tomorrow') elif delta.days == -1: return _('yesterday') return defaultfilters.date(value, arg) # This filter doesn't require expects_localtime=True because it deals properly # with both naive and aware datetimes. Therefore avoid the cost of conversion. @register.filter def naturaltime(value): """ For date and time values shows how many seconds, minutes or hours ago compared to current timestamp returns representing string. """ if not isinstance(value, date): # datetime is a subclass of date return value now = datetime.now(utc if is_aware(value) else None) if value < now: delta = now - value if delta.days != 0: return pgettext( 'naturaltime', '%(delta)s ago' ) % {'delta': defaultfilters.timesince(value, now)} elif delta.seconds == 0: return _('now') elif delta.seconds < 60: return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 'a second ago', '%(count)s seconds ago', delta.seconds ) % {'count': delta.seconds} elif delta.seconds // 60 < 60: count = delta.seconds // 60 return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 'a minute ago', '%(count)s minutes ago', count ) % {'count': count} else: count = delta.seconds // 60 // 60 return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 
'an hour ago', '%(count)s hours ago', count ) % {'count': count} else: delta = value - now if delta.days != 0: return pgettext( 'naturaltime', '%(delta)s from now' ) % {'delta': defaultfilters.timeuntil(value, now)} elif delta.seconds == 0: return _('now') elif delta.seconds < 60: return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 'a second from now', '%(count)s seconds from now', delta.seconds ) % {'count': delta.seconds} elif delta.seconds // 60 < 60: count = delta.seconds // 60 return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 'a minute from now', '%(count)s minutes from now', count ) % {'count': count} else: count = delta.seconds // 60 // 60 return ungettext( # Translators: please keep a non-breaking space (U+00A0) # between count and time unit. 'an hour from now', '%(count)s hours from now', count ) % {'count': count}
bsd-3-clause
slank/ansible
lib/ansible/modules/utilities/logic/pause.py
38
2465
# -*- mode: python -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'core', 'version': '1.0'} DOCUMENTATION = ''' --- module: pause short_description: Pause playbook execution description: - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt. - "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)." - "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts." version_added: "0.8" options: minutes: description: - A positive number of minutes to pause for. required: false default: null seconds: description: - A positive number of seconds to pause for. required: false default: null prompt: description: - Optional text to use for the prompt message. 
required: false default: null author: "Tim Bielawa (@tbielawa)" notes: - Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely. ''' EXAMPLES = ''' # Pause for 5 minutes to build app cache. - pause: minutes: 5 # Pause until you can verify updates to an application were successful. - pause: # A helpful reminder of what to look out for post-update. - pause: prompt: "Make sure org.foo.FooOverload exception is not present" '''
gpl-3.0
daisychainme/daisychain
daisychain/channel_clock/tests/test_setupview.py
1
1361
from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test import TestCase from channel_clock.views import SetupView from channel_clock.models import ClockUserSettings class SetupViewTest(TestCase): def setUp(self): self.url = reverse("clock:connect") self.max_muster = User.objects.create_user("max_muster") self.client.force_login(self.max_muster) def test_append_query_params(self): url = "http://example.com/?a=1&a=5&b=4" new_url = SetupView().append_query_params(url, a=3) self.assertIn('a=1', new_url) self.assertIn('a=3', new_url) self.assertIn('a=5', new_url) self.assertIn('b=4', new_url) def test_get__disconnected_user(self): getData = { "next": "/test-redirect-uri/" } res = self.client.get(self.url, getData) res = self.client.get(self.url) def test_get__connected_user(self): ClockUserSettings.objects.create(user=self.max_muster, utcoffset=0) res = self.client.get(self.url) def test_post__error(self): postData = { "offset": 418230471203841723042 } res = self.client.post(self.url, postData) def test_post__success(self): postData = { "offset": 42 } res = self.client.post(self.url, postData)
mit
ganga-devs/ganga
ganga/GangaCore/old_test/Internals/TestTransitions.py
1
1829
########################################################################## # Ganga Project. http://cern.ch/ganga # # $Id: TestTransitions.py,v 1.1 2008-07-17 16:41:16 moscicki Exp $ ########################################################################## from GangaCore.GPIDev.Schema import * from GangaCore.Lib.Executable import Executable, RTHandler # a list of states we must see otherwise the test fails expected_minimal_states = ['completed', 'running', 'submitted'] class MockExeApplication(Executable): _category = 'applications' _hidden = 0 _name = 'MockExeApplication' _schema = Executable._schema.inherit_copy() _schema.datadict['called'] = SimpleItem(defvalue=False) def __init__(self): super(MockExeApplication, self).__init__() self.called = False def transition_update(self, new_status): if new_status in expected_minimal_states: expected_minimal_states.remove(new_status) self.called = True from GangaCore.GPIDev.Adapters.ApplicationRuntimeHandlers import allHandlers allHandlers.add('MockExeApplication', 'Local', RTHandler) from GangaCore.Utility.Plugin import allPlugins allPlugins.add(MockExeApplication, 'applications', 'MockExeApplication') from GangaTest.Framework.tests import GangaGPITestCase from GangaTest.Framework.utils import sleep_until_completed class TestTransitions(GangaGPITestCase): def testTransitionsCalled(self): m = MockExeApplication() j = Job(backend=Local()) j.application = m j.submit() assert sleep_until_completed(j), 'Job should complete' assert j.status == 'completed' assert j.application.called, 'The method should have been called' assert len(expected_minimal_states) == 0, 'We should have seen all the minimal states'
gpl-2.0
ChrisBeaumont/luigi
luigi/tools/deps.py
13
4022
#!/usr/bin/env python
# Finds all tasks and task outputs on the dependency paths from the given downstream task T
# up to the given source/upstream task S (optional). If the upstream task is not given,
# all upstream tasks on all dependency paths of T will be returned.
# Terms:
#   if the execution of Task T depends on the output of task S on a dependency graph,
#   T is called a downstream/sink task, S is called an upstream/source task.
# This is useful and practical way to find all upstream tasks of task T.
# For example suppose you have a daily computation that starts with a task named Daily.
# And suppose you have another task named Aggregate. Daily triggers a few tasks
# which eventually trigger Aggregate. Now, suppose you find a bug in Aggregate.
# You fixed the bug and now you want to rerun it, including all it's upstream deps.
#
# To do that you run:
# bin/deps.py --module daily_module Aggregate --daily-param1 xxx --upstream Daily
#
# This will output all the tasks on the dependency path between Daily and Aggregate. In
# effect, this is how you find all upstream tasks for Aggregate. Now you can delete its
# output and run Aggregate again. Daily will eventually trigger Aggregate and all tasks on
# the way.
#
# The same code here might be used as a CLI tool as well as a python module.
# In python, invoke find_deps(task, upstream_name) to get a set of all task instances on the
# paths between task T and upstream task S. You can then use the task instances to delete their output or
# perform other computation based on that.
#
# Example:
#
# PYTHONPATH=$PYTHONPATH:/path/to/your/luigi/tasks bin/deps.py \
#       --module my.tasks MyDownstreamTask
#       --downstream_task_param1 123456
#       [--upstream-task-family MyUpstreamTask]
#

import luigi.interface
from luigi.contrib.ssh import RemoteTarget
from luigi.postgres import PostgresTarget
from luigi.s3 import S3Target
from luigi.target import FileSystemTarget
from luigi.task import flatten
from luigi import parameter


def get_task_requires(task):
    """Return the direct dependencies of *task* as a set of task instances."""
    return set(flatten(task.requires()))


def dfs_paths(start_task, goal_task_family, path=None):
    """Depth-first search that yields every task instance lying on some
    dependency path from *start_task* to a task of family *goal_task_family*.

    If *goal_task_family* is None, every reachable upstream task is yielded.
    A task may be yielded more than once (once per path it appears on);
    callers deduplicate with set().
    """
    if path is None:
        path = [start_task]
    if start_task.task_family == goal_task_family or goal_task_family is None:
        for item in path:
            yield item
    # Recurse only into dependencies not already on the current path, so
    # dependency cycles cannot cause infinite recursion.
    # (Renamed from 'next', which shadowed the builtin.)
    for dep in get_task_requires(start_task) - set(path):
        for t in dfs_paths(dep, goal_task_family, path + [dep]):
            yield t


class upstream(luigi.task.Config):
    '''
    Used to provide the parameter upstream-task-family
    '''
    family = parameter.Parameter(default=None)


def find_deps(task, upstream_task_family):
    '''
    Finds all dependencies that start with the given task and have a path
    to upstream_task_family

    Returns all deps on all paths between task and upstream
    '''
    # set() consumes the generator directly; no intermediate list needed.
    return set(dfs_paths(task, upstream_task_family))


def find_deps_cli():
    '''
    Finds all tasks on all paths from provided CLI task
    '''
    interface = luigi.interface._DynamicArgParseInterface()
    tasks = interface.parse()
    task, = tasks
    upstream_task_family = upstream().family
    return find_deps(task, upstream_task_family)


def main():
    """CLI entry point: print each task on the path together with a short
    description of where its output lives (SSH, S3, filesystem, DB, ...)."""
    deps = find_deps_cli()
    for d in deps:
        task_name = d
        # Call output() once per task; constructing a target may be costly.
        output = d.output()
        task_output = u"n/a"
        if isinstance(output, RemoteTarget):
            task_output = u"[SSH] {0}:{1}".format(
                output._fs.remote_context.host, output.path)
        elif isinstance(output, S3Target):
            task_output = u"[S3] {0}".format(output.path)
        elif isinstance(output, FileSystemTarget):
            task_output = u"[FileSystem] {0}".format(output.path)
        elif isinstance(output, PostgresTarget):
            task_output = u"[DB] {0}:{1}".format(output.host, output.table)
        else:
            task_output = "to be determined"
        print(u"""   TASK: {0} : {1}""".format(task_name, task_output))


if __name__ == '__main__':
    main()
apache-2.0
MDSLab/cloudwave-probe
source/cwProbe/plugins/cwpl_test.py
1
1537
# Copyright 2014 University of Messina (UniMe) # # Authors: Nicola Peditto <npeditto@unime.it>, Fabio Verboso <fverboso@unime.it> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' Created on 18/lug/2014 @authors: Nicola Peditto <npeditto@unime.it>, Fabio Verboso <fverboso@unime.it> ''' from cwProbe.plugins import CwPluginBase #USER IMPORT import time class Cwpl_Test(CwPluginBase.CwPluginBase): def __init__(self, probe): print 'INIT plugin: CW TEST.' super(Cwpl_Test, self).__init__() self.name="TestPlugin" self.loop_time=probe.loop_time self.start() def run(self): print "\t"+self.name+" plugin started" self.main() def main(self): """Main plugin method.""" print "Sending a measure..." while(1): print 'test plugin alive' time.sleep(float(self.loop_time))
apache-2.0
DoruMolnar123/Bots
plugins/minecraft_wiki.py
32
1895
import re

import requests
from lxml import html

from cloudbot import hook
from cloudbot.util import formatting

api_url = "http://minecraft.gamepedia.com/api.php?action=opensearch"
mc_url = "http://minecraft.gamepedia.com/"

# Citation markers such as "[1]" that should be stripped from article text.
# Compiled once at import time (the original recompiled a non-raw pattern
# string on every paragraph; "\[" in a non-raw string is an invalid escape
# sequence -- a DeprecationWarning today and a SyntaxError in future Pythons).
CITATION_RE = re.compile(r"\[\d+\]")


@hook.command()
def mcwiki(text):
    """mcwiki <phrase> - gets the first paragraph of the Minecraft Wiki article on <phrase>"""
    try:
        request = requests.get(api_url, params={'search': text.strip()})
        request.raise_for_status()
        j = request.json()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        # .json() raises ValueError on a malformed response body
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + requests.utils.quote(article_name, '')

    try:
        request_ = requests.get(url)
        request_.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        return "Error fetching wiki page: {}".format(e)

    page = html.fromstring(request_.text)

    # Return the first non-empty paragraph of the article body, cleaned of
    # citation markers and truncated for chat output.
    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = CITATION_RE.sub("", summary)
            summary = formatting.truncate(summary, 200)
            return "{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
unlicense
arameshkumar/nuxeo-drive
nuxeo-drive-client/nxdrive/client/remote_file_system_client.py
2
11756
"""API to access a remote file system for synchronization.""" import unicodedata from collections import namedtuple from datetime import datetime import os from nxdrive.logging_config import get_logger from nxdrive.client.common import NotFound from nxdrive.client.base_automation_client import BaseAutomationClient from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_SUFFIX from nxdrive.engine.activity import FileAction from threading import current_thread log = get_logger(__name__) # Data transfer objects BaseRemoteFileInfo = namedtuple('RemoteFileInfo', [ 'name', # title of the file (not guaranteed to be locally unique) 'uid', # id of the file 'parent_uid', # id of the parent file 'path', # abstract file system path: useful for ordering folder trees 'folderish', # True is can host children 'last_modification_time', # last update time 'last_contributor', # last contributor 'digest', # digest of the file 'digest_algorithm', # digest algorithm of the file 'download_url', # download URL of the file 'can_rename', # True is can rename 'can_delete', # True is can delete 'can_update', # True is can update content 'can_create_child', # True is can create child 'lock_owner', # lock owner 'lock_created', # lock creation time 'can_scroll_descendants' # True if the API to scroll through the descendants can be used ]) class RemoteFileInfo(BaseRemoteFileInfo): """Data Transfer Object for remote file info""" # Consistency with the local client API def get_digest(self): return self.digest class RemoteFileSystemClient(BaseAutomationClient): """File system oriented Automation client Uses the FileSystemItem API. 
""" # # API common with the local client API # def get_info(self, fs_item_id, parent_fs_item_id=None, raise_if_missing=True): fs_item = self.get_fs_item(fs_item_id, parent_fs_item_id=parent_fs_item_id) if fs_item is None: if raise_if_missing: raise NotFound("Could not find '%s' on '%s'" % ( fs_item_id, self.server_url)) return None return self.file_to_info(fs_item) def get_filesystem_root_info(self): toplevel_folder = self.execute("NuxeoDrive.GetTopLevelFolder") return self.file_to_info(toplevel_folder) def get_content(self, fs_item_id): """Download and return the binary content of a file system item Beware that the content is loaded in memory. Raises NotFound if file system item with id fs_item_id cannot be found """ fs_item_info = self.get_info(fs_item_id) download_url = self.server_url + fs_item_info.download_url FileAction("Download", None, fs_item_info.name, 0) content, _ = self.do_get(download_url, digest=fs_item_info.digest, digest_algorithm=fs_item_info.digest_algorithm) self.end_action() return content def stream_content(self, fs_item_id, file_path, parent_fs_item_id=None, fs_item_info=None, file_out=None): """Stream the binary content of a file system item to a tmp file Raises NotFound if file system item with id fs_item_id cannot be found """ if fs_item_info is None: fs_item_info = self.get_info(fs_item_id, parent_fs_item_id=parent_fs_item_id) download_url = self.server_url + fs_item_info.download_url file_name = os.path.basename(file_path) if file_out is None: file_dir = os.path.dirname(file_path) file_out = os.path.join(file_dir, DOWNLOAD_TMP_FILE_PREFIX + file_name + str(current_thread().ident) + DOWNLOAD_TMP_FILE_SUFFIX) FileAction("Download", file_out, file_name, 0) try: _, tmp_file = self.do_get(download_url, file_out=file_out, digest=fs_item_info.digest, digest_algorithm=fs_item_info.digest_algorithm) except Exception as e: if os.path.exists(file_out): os.remove(file_out) raise e finally: self.end_action() return tmp_file def 
get_children_info(self, fs_item_id): children = self.execute("NuxeoDrive.GetChildren", id=fs_item_id) return [self.file_to_info(fs_item) for fs_item in children] def scroll_descendants(self, fs_item_id, scroll_id, batch_size=100): res = self.execute("NuxeoDrive.ScrollDescendants", id=fs_item_id, scrollId=scroll_id, batchSize=batch_size) return { 'scroll_id': res['scrollId'], 'descendants': [self.file_to_info(fs_item) for fs_item in res['fileSystemItems']] } def is_filtered(self, path): return False def make_folder(self, parent_id, name): fs_item = self.execute("NuxeoDrive.CreateFolder", parentId=parent_id, name=name) return self.file_to_info(fs_item) def make_file(self, parent_id, name, content): """Create a document with the given name and content Creates a temporary file from the content then streams it. """ file_path = self.make_tmp_file(content) try: fs_item = self.execute_with_blob_streaming("NuxeoDrive.CreateFile", file_path, filename=name, parentId=parent_id) return self.file_to_info(fs_item) finally: os.remove(file_path) def stream_file(self, parent_id, file_path, filename=None, mime_type=None): """Create a document by streaming the file with the given path""" fs_item = self.execute_with_blob_streaming("NuxeoDrive.CreateFile", file_path, filename=filename, mime_type=mime_type, parentId=parent_id) return self.file_to_info(fs_item) def update_content(self, fs_item_id, content, filename=None, mime_type=None): """Update a document with the given content Creates a temporary file from the content then streams it. 
""" file_path = self.make_tmp_file(content) try: if filename is None: filename = self.get_info(fs_item_id).name fs_item = self.execute_with_blob_streaming('NuxeoDrive.UpdateFile', file_path, filename=filename, mime_type=mime_type, id=fs_item_id) return self.file_to_info(fs_item) finally: os.remove(file_path) def stream_update(self, fs_item_id, file_path, parent_fs_item_id=None, filename=None): """Update a document by streaming the file with the given path""" fs_item = self.execute_with_blob_streaming('NuxeoDrive.UpdateFile', file_path, filename=filename, id=fs_item_id, parentId=parent_fs_item_id) return self.file_to_info(fs_item) def delete(self, fs_item_id, parent_fs_item_id=None): self.execute("NuxeoDrive.Delete", id=fs_item_id, parentId=parent_fs_item_id) def exists(self, fs_item_id): return self.execute("NuxeoDrive.FileSystemItemExists", id=fs_item_id) # TODO def check_writable(self, fs_item_id): pass def rename(self, fs_item_id, new_name): return self.file_to_info(self.execute("NuxeoDrive.Rename", id=fs_item_id, name=new_name)) def move(self, fs_item_id, new_parent_id): return self.file_to_info(self.execute("NuxeoDrive.Move", srcId=fs_item_id, destId=new_parent_id)) def can_move(self, fs_item_id, new_parent_id): return self.execute("NuxeoDrive.CanMove", srcId=fs_item_id, destId=new_parent_id) def conflicted_name(self, original_name): """Generate a new name suitable for conflict deduplication.""" return self.execute("NuxeoDrive.GenerateConflictedItemName", name=original_name) def file_to_info(self, fs_item): """Convert Automation file system item description to RemoteFileInfo""" folderish = fs_item['folder'] milliseconds = fs_item['lastModificationDate'] last_update = datetime.fromtimestamp(milliseconds // 1000) last_contributor = fs_item.get('lastContributor') if folderish: digest = None digest_algorithm = None download_url = None can_update = False can_create_child = fs_item['canCreateChild'] # Scroll API availability can_scroll = 
fs_item.get('canScrollDescendants') if can_scroll: can_scroll_descendants = True else: can_scroll_descendants = False else: digest = fs_item['digest'] item_digest_algorithm = fs_item['digestAlgorithm'] if item_digest_algorithm is not None: digest_algorithm = item_digest_algorithm.lower().replace('-', '') else: digest_algorithm = None download_url = fs_item['downloadURL'] can_update = fs_item['canUpdate'] can_create_child = False can_scroll_descendants = False # Lock info lock_info = fs_item.get('lockInfo') if lock_info is None: lock_owner = None lock_created = None else: lock_owner = lock_info.get('owner') lock_created_millis = lock_info.get('created') if lock_created_millis is not None: lock_created = datetime.fromtimestamp(lock_created_millis // 1000) # Normalize using NFC to make the tests more intuitive name = fs_item['name'] if name is not None: name = unicodedata.normalize('NFC', name) return RemoteFileInfo( name, fs_item['id'], fs_item['parentId'], fs_item['path'], folderish, last_update, last_contributor, digest, digest_algorithm, download_url, fs_item['canRename'], fs_item['canDelete'], can_update, can_create_child, lock_owner, lock_created, can_scroll_descendants) # # API specific to the remote file system client # def get_fs_item(self, fs_item_id, parent_fs_item_id=None): return self.execute("NuxeoDrive.GetFileSystemItem", id=fs_item_id, parentId=parent_fs_item_id) def get_top_level_children(self): return self.execute("NuxeoDrive.GetTopLevelChildren") def get_changes(self, last_root_definitions, log_id=None, last_sync_date=None): if log_id: # If available, use last event log id as 'lowerBound' parameter # according to the new implementation of the audit change finder, # see https://jira.nuxeo.com/browse/NXP-14826. 
return self.execute('NuxeoDrive.GetChangeSummary', lowerBound=log_id, lastSyncActiveRootDefinitions=( last_root_definitions)) else: # Use last sync date as 'lastSyncDate' parameter according to the # old implementation of the audit change finder. return self.execute('NuxeoDrive.GetChangeSummary', lastSyncDate=last_sync_date, lastSyncActiveRootDefinitions=( last_root_definitions))
lgpl-2.1
espadrine/opera
chromium/src/tools/grit/grit/shortcuts.py
62
2930
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Stuff to prevent conflicting shortcuts.
'''

from grit import lazy_re


class ShortcutGroup(object):
  '''Manages a list of cliques that belong together in a single
  shortcut group.  Knows how to detect conflicting shortcut keys.
  '''

  # Matches shortcut keys, e.g. &J
  SHORTCUT_RE = lazy_re.compile('([^&]|^)(&[A-Za-z])')

  def __init__(self, name):
    self.name = name
    # Map of language codes to shortcut keys used (which is a map of
    # shortcut keys to counts).
    self.keys_by_lang = {}
    # List of cliques in this group
    self.cliques = []

  def AddClique(self, c):
    '''Adds clique |c| to this group (if not already present) and tallies
    its shortcut keys per language.'''
    for existing_clique in self.cliques:
      if existing_clique.GetId() == c.GetId():
        # This happens e.g. when we have e.g.
        # <if expr1><structure 1></if> <if expr2><structure 2></if>
        # where only one will really be included in the output.
        return

    self.cliques.append(c)
    for (lang, msg) in c.clique.items():
      keymap = self.keys_by_lang.setdefault(lang, {})
      content = msg.GetRealContent()
      keys = [groups[1] for groups in self.SHORTCUT_RE.findall(content)]
      for key in keys:
        # Shortcut keys are case-insensitive, so count them uppercased.
        key = key.upper()
        keymap[key] = keymap.get(key, 0) + 1

  def GenerateWarnings(self, tc_project):
    '''Returns a list of warning lines for each language in which some
    shortcut key is used more than once.  |tc_project| is currently unused
    but kept for interface compatibility.'''
    # For any language that has more than one occurrence of any shortcut,
    # make a list of the conflicting shortcuts.
    problem_langs = {}
    for (lang, keys) in self.keys_by_lang.items():
      for (key, count) in keys.items():
        if count > 1:
          problem_langs.setdefault(lang, []).append(key)

    warnings = []
    if problem_langs:
      warnings.append("WARNING - duplicate keys exist in shortcut group %s" %
                      self.name)
      for (lang, keys) in problem_langs.items():
        warnings.append("  %6s duplicates: %s" % (lang, ', '.join(keys)))
    return warnings


def GenerateDuplicateShortcutsWarnings(uberclique, tc_project):
  '''Given an UberClique and a project name, will print out helpful warnings
  if there are conflicting shortcuts within shortcut groups in the provided
  UberClique.

  Args:
    uberclique: clique.UberClique()
    tc_project: 'MyProjectNameInTheTranslationConsole'

  Returns:
    ['warning line 1', 'warning line 2', ...]
  '''
  warnings = []
  groups = {}
  for c in uberclique.AllCliques():
    for group in c.shortcut_groups:
      if group not in groups:
        groups[group] = ShortcutGroup(group)
      groups[group].AddClique(c)
  for group in groups.values():
    warnings += group.GenerateWarnings(tc_project)
  return warnings
bsd-3-clause
Mickey32111/pogom
pogom/pgoapi/protos/POGOProtos/Settings/Master/PokemonSettings_pb2.py
9
14192
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Settings/Master/PokemonSettings.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from POGOProtos.Enums import PokemonId_pb2 as POGOProtos_dot_Enums_dot_PokemonId__pb2 from POGOProtos.Enums import PokemonRarity_pb2 as POGOProtos_dot_Enums_dot_PokemonRarity__pb2 from POGOProtos.Enums import PokemonType_pb2 as POGOProtos_dot_Enums_dot_PokemonType__pb2 from POGOProtos.Enums import PokemonMove_pb2 as POGOProtos_dot_Enums_dot_PokemonMove__pb2 from POGOProtos.Enums import PokemonFamilyId_pb2 as POGOProtos_dot_Enums_dot_PokemonFamilyId__pb2 from POGOProtos.Settings.Master.Pokemon import StatsAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_StatsAttributes__pb2 from POGOProtos.Settings.Master.Pokemon import CameraAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_CameraAttributes__pb2 from POGOProtos.Settings.Master.Pokemon import EncounterAttributes_pb2 as POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_EncounterAttributes__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Settings/Master/PokemonSettings.proto', package='POGOProtos.Settings.Master', syntax='proto3', serialized_pb=_b('\n0POGOProtos/Settings/Master/PokemonSettings.proto\x12\x1aPOGOProtos.Settings.Master\x1a 
POGOProtos/Enums/PokemonId.proto\x1a$POGOProtos/Enums/PokemonRarity.proto\x1a\"POGOProtos/Enums/PokemonType.proto\x1a\"POGOProtos/Enums/PokemonMove.proto\x1a&POGOProtos/Enums/PokemonFamilyId.proto\x1a\x38POGOProtos/Settings/Master/Pokemon/StatsAttributes.proto\x1a\x39POGOProtos/Settings/Master/Pokemon/CameraAttributes.proto\x1a<POGOProtos/Settings/Master/Pokemon/EncounterAttributes.proto\"\x94\x07\n\x0fPokemonSettings\x12/\n\npokemon_id\x18\x01 \x01(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x13\n\x0bmodel_scale\x18\x03 \x01(\x02\x12+\n\x04type\x18\x04 \x01(\x0e\x32\x1d.POGOProtos.Enums.PokemonType\x12-\n\x06type_2\x18\x05 \x01(\x0e\x32\x1d.POGOProtos.Enums.PokemonType\x12\x44\n\x06\x63\x61mera\x18\x06 \x01(\x0b\x32\x34.POGOProtos.Settings.Master.Pokemon.CameraAttributes\x12J\n\tencounter\x18\x07 \x01(\x0b\x32\x37.POGOProtos.Settings.Master.Pokemon.EncounterAttributes\x12\x42\n\x05stats\x18\x08 \x01(\x0b\x32\x33.POGOProtos.Settings.Master.Pokemon.StatsAttributes\x12\x32\n\x0bquick_moves\x18\t \x03(\x0e\x32\x1d.POGOProtos.Enums.PokemonMove\x12\x36\n\x0f\x63inematic_moves\x18\n \x03(\x0e\x32\x1d.POGOProtos.Enums.PokemonMove\x12\x16\n\x0e\x61nimation_time\x18\x0b \x03(\x02\x12\x32\n\revolution_ids\x18\x0c \x03(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x16\n\x0e\x65volution_pips\x18\r \x01(\x05\x12/\n\x06rarity\x18\x0e \x01(\x0e\x32\x1f.POGOProtos.Enums.PokemonRarity\x12\x18\n\x10pokedex_height_m\x18\x0f \x01(\x02\x12\x19\n\x11pokedex_weight_kg\x18\x10 \x01(\x02\x12\x36\n\x11parent_pokemon_id\x18\x11 \x01(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x16\n\x0eheight_std_dev\x18\x12 \x01(\x02\x12\x16\n\x0eweight_std_dev\x18\x13 \x01(\x02\x12\x1c\n\x14km_distance_to_hatch\x18\x14 \x01(\x02\x12\x34\n\tfamily_id\x18\x15 \x01(\x0e\x32!.POGOProtos.Enums.PokemonFamilyId\x12\x17\n\x0f\x63\x61ndy_to_evolve\x18\x16 \x01(\x05\x62\x06proto3') , 
dependencies=[POGOProtos_dot_Enums_dot_PokemonId__pb2.DESCRIPTOR,POGOProtos_dot_Enums_dot_PokemonRarity__pb2.DESCRIPTOR,POGOProtos_dot_Enums_dot_PokemonType__pb2.DESCRIPTOR,POGOProtos_dot_Enums_dot_PokemonMove__pb2.DESCRIPTOR,POGOProtos_dot_Enums_dot_PokemonFamilyId__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_StatsAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_CameraAttributes__pb2.DESCRIPTOR,POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_EncounterAttributes__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _POKEMONSETTINGS = _descriptor.Descriptor( name='PokemonSettings', full_name='POGOProtos.Settings.Master.PokemonSettings', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pokemon_id', full_name='POGOProtos.Settings.Master.PokemonSettings.pokemon_id', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='model_scale', full_name='POGOProtos.Settings.Master.PokemonSettings.model_scale', index=1, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='POGOProtos.Settings.Master.PokemonSettings.type', index=2, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type_2', full_name='POGOProtos.Settings.Master.PokemonSettings.type_2', index=3, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), _descriptor.FieldDescriptor( name='camera', full_name='POGOProtos.Settings.Master.PokemonSettings.camera', index=4, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='encounter', full_name='POGOProtos.Settings.Master.PokemonSettings.encounter', index=5, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='stats', full_name='POGOProtos.Settings.Master.PokemonSettings.stats', index=6, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='quick_moves', full_name='POGOProtos.Settings.Master.PokemonSettings.quick_moves', index=7, number=9, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='cinematic_moves', full_name='POGOProtos.Settings.Master.PokemonSettings.cinematic_moves', index=8, number=10, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='animation_time', full_name='POGOProtos.Settings.Master.PokemonSettings.animation_time', index=9, number=11, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
_descriptor.FieldDescriptor( name='evolution_ids', full_name='POGOProtos.Settings.Master.PokemonSettings.evolution_ids', index=10, number=12, type=14, cpp_type=8, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='evolution_pips', full_name='POGOProtos.Settings.Master.PokemonSettings.evolution_pips', index=11, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rarity', full_name='POGOProtos.Settings.Master.PokemonSettings.rarity', index=12, number=14, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='pokedex_height_m', full_name='POGOProtos.Settings.Master.PokemonSettings.pokedex_height_m', index=13, number=15, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='pokedex_weight_kg', full_name='POGOProtos.Settings.Master.PokemonSettings.pokedex_weight_kg', index=14, number=16, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='parent_pokemon_id', full_name='POGOProtos.Settings.Master.PokemonSettings.parent_pokemon_id', index=15, number=17, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), 
_descriptor.FieldDescriptor( name='height_std_dev', full_name='POGOProtos.Settings.Master.PokemonSettings.height_std_dev', index=16, number=18, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weight_std_dev', full_name='POGOProtos.Settings.Master.PokemonSettings.weight_std_dev', index=17, number=19, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='km_distance_to_hatch', full_name='POGOProtos.Settings.Master.PokemonSettings.km_distance_to_hatch', index=18, number=20, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='family_id', full_name='POGOProtos.Settings.Master.PokemonSettings.family_id', index=19, number=21, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='candy_to_evolve', full_name='POGOProtos.Settings.Master.PokemonSettings.candy_to_evolve', index=20, number=22, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=444, serialized_end=1360, ) _POKEMONSETTINGS.fields_by_name['pokemon_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID 
_POKEMONSETTINGS.fields_by_name['type'].enum_type = POGOProtos_dot_Enums_dot_PokemonType__pb2._POKEMONTYPE _POKEMONSETTINGS.fields_by_name['type_2'].enum_type = POGOProtos_dot_Enums_dot_PokemonType__pb2._POKEMONTYPE _POKEMONSETTINGS.fields_by_name['camera'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_CameraAttributes__pb2._CAMERAATTRIBUTES _POKEMONSETTINGS.fields_by_name['encounter'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_EncounterAttributes__pb2._ENCOUNTERATTRIBUTES _POKEMONSETTINGS.fields_by_name['stats'].message_type = POGOProtos_dot_Settings_dot_Master_dot_Pokemon_dot_StatsAttributes__pb2._STATSATTRIBUTES _POKEMONSETTINGS.fields_by_name['quick_moves'].enum_type = POGOProtos_dot_Enums_dot_PokemonMove__pb2._POKEMONMOVE _POKEMONSETTINGS.fields_by_name['cinematic_moves'].enum_type = POGOProtos_dot_Enums_dot_PokemonMove__pb2._POKEMONMOVE _POKEMONSETTINGS.fields_by_name['evolution_ids'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID _POKEMONSETTINGS.fields_by_name['rarity'].enum_type = POGOProtos_dot_Enums_dot_PokemonRarity__pb2._POKEMONRARITY _POKEMONSETTINGS.fields_by_name['parent_pokemon_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID _POKEMONSETTINGS.fields_by_name['family_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonFamilyId__pb2._POKEMONFAMILYID DESCRIPTOR.message_types_by_name['PokemonSettings'] = _POKEMONSETTINGS PokemonSettings = _reflection.GeneratedProtocolMessageType('PokemonSettings', (_message.Message,), dict( DESCRIPTOR = _POKEMONSETTINGS, __module__ = 'POGOProtos.Settings.Master.PokemonSettings_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.PokemonSettings) )) _sym_db.RegisterMessage(PokemonSettings) # @@protoc_insertion_point(module_scope)
mit
ScreamingUdder/mantid
Framework/PythonInterface/test/python/mantid/kernel/FacilityInfoTest.py
3
1135
from __future__ import (absolute_import, division, print_function) import unittest from mantid.kernel import FacilityInfo, InstrumentInfo, ConfigService class FacilityInfoTest(unittest.TestCase): def test_construction_raies_an_error(self): self.assertRaises(RuntimeError, FacilityInfo) def _get_test_facility(self): return ConfigService.getFacility("ISIS") def test_attributes_are_as_expected(self): test_facility = self._get_test_facility() self.assertEquals(test_facility.name(), "ISIS") self.assertEquals(test_facility.zeroPadding(), 5) self.assertEquals(test_facility.delimiter(), "") self.assertEquals(len(test_facility.extensions()), 7) self.assertEquals(test_facility.preferredExtension(), ".nxs") self.assertEquals(len(test_facility.archiveSearch()), 1) self.assertTrue(len(test_facility.instruments()) > 30) self.assertTrue(len(test_facility.instruments("Neutron Diffraction"))> 10) self.assertTrue(isinstance(test_facility.instrument("WISH"), InstrumentInfo)) if __name__ == '__main__': unittest.main()
gpl-3.0
ToontownUprising/src
toontown/classicchars/DistributedCCharBase.py
3
12802
import copy from direct.controls.ControlManager import CollisionHandlerRayStart from direct.directnotify import DirectNotifyGlobal from direct.fsm import ClassicFSM from direct.fsm import State from direct.interval.IntervalGlobal import * from pandac.PandaModules import * import string import CCharChatter import CCharPaths from otp.avatar import Avatar from toontown.char import CharDNA from toontown.char import DistributedChar from toontown.chat.ChatGlobals import * from toontown.effects import DustCloud from toontown.toonbase import ToontownGlobals from toontown.toonbase.TTLocalizer import Donald, DonaldDock, WesternPluto, Pluto class DistributedCCharBase(DistributedChar.DistributedChar): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCCharBase') def __init__(self, cr, name, dnaName): try: self.DistributedCCharBase_initialized return except: self.DistributedCCharBase_initialized = 1 DistributedChar.DistributedChar.__init__(self, cr) dna = CharDNA.CharDNA() dna.newChar(dnaName) self.setDNA(dna) self.setName(name) self.setTransparency(TransparencyAttrib.MDual, 1) fadeIn = self.colorScaleInterval(0.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0), blendType='easeInOut') fadeIn.start() self.diffPath = None self.transitionToCostume = 0 self.__initCollisions() return def __initCollisions(self): self.cSphere = CollisionSphere(0.0, 0.0, 0.0, 8.0) self.cSphere.setTangible(0) self.cSphereNode = CollisionNode(self.getName() + 'BlatherSphere') self.cSphereNode.addSolid(self.cSphere) self.cSphereNodePath = self.attachNewNode(self.cSphereNode) self.cSphereNodePath.hide() self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask) self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter) self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0) self.cRayNode = CollisionNode(self.getName() + 'cRay') self.cRayNode.addSolid(self.cRay) self.cRayNodePath = self.attachNewNode(self.cRayNode) 
self.cRayNodePath.hide() self.cRayBitMask = ToontownGlobals.FloorBitmask self.cRayNode.setFromCollideMask(self.cRayBitMask) self.cRayNode.setIntoCollideMask(BitMask32.allOff()) self.lifter = CollisionHandlerFloor() self.lifter.setOffset(ToontownGlobals.FloorOffset) self.lifter.setReach(10.0) self.lifter.setMaxVelocity(0.0) self.lifter.addCollider(self.cRayNodePath, self) self.cTrav = base.localAvatar.cTrav def __deleteCollisions(self): del self.cSphere del self.cSphereNode self.cSphereNodePath.removeNode() del self.cSphereNodePath self.cRay = None self.cRayNode = None self.cRayNodePath = None self.lifter = None self.cTrav = None return def disable(self): self.stopBlink() self.ignoreAll() self.chatTrack.finish() del self.chatTrack if self.chatterDialogue: self.chatterDialogue.stop() del self.chatterDialogue DistributedChar.DistributedChar.disable(self) self.stopEarTask() def delete(self): try: self.DistributedCCharBase_deleted except: self.setParent(NodePath('Temp')) self.DistributedCCharBase_deleted = 1 self.__deleteCollisions() DistributedChar.DistributedChar.delete(self) def generate(self, diffPath = None): DistributedChar.DistributedChar.generate(self) if diffPath == None: self.setPos(CCharPaths.getNodePos(CCharPaths.startNode, CCharPaths.getPaths(self.getName(), self.getCCLocation()))) else: self.setPos(CCharPaths.getNodePos(CCharPaths.startNode, CCharPaths.getPaths(diffPath, self.getCCLocation()))) self.setHpr(0, 0, 0) self.setParent(ToontownGlobals.SPRender) self.startBlink() self.startEarTask() self.chatTrack = Sequence() self.chatterDialogue = None self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter) self.accept('exitSafeZone', self.__handleExitSafeZone) return def __handleExitSafeZone(self): self.__handleCollisionSphereExit(None) return def __handleCollisionSphereEnter(self, collEntry): self.notify.debug('Entering collision sphere...') self.sendUpdate('avatarEnter', []) self.accept('chatUpdate', self.__handleChatUpdate) 
self.accept('chatUpdateSC', self.__handleChatUpdateSC) self.accept('chatUpdateSCCustom', self.__handleChatUpdateSCCustom) self.accept('chatUpdateSCToontask', self.__handleChatUpdateSCToontask) self.nametag3d.setBin('transparent', 100) self.acceptOnce('exit' + self.cSphereNode.getName(), self.__handleCollisionSphereExit) def __handleCollisionSphereExit(self, collEntry): self.notify.debug('Exiting collision sphere...') self.sendUpdate('avatarExit', []) self.ignore('chatUpdate') self.ignore('chatUpdateSC') self.ignore('chatUpdateSCCustom') self.ignore('chatUpdateSCToontask') self.acceptOnce('enter' + self.cSphereNode.getName(), self.__handleCollisionSphereEnter) def __handleChatUpdate(self, msg, chatFlags): self.sendUpdate('setNearbyAvatarChat', [msg]) def __handleChatUpdateSC(self, msgIndex): self.sendUpdate('setNearbyAvatarSC', [msgIndex]) def __handleChatUpdateSCCustom(self, msgIndex): self.sendUpdate('setNearbyAvatarSCCustom', [msgIndex]) def __handleChatUpdateSCToontask(self, taskId, toNpcId, toonProgress, msgIndex): self.sendUpdate('setNearbyAvatarSCToontask', [taskId, toNpcId, toonProgress, msgIndex]) def makeTurnToHeadingTrack(self, heading): curHpr = self.getHpr() destHpr = self.getHpr() destHpr.setX(heading) if destHpr[0] - curHpr[0] > 180.0: destHpr.setX(destHpr[0] - 360) elif destHpr[0] - curHpr[0] < -180.0: destHpr.setX(destHpr[0] + 360) turnSpeed = 180.0 time = abs(destHpr[0] - curHpr[0]) / turnSpeed turnTracks = Parallel() if time > 0.2: turnTracks.append(Sequence(Func(self.loop, 'walk'), Wait(time), Func(self.loop, 'neutral'))) turnTracks.append(LerpHprInterval(self, time, destHpr, name='lerp' + self.getName() + 'Hpr')) return turnTracks def setChat(self, category, msg, avId): if avId in self.cr.doId2do: avatar = self.cr.doId2do[avId] chatter = CCharChatter.getChatter(self.getName(), self.getCCChatter()) if category >= len(chatter): self.notify.debug("Chatter's changed") return elif len(chatter[category]) <= msg: self.notify.debug("Chatter's changed") 
return str = chatter[category][msg] if '%' in str: str = copy.deepcopy(str) avName = avatar.getName() str = str.replace('%', avName) track = Sequence() if category != CCharChatter.GOODBYE: curHpr = self.getHpr() self.headsUp(avatar) destHpr = self.getHpr() self.setHpr(curHpr) track.append(self.makeTurnToHeadingTrack(destHpr[0])) if self.getName() == Donald or self.getName() == WesternPluto or self.getName() == Pluto: chatFlags = CFThought | CFTimeout if hasattr(base.cr, 'newsManager') and base.cr.newsManager: holidayIds = base.cr.newsManager.getHolidayIdList() if ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds: if self.getName() == Pluto: chatFlags = CFTimeout | CFSpeech elif self.getName() == DonaldDock: chatFlags = CFTimeout | CFSpeech self.nametag3d.hide() else: chatFlags = CFTimeout | CFSpeech self.chatterDialogue = self.getChatterDialogue(category, msg) track.append(Func(self.setChatAbsolute, str, chatFlags, self.chatterDialogue)) self.chatTrack.finish() self.chatTrack = track self.chatTrack.start() def setWalk(self, srcNode, destNode, timestamp): pass def walkSpeed(self): return 0.1 def enableRaycast(self, enable = 1): if not self.cTrav or not hasattr(self, 'cRayNode') or not self.cRayNode: self.notify.debug('raycast info not found for ' + self.getName()) return self.cTrav.removeCollider(self.cRayNodePath) if enable: if self.notify.getDebug(): self.notify.debug('enabling raycast for ' + self.getName()) self.cTrav.addCollider(self.cRayNodePath, self.lifter) elif self.notify.getDebug(): self.notify.debug('disabling raycast for ' + self.getName()) def getCCLocation(self): return 0 def getCCChatter(self): self.handleHolidays() return self.CCChatter def handleHolidays(self): self.CCChatter = 0 if hasattr(base.cr, 'newsManager') and base.cr.newsManager: holidayIds = base.cr.newsManager.getHolidayIdList() if ToontownGlobals.CRASHED_LEADERBOARD in holidayIds: self.CCChatter = ToontownGlobals.CRASHED_LEADERBOARD elif ToontownGlobals.CIRCUIT_RACING_EVENT in 
holidayIds: self.CCChatter = ToontownGlobals.CIRCUIT_RACING_EVENT elif ToontownGlobals.WINTER_CAROLING in holidayIds: self.CCChatter = ToontownGlobals.WINTER_CAROLING elif ToontownGlobals.WINTER_DECORATIONS in holidayIds: self.CCChatter = ToontownGlobals.WINTER_DECORATIONS elif ToontownGlobals.WACKY_WINTER_CAROLING in holidayIds: self.CCChatter = ToontownGlobals.WACKY_WINTER_CAROLING elif ToontownGlobals.WACKY_WINTER_DECORATIONS in holidayIds: self.CCChatter = ToontownGlobals.WACKY_WINTER_DECORATIONS elif ToontownGlobals.VALENTINES_DAY in holidayIds: self.CCChatter = ToontownGlobals.VALENTINES_DAY elif ToontownGlobals.APRIL_FOOLS_COSTUMES in holidayIds: self.CCChatter = ToontownGlobals.APRIL_FOOLS_COSTUMES elif ToontownGlobals.SILLY_CHATTER_ONE in holidayIds: self.CCChatter = ToontownGlobals.SILLY_CHATTER_ONE elif ToontownGlobals.SILLY_CHATTER_TWO in holidayIds: self.CCChatter = ToontownGlobals.SILLY_CHATTER_TWO elif ToontownGlobals.SILLY_CHATTER_THREE in holidayIds: self.CCChatter = ToontownGlobals.SILLY_CHATTER_THREE elif ToontownGlobals.SILLY_CHATTER_FOUR in holidayIds: self.CCChatter = ToontownGlobals.SILLY_CHATTER_FOUR elif ToontownGlobals.SILLY_CHATTER_FIVE in holidayIds: self.CCChatter = ToontownGlobals.SILLY_CHATTER_FOUR elif ToontownGlobals.HALLOWEEN_COSTUMES in holidayIds: self.CCChatter = ToontownGlobals.HALLOWEEN_COSTUMES elif ToontownGlobals.SPOOKY_COSTUMES in holidayIds: self.CCChatter = ToontownGlobals.SPOOKY_COSTUMES elif ToontownGlobals.SELLBOT_FIELD_OFFICE in holidayIds: self.CCChatter = ToontownGlobals.SELLBOT_FIELD_OFFICE def fadeAway(self): fadeOut = self.colorScaleInterval(0.5, Vec4(1, 1, 1, 0.5), startColorScale=Vec4(1, 1, 1, 1), blendType='easeInOut') fadeOut.start() self.loop('neutral') if self.fsm: self.fsm.addState(State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])) self.fsm.request('TransitionToCostume', force=1) self.ignoreAll() def enterTransitionToCostume(self): def 
getDustCloudIval(): dustCloud = DustCloud.DustCloud(fBillboard=0, wantSound=1) dustCloud.setBillboardAxis(2.0) dustCloud.setZ(4) dustCloud.setScale(0.6) dustCloud.createTrack() return Sequence(Func(dustCloud.reparentTo, self), dustCloud.track, Func(dustCloud.destroy), name='dustCloadIval') dust = getDustCloudIval() dust.start() def exitTransitionToCostume(self): pass
mit
hgeg/SESender
sesender.py
1
1198
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import with_statement import boto.ses,sys,time #aws configuration AWS_KEY = '' AWS_SECRET = '' AWS_REGION = '' #Put an email address verified in SES console #Set sender name to anything you want VER_EMAIL = 'SENDER_NAME <SENDER_EMAIL>' #emails per second #by default, You can send 5 emails per second max or #program fails to send emails. #if your account has different limits, feel free to #increase this value eps = 5 def send_bulk_emails(listfile,mailfile): #establish connection with SES conn = boto.ses.connect_to_region(AWS_REGION, aws_access_key_id=AWS_KEY, aws_secret_access_key=AWS_SECRET) delay = 1.0/eps #Read email data with open(mailfile) as f: subject, body = f.read().split('\n\n') with open(listfile,'r') as f: maillist = f.read().split('\n')[:-1] for e in maillist: #send to each recipient conn.send_email(VER_EMAIL, subject, body, e) time.sleep(delay) print "sent to %s"%e print "finished" if __name__ == "__main__": try: send_bulk_emails(sys.argv[1],sys.argv[2]) except Exception, e: print "Exception Ocurred:\n%s"%e
mit
timbooo/traktforalfred
requests/packages/chardet/langthaimodel.py
2930
11275
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # The following result for thai was collected from a limited sample (1M). 
# Character Mapping Table: TIS620CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, 223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, 236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 92.6386% # first 1024 sequences:7.3177% # rest sequences: 1.0230% # negative sequences: 0.0436% ThaiLangModel = ( 0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, 0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, 0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, 3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, 3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, 3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, 3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, 
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, 2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, 3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, 1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, 3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, 1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, 0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, 0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, 3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, 2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, 0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, 3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, 3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, 2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, 3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, 2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, 3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, 3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, 3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, 3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, 1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, 0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, 0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, 3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, 3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, 1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, 3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, 3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, 0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, 0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, 1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, 1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, 3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, 0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, 3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, 0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, 0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, 0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, 0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, 0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, 0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, 0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, 3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, 2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, 0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, 3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, 1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, 1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) TIS620ThaiModel = { 'charToOrderMap': TIS620CharToOrderMap, 'precedenceMatrix': ThaiLangModel, 'mTypicalPositiveRatio': 0.926386, 'keepEnglishLetter': False, 'charsetName': 
"TIS-620" } # flake8: noqa
mit
sujitfulse/ostree
tests/syslinux-entries-crosscheck.py
2
3627
#!/usr/bin/python # # Copyright (C) 2015 Red Hat # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. import os import sys if len(sys.argv) == 1: sysroot = '' else: sysroot = sys.argv[1] loaderpath = sysroot + '/boot/loader/entries' syslinuxpath = sysroot + '/boot/syslinux/syslinux.cfg' def fatal(msg): sys.stderr.write(msg) sys.stderr.write('\n') sys.exit(1) def compare_entries_descending(a, b): return int(b['version']) - int(a['version']) def get_ostree_option(optionstring): for o in optionstring.split(): if o.startswith('ostree='): return o[8:] raise ValueError('ostree= not found') entries = [] syslinux_entries = [] # Parse loader configs for fname in os.listdir(loaderpath): path = os.path.join(loaderpath, fname) with open(path) as f: entry = {} for line in f: line = line.strip() if (line == '' or line.startswith('#')): continue s = line.find(' ') assert s > 0 k = line[0:s] v = line[s+1:] entry[k] = v entries.append(entry) entries.sort(compare_entries_descending) # Parse SYSLINUX config with open(syslinuxpath) as f: in_ostree_config = False syslinux_entry = None syslinux_default = None for line in f: line = line.strip() if line.startswith('DEFAULT '): if syslinux_entry is not None: syslinux_default = line.split(' ', 1)[1] elif line.startswith('LABEL '): if syslinux_entry is not None: 
syslinux_entries.append(syslinux_entry) syslinux_entry = {} syslinux_entry['title'] = line.split(' ', 1)[1] elif line.startswith('KERNEL '): syslinux_entry['linux'] = line.split(' ', 1)[1] elif line.startswith('INITRD '): syslinux_entry['initrd'] = line.split(' ', 1)[1] elif line.startswith('APPEND '): syslinux_entry['options'] = line.split(' ', 1)[1] if syslinux_entry is not None: syslinux_entries.append(syslinux_entry) if len(entries) != len(syslinux_entries): fatal("Found {0} loader entries, but {1} SYSLINUX entries\n".format(len(entries), len(syslinux_entries))) def assert_matches_key(a, b, key): aval = a[key] bval = b[key] if aval != bval: fatal("Mismatch on {0}: {1} != {2}".format(key, aval, bval)) for i,(entry,syslinuxentry) in enumerate(zip(entries, syslinux_entries)): assert_matches_key(entry, syslinuxentry, 'linux') assert_matches_key(entry, syslinuxentry, 'initrd') entry_ostree = get_ostree_option(entry['options']) syslinux_ostree = get_ostree_option(syslinuxentry['options']) if entry_ostree != syslinux_ostree: fatal("Mismatch on ostree option: {0} != {1}".format(entry_ostree, syslinux_ostree)) sys.stdout.write('SYSLINUX configuration validated\n') sys.exit(0)
lgpl-2.1
luisgg/iteexe
exe/engine/externalurlidevice.py
14
4476
# =========================================================================== # eXe # Copyright 2004-2006, University of Auckland # Copyright 2004-2008 eXe Project, http://eXeLearning.org/ # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # =========================================================================== """ ExternalUrlIdevice: just has a block of text """ import logging from exe.engine.idevice import Idevice from exe.engine.translate import lateTranslate log = logging.getLogger(__name__) # =========================================================================== class ExternalUrlIdevice(Idevice): """ ExternalUrlIdevice: just has a field for the url """ persistenceVersion = 3 def __init__(self, content=""): Idevice.__init__(self, x_(u"External Web Site"), x_(u"University of Auckland"), x_(u"""The external website iDevice loads an external website into an inline frame in your eXe content rather then opening it in a popup box. This means learners are not having to juggle windows. 
This iDevice should only be used if your content will be viewed by learners online."""), "", "") self.emphasis = Idevice.NoEmphasis self.url = "" self.height = "300" self._urlInstruc = x_(u"""Enter the URL you wish to display and select the size of the area to display it in.""") #Properties urlInstruc = lateTranslate('urlInstruc') def getResourcesField(self, this_resource): """ implement the specific resource finding mechanism for this iDevice: """ # NOTE that the ExternalURL iDevice has NO additional resources: return None def getRichTextFields(self): """ Like getResourcesField(), a general helper to allow nodes to search through all of its fields without having to know the specifics of each iDevice type. """ # ExternalURL has no rich-text fields: return [] def burstHTML(self, i): """ takes a BeautifulSoup fragment (i) and bursts its contents to import this idevice from a CommonCartridge export """ # External Web Site Idevice: #title = i.find(name='span', attrs={'class' : 'iDeviceTitle' }) #idevice.title = title.renderContents().decode('utf-8') # no title for this iDevice. inner = i.find(name='iframe').__str__() # 1. the url: <iframe src="HERE" ... ></iframe> url_start_pos = inner.find('src=\"') if url_start_pos >= 0: url_start_pos += len('src=\"') url_end_pos = inner.find('\"', url_start_pos) if url_end_pos >= url_start_pos: self.url = inner[url_start_pos : url_end_pos].decode('utf-8') # 2. the height: <iframe height="###px" ... 
></iframe> height_start_pos = inner.find('height=\"') if height_start_pos >= 0: height_start_pos += len('height=\"') height_end_pos = inner.find('px\"', height_start_pos) if height_end_pos >= height_start_pos: self.height = \ inner[height_start_pos : height_end_pos].decode('utf-8') def upgradeToVersion1(self): """ Upgrades exe to v0.10 """ self._upgradeIdeviceToVersion1() def upgradeToVersion2(self): """ Upgrades to v0.12 """ self._upgradeIdeviceToVersion2() def upgradeToVersion3(self): """ add _urlInstruc """ self._urlInstruc = x_(u"""Enter the URL you wish to display and select the size of the area to display it in.""") # ===========================================================================
gpl-2.0
CLVsol/odoo_api
clv_insured.py
1
15740
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### from __future__ import print_function from erppeek import * import csv from base import * import argparse import getpass from clv_insurance_client import * def get_insured_category_id(client, category_name): clv_insured_category = client.model('clv_insured.category') insured_category_browse = clv_insured_category.browse([('name', '=', category_name),]) insured_category_id = insured_category_browse.id if insured_category_id == []: values = { 'name': category_name, } insured_category_id = clv_insured_category.create(values).id else: insured_category_id = insured_category_id[0] return insured_category_id def clv_insured_export_VCAS(client, file_path, date_inclusion): clv_insurance_client = client.model('clv_insurance_client') insurance_client_browse = clv_insurance_client.browse(\ [('name', '=', 'VCAS - Vera Cruz Associação de Saúde'),]) client_id_VCAS = insurance_client_browse[0].id headings_insured = ['no', 'name', 'code', 'birthday', 'gender', 'insured_category', 'insurance_client', 'reg_number', 'insurance', 'state', 'date_activation', ] file_insured = 
open(file_path, 'wb') writer_insured = csv.writer(file_insured, delimiter = ';', quotechar = '"', quoting=csv.QUOTE_ALL) writer_insured.writerow(headings_insured) clv_insured = client.model('clv_insured') insured_browse = clv_insured.browse([('state', '!=', 'canceled'), ('date_inclusion', '<=', date_inclusion), ('insurance_client_id', '=', client_id_VCAS),]) i = 0 for insured in insured_browse: i += 1 name = insured.name.encode("utf-8") code = insured.code birthday = insured.birthday gender = insured.gender insured_category = insured.category_ids[0].name.encode("utf-8") insurance_client = insured.insurance_client_id.name.encode("utf-8") reg_number = insured.reg_number insurance = insured.insurance_id.name.encode("utf-8") state = insured.state date_inclusion = insured.date_inclusion print(i, insured.name.encode("utf-8")) row_insured = [i, name, code, birthday, gender, insured_category, insurance_client, reg_number, insurance, state, date_inclusion, ] writer_insured.writerow(row_insured) file_insured.close() print('i: ', i) def clv_insured_export_HVC(client, file_path, date_inclusion): clv_insurance_client = client.model('clv_insurance_client') insurance_client_browse = clv_insurance_client.browse(\ [('name', '=', 'HVC - Hospital Vera Cruz'),]) client_id_HVC = insurance_client_browse[0].id headings_insured = ['no', 'name', 'code', 'birthday', 'gender', 'insured_category', 'insurance_client', 'reg_number', 'insurance', 'state', 'date_activation', ] file_insured = open(file_path, 'wb') writer_insured = csv.writer(file_insured, delimiter = ';', quotechar = '"', quoting=csv.QUOTE_ALL) writer_insured.writerow(headings_insured) clv_insured = client.model('clv_insured') insured_browse = clv_insured.browse([('state', '!=', 'canceled'), ('date_inclusion', '<=', date_inclusion), ('insurance_client_id', '=', client_id_HVC),]) i = 0 for insured in insured_browse: i += 1 name = insured.name.encode("utf-8") code = insured.code birthday = insured.birthday gender = insured.gender 
insured_category = insured.category_ids[0].name.encode("utf-8") insurance_client = insured.insurance_client_id.name.encode("utf-8") reg_number = insured.reg_number insurance = insured.insurance_id.name.encode("utf-8") state = insured.state date_inclusion = insured.date_inclusion print(i, insured.name.encode("utf-8")) row_insured = [i, name, code, birthday, gender, insured_category, insurance_client, reg_number, insurance, state, date_inclusion, ] writer_insured.writerow(row_insured) file_insured.close() print('i: ', i) def clv_insured_export_RMC(client, file_path, date_inclusion): clv_insurance_client = client.model('clv_insurance_client') insurance_client_browse = clv_insurance_client.browse( [('name', '=', 'RMC - Ressonância Magnética Campinas'), ]) client_id_RMC = insurance_client_browse[0].id headings_insured = ['no', 'name', 'code', 'birthday', 'gender', 'insured_category', 'insurance_client', 'reg_number', 'insurance', 'state', 'date_activation', ] file_insured = open(file_path, 'wb') writer_insured = csv.writer(file_insured, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL) writer_insured.writerow(headings_insured) clv_insured = client.model('clv_insured') insured_browse = clv_insured.browse([('state', '!=', 'canceled'), ('date_inclusion', '<=', date_inclusion), ('insurance_client_id', '=', client_id_RMC), ]) i = 0 for insured in insured_browse: i += 1 name = insured.name.encode("utf-8") code = insured.code birthday = insured.birthday gender = insured.gender insured_category = insured.category_ids[0].name.encode("utf-8") insurance_client = insured.insurance_client_id.name.encode("utf-8") reg_number = insured.reg_number insurance = insured.insurance_id.name.encode("utf-8") state = insured.state date_inclusion = insured.date_inclusion print(i, insured.name.encode("utf-8")) row_insured = [i, name, code, birthday, gender, insured_category, insurance_client, reg_number, insurance, state, date_inclusion, ] writer_insured.writerow(row_insured) 
file_insured.close() print('i: ', i) def clv_insured_updt_reg_number(client, client_name): insurance_client_id = get_insurance_client_id(client, client_name) clv_insured = client.model('clv_insured') insured_browse = clv_insured.browse([('insurance_client_id', '=', insurance_client_id), ]) insured_count = 0 for insured in insured_browse: insured_count += 1 reg_number = insured.reg_number print(insured_count, reg_number, insured.name.encode("utf-8")) if reg_number[0] == '0': while reg_number[0] == '0': reg_number = reg_number[1:] print('>>>>>', reg_number) values = { "reg_number": reg_number, } clv_insured.write(insured.id, values) print('insured_count: ', insured_count) def get_arguments(): global username global password global dbname parser = argparse.ArgumentParser() parser.add_argument('--user', action="store", dest="username") parser.add_argument('--pw', action="store", dest="password") parser.add_argument('--db', action="store", dest="dbname") args = parser.parse_args() print('%s%s' % ('--> ', args)) if args.dbname != None: dbname = args.dbname elif dbname == '*': dbname = raw_input('dbname: ') if args.username != None: username = args.username elif username == '*': username = raw_input('username: ') if args.password != None: password = args.password elif password == '*': password = getpass.getpass('password: ') if __name__ == '__main__': server = 'http://localhost:8069' # username = 'username' username = '*' # paswword = 'paswword' paswword = '*' dbname = 'odoo' # dbname = '*' get_arguments() from time import time start = time() print('--> clv_insured.py...') client = erppeek.Client(server, dbname, username, password) # file_path = '/opt/openerp/biobox/data/insured_2015_09_30.csv' # print('-->', client, file_path) # print('--> Executing clv_insured_export()...') # clv_insured_export(client, file_path) # file_path = '/opt/openerp/biobox/data/insured_VCAS_2015_10_31.csv' # date_inclusion = '2015-10-31' # print('-->', client, file_path, date_inclusion) # 
print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2015_10_31.csv' # date_inclusion = '2015-10-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_VCAS_2015_11_30.csv' # date_inclusion = '2015-11-30' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2015_11_30.csv' # date_inclusion = '2015-11-30' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) ########################################## # file_path = '/opt/openerp/biobox/data/insured_VCAS_2015_12_31.csv' # date_inclusion = '2015-12-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2015_12_31.csv' # date_inclusion = '2015-12-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) ########################################## # file_path = '/opt/openerp/biobox/data/insured_VCAS_2016_01_31.csv' # date_inclusion = '2016-01-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2016_01_31.csv' # date_inclusion = '2016-01-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # 
clv_insured_export_HVC(client, file_path, date_inclusion) ########################################## # file_path = '/opt/openerp/biobox/data/insured_VCAS_2016_02_29.csv' # date_inclusion = '2016-02-29' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2016-02-29.csv' # date_inclusion = '2016-02-29' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) # 2016-04-02 ######################################### # file_path = '/opt/openerp/biobox/data/insured_VCAS_2016_03_31.csv' # date_inclusion = '2016-03-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2016-03-31.csv' # date_inclusion = '2016-03-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) # 2016-05-03 ######################################### # file_path = '/opt/openerp/biobox/data/insured_VCAS_2016_04_30.csv' # date_inclusion = '2016-04-30' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2016_04_30.csv' # date_inclusion = '2016-04-30' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_RMC_2016_04_30.csv' # date_inclusion = '2016-04-30' # print('-->', client, file_path, date_inclusion) # print('--> Executing 
clv_insured_export_RMC()...') # clv_insured_export_RMC(client, file_path, date_inclusion) # 2016-06-02 ######################################### # file_path = '/opt/openerp/biobox/data/insured_VCAS_2016_05_31.csv' # date_inclusion = '2016-05-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_VCAS()...') # clv_insured_export_VCAS(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_HVC_2016_05_31.csv' # date_inclusion = '2016-05-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_HVC()...') # clv_insured_export_HVC(client, file_path, date_inclusion) # file_path = '/opt/openerp/biobox/data/insured_RMC_2016_05_31.csv' # date_inclusion = '2016-05-31' # print('-->', client, file_path, date_inclusion) # print('--> Executing clv_insured_export_RMC()...') # clv_insured_export_RMC(client, file_path, date_inclusion) print('--> clv_insured.py') print('--> Execution time:', secondsToStr(time() - start))
agpl-3.0
goyalankit/po-compiler
object_files/networkx-1.8.1/networkx/generators/small.py
48
12862
# -*- coding: utf-8 -*- """ Various small and named graphs, together with some compact generators. """ __author__ ="""Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)""" # Copyright (C) 2004-2008 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. __all__ = ['make_small_graph', 'LCF_graph', 'bull_graph', 'chvatal_graph', 'cubical_graph', 'desargues_graph', 'diamond_graph', 'dodecahedral_graph', 'frucht_graph', 'heawood_graph', 'house_graph', 'house_x_graph', 'icosahedral_graph', 'krackhardt_kite_graph', 'moebius_kantor_graph', 'octahedral_graph', 'pappus_graph', 'petersen_graph', 'sedgewick_maze_graph', 'tetrahedral_graph', 'truncated_cube_graph', 'truncated_tetrahedron_graph', 'tutte_graph'] import networkx as nx from networkx.generators.classic import empty_graph, cycle_graph, path_graph, complete_graph from networkx.exception import NetworkXError #------------------------------------------------------------------------------ # Tools for creating small graphs #------------------------------------------------------------------------------ def make_small_undirected_graph(graph_description, create_using=None): """ Return a small undirected graph described by graph_description. See make_small_graph. """ if create_using is not None and create_using.is_directed(): raise NetworkXError("Directed Graph not supported") return make_small_graph(graph_description, create_using) def make_small_graph(graph_description, create_using=None): """ Return the small graph described by graph_description. graph_description is a list of the form [ltype,name,n,xlist] Here ltype is one of "adjacencylist" or "edgelist", name is the name of the graph and n the number of nodes. This constructs a graph of n nodes with integer labels 0,..,n-1. 
If ltype="adjacencylist" then xlist is an adjacency list with exactly n entries, in with the j'th entry (which can be empty) specifies the nodes connected to vertex j. e.g. the "square" graph C_4 can be obtained by >>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[1,3],[2,4],[1,3]]]) or, since we do not need to add edges twice, >>> G=nx.make_small_graph(["adjacencylist","C_4",4,[[2,4],[3],[4],[]]]) If ltype="edgelist" then xlist is an edge list written as [[v1,w2],[v2,w2],...,[vk,wk]], where vj and wj integers in the range 1,..,n e.g. the "square" graph C_4 can be obtained by >>> G=nx.make_small_graph(["edgelist","C_4",4,[[1,2],[3,4],[2,3],[4,1]]]) Use the create_using argument to choose the graph class/type. """ ltype=graph_description[0] name=graph_description[1] n=graph_description[2] G=empty_graph(n, create_using) nodes=G.nodes() if ltype=="adjacencylist": adjlist=graph_description[3] if len(adjlist) != n: raise NetworkXError("invalid graph_description") G.add_edges_from([(u-1,v) for v in nodes for u in adjlist[v]]) elif ltype=="edgelist": edgelist=graph_description[3] for e in edgelist: v1=e[0]-1 v2=e[1]-1 if v1<0 or v1>n-1 or v2<0 or v2>n-1: raise NetworkXError("invalid graph_description") else: G.add_edge(v1,v2) G.name=name return G def LCF_graph(n,shift_list,repeats,create_using=None): """ Return the cubic graph specified in LCF notation. LCF notation (LCF=Lederberg-Coxeter-Fruchte) is a compressed notation used in the generation of various cubic Hamiltonian graphs of high symmetry. See, for example, dodecahedral_graph, desargues_graph, heawood_graph and pappus_graph below. n (number of nodes) The starting graph is the n-cycle with nodes 0,...,n-1. (The null graph is returned if n < 0.) shift_list = [s1,s2,..,sk], a list of integer shifts mod n, repeats integer specifying the number of times that shifts in shift_list are successively applied to each v_current in the n-cycle to generate an edge between v_current and v_current+shift mod n. 
For v1 cycling through the n-cycle a total of k*repeats with shift cycling through shiftlist repeats times connect v1 with v1+shift mod n The utility graph K_{3,3} >>> G=nx.LCF_graph(6,[3,-3],3) The Heawood graph >>> G=nx.LCF_graph(14,[5,-5],7) See http://mathworld.wolfram.com/LCFNotation.html for a description and references. """ if create_using is not None and create_using.is_directed(): raise NetworkXError("Directed Graph not supported") if n <= 0: return empty_graph(0, create_using) # start with the n-cycle G=cycle_graph(n, create_using) G.name="LCF_graph" nodes=G.nodes() n_extra_edges=repeats*len(shift_list) # edges are added n_extra_edges times # (not all of these need be new) if n_extra_edges < 1: return G for i in range(n_extra_edges): shift=shift_list[i%len(shift_list)] #cycle through shift_list v1=nodes[i%n] # cycle repeatedly through nodes v2=nodes[(i + shift)%n] G.add_edge(v1, v2) return G #------------------------------------------------------------------------------- # Various small and named graphs #------------------------------------------------------------------------------- def bull_graph(create_using=None): """Return the Bull graph. 
""" description=[ "adjacencylist", "Bull Graph", 5, [[2,3],[1,3,4],[1,2,5],[2],[3]] ] G=make_small_undirected_graph(description, create_using) return G def chvatal_graph(create_using=None): """Return the Chvátal graph.""" description=[ "adjacencylist", "Chvatal Graph", 12, [[2,5,7,10],[3,6,8],[4,7,9],[5,8,10], [6,9],[11,12],[11,12],[9,12], [11],[11,12],[],[]] ] G=make_small_undirected_graph(description, create_using) return G def cubical_graph(create_using=None): """Return the 3-regular Platonic Cubical graph.""" description=[ "adjacencylist", "Platonic Cubical Graph", 8, [[2,4,5],[1,3,8],[2,4,7],[1,3,6], [1,6,8],[4,5,7],[3,6,8],[2,5,7]] ] G=make_small_undirected_graph(description, create_using) return G def desargues_graph(create_using=None): """ Return the Desargues graph.""" G=LCF_graph(20, [5,-5,9,-9], 5, create_using) G.name="Desargues Graph" return G def diamond_graph(create_using=None): """Return the Diamond graph. """ description=[ "adjacencylist", "Diamond Graph", 4, [[2,3],[1,3,4],[1,2,4],[2,3]] ] G=make_small_undirected_graph(description, create_using) return G def dodecahedral_graph(create_using=None): """ Return the Platonic Dodecahedral graph. """ G=LCF_graph(20, [10,7,4,-4,-7,10,-4,7,-7,4], 2, create_using) G.name="Dodecahedral Graph" return G def frucht_graph(create_using=None): """Return the Frucht Graph. The Frucht Graph is the smallest cubical graph whose automorphism group consists only of the identity element. """ G=cycle_graph(7, create_using) G.add_edges_from([[0,7],[1,7],[2,8],[3,9],[4,9],[5,10],[6,10], [7,11],[8,11],[8,9],[10,11]]) G.name="Frucht Graph" return G def heawood_graph(create_using=None): """ Return the Heawood graph, a (3,6) cage. 
""" G=LCF_graph(14, [5,-5], 7, create_using) G.name="Heawood Graph" return G def house_graph(create_using=None): """Return the House graph (square with triangle on top).""" description=[ "adjacencylist", "House Graph", 5, [[2,3],[1,4],[1,4,5],[2,3,5],[3,4]] ] G=make_small_undirected_graph(description, create_using) return G def house_x_graph(create_using=None): """Return the House graph with a cross inside the house square.""" description=[ "adjacencylist", "House-with-X-inside Graph", 5, [[2,3,4],[1,3,4],[1,2,4,5],[1,2,3,5],[3,4]] ] G=make_small_undirected_graph(description, create_using) return G def icosahedral_graph(create_using=None): """Return the Platonic Icosahedral graph.""" description=[ "adjacencylist", "Platonic Icosahedral Graph", 12, [[2,6,8,9,12],[3,6,7,9],[4,7,9,10],[5,7,10,11], [6,7,11,12],[7,12],[],[9,10,11,12], [10],[11],[12],[]] ] G=make_small_undirected_graph(description, create_using) return G def krackhardt_kite_graph(create_using=None): """ Return the Krackhardt Kite Social Network. A 10 actor social network introduced by David Krackhardt to illustrate: degree, betweenness, centrality, closeness, etc. The traditional labeling is: Andre=1, Beverley=2, Carol=3, Diane=4, Ed=5, Fernando=6, Garth=7, Heather=8, Ike=9, Jane=10. 
""" description=[ "adjacencylist", "Krackhardt Kite Social Network", 10, [[2,3,4,6],[1,4,5,7],[1,4,6],[1,2,3,5,6,7],[2,4,7], [1,3,4,7,8],[2,4,5,6,8],[6,7,9],[8,10],[9]] ] G=make_small_undirected_graph(description, create_using) return G def moebius_kantor_graph(create_using=None): """Return the Moebius-Kantor graph.""" G=LCF_graph(16, [5,-5], 8, create_using) G.name="Moebius-Kantor Graph" return G def octahedral_graph(create_using=None): """Return the Platonic Octahedral graph.""" description=[ "adjacencylist", "Platonic Octahedral Graph", 6, [[2,3,4,5],[3,4,6],[5,6],[5,6],[6],[]] ] G=make_small_undirected_graph(description, create_using) return G def pappus_graph(): """ Return the Pappus graph.""" G=LCF_graph(18,[5,7,-7,7,-7,-5],3) G.name="Pappus Graph" return G def petersen_graph(create_using=None): """Return the Petersen graph.""" description=[ "adjacencylist", "Petersen Graph", 10, [[2,5,6],[1,3,7],[2,4,8],[3,5,9],[4,1,10],[1,8,9],[2,9,10], [3,6,10],[4,6,7],[5,7,8]] ] G=make_small_undirected_graph(description, create_using) return G def sedgewick_maze_graph(create_using=None): """ Return a small maze with a cycle. This is the maze used in Sedgewick,3rd Edition, Part 5, Graph Algorithms, Chapter 18, e.g. Figure 18.2 and following. 
Nodes are numbered 0,..,7 """ G=empty_graph(0, create_using) G.add_nodes_from(range(8)) G.add_edges_from([[0,2],[0,7],[0,5]]) G.add_edges_from([[1,7],[2,6]]) G.add_edges_from([[3,4],[3,5]]) G.add_edges_from([[4,5],[4,7],[4,6]]) G.name="Sedgewick Maze" return G def tetrahedral_graph(create_using=None): """ Return the 3-regular Platonic Tetrahedral graph.""" G=complete_graph(4, create_using) G.name="Platonic Tetrahedral graph" return G def truncated_cube_graph(create_using=None): """Return the skeleton of the truncated cube.""" description=[ "adjacencylist", "Truncated Cube Graph", 24, [[2,3,5],[12,15],[4,5],[7,9], [6],[17,19],[8,9],[11,13], [10],[18,21],[12,13],[15], [14],[22,23],[16],[20,24], [18,19],[21],[20],[24], [22],[23],[24],[]] ] G=make_small_undirected_graph(description, create_using) return G def truncated_tetrahedron_graph(create_using=None): """Return the skeleton of the truncated Platonic tetrahedron.""" G=path_graph(12, create_using) # G.add_edges_from([(1,3),(1,10),(2,7),(4,12),(5,12),(6,8),(9,11)]) G.add_edges_from([(0,2),(0,9),(1,6),(3,11),(4,11),(5,7),(8,10)]) G.name="Truncated Tetrahedron Graph" return G def tutte_graph(create_using=None): """Return the Tutte graph.""" description=[ "adjacencylist", "Tutte's Graph", 46, [[2,3,4],[5,27],[11,12],[19,20],[6,34], [7,30],[8,28],[9,15],[10,39],[11,38], [40],[13,40],[14,36],[15,16],[35], [17,23],[18,45],[19,44],[46],[21,46], [22,42],[23,24],[41],[25,28],[26,33], [27,32],[34],[29],[30,33],[31], [32,34],[33],[],[],[36,39], [37],[38,40],[39],[],[], [42,45],[43],[44,46],[45],[],[]] ] G=make_small_undirected_graph(description, create_using) return G
apache-2.0
adityadharne/TestObento
obi/ui/management/commands/index_all.py
1
2373
from optparse import make_option import requests import solr from django.conf import settings from django.core.management.base import BaseCommand from ui.models import Database, Journal COMMIT_BLOCK_SIZE = 100 class Command(BaseCommand): help = 'index the database list and journal titles list' option_list = BaseCommand.option_list + ( make_option('--no-clear', action="store_true", default=False, help='Do not clear out the solr index.'), ) def handle(self, *args, **options): s = solr.SolrConnection(settings.SOLR_URL) # unless the user gives a --no-clear option, clear the solr index. if not options.get('no-clear', None): requests.get(settings.SOLR_URL + '/update' + '?stream.body=<delete><query>*:*</query></delete>') requests.get(settings.SOLR_URL + '/update' + '?stream.body=<commit/>') qs_databases = Database.objects.all() total_indexed = 0 block = [] for db in qs_databases: block.append({'id': 'db-%s' % db.id, 'name': db.name, 'url': db.url, 'description': db.description}) if len(block) == COMMIT_BLOCK_SIZE: s.add_many(block, _commit=True) total_indexed += len(block) print 'indexed:', total_indexed, 'databases' block = [] if block: s.add_many(block, _commit=True) total_indexed += len(block) print 'indexed:', total_indexed, '. done.' block = [] total_indexed = 0 qs_journals = Journal.objects.distinct('ssid') for journal in qs_journals: block.append({'id': 'j-%s' % journal.id, 'name': journal.title, 'issn': journal.issn, 'eissn': journal.eissn}) if len(block) == COMMIT_BLOCK_SIZE: s.add_many(block, _commit=True) total_indexed += len(block) print 'indexed:', total_indexed, 'journals' block = [] if block: s.add_many(block, _commit=True) total_indexed += len(block) print 'indexed:', total_indexed, 'journals'
mit
risicle/django
django/core/serializers/__init__.py
347
8194
""" Interfaces for serializing Django objects. Usage:: from django.core import serializers json = serializers.serialize("json", some_queryset) objects = list(serializers.deserialize("json", json)) To add your own serializers, use the SERIALIZATION_MODULES setting:: SERIALIZATION_MODULES = { "csv": "path.to.csv.serializer", "txt": "path.to.txt.serializer", } """ import importlib from django.apps import apps from django.conf import settings from django.core.serializers.base import SerializerDoesNotExist from django.utils import six # Built-in serializers BUILTIN_SERIALIZERS = { "xml": "django.core.serializers.xml_serializer", "python": "django.core.serializers.python", "json": "django.core.serializers.json", "yaml": "django.core.serializers.pyyaml", } _serializers = {} class BadSerializer(object): """ Stub serializer to hold exception raised during registration This allows the serializer registration to cache serializers and if there is an error raised in the process of creating a serializer it will be raised and passed along to the caller when the serializer is used. """ internal_use_only = False def __init__(self, exception): self.exception = exception def __call__(self, *args, **kwargs): raise self.exception def register_serializer(format, serializer_module, serializers=None): """Register a new serializer. ``serializer_module`` should be the fully qualified module name for the serializer. If ``serializers`` is provided, the registration will be added to the provided dictionary. If ``serializers`` is not provided, the registration will be made directly into the global register of serializers. Adding serializers directly is not a thread-safe operation. 
""" if serializers is None and not _serializers: _load_serializers() try: module = importlib.import_module(serializer_module) except ImportError as exc: bad_serializer = BadSerializer(exc) module = type('BadSerializerModule', (object,), { 'Deserializer': bad_serializer, 'Serializer': bad_serializer, }) if serializers is None: _serializers[format] = module else: serializers[format] = module def unregister_serializer(format): "Unregister a given serializer. This is not a thread-safe operation." if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) del _serializers[format] def get_serializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Serializer def get_serializer_formats(): if not _serializers: _load_serializers() return list(_serializers) def get_public_serializer_formats(): if not _serializers: _load_serializers() return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only] def get_deserializer(format): if not _serializers: _load_serializers() if format not in _serializers: raise SerializerDoesNotExist(format) return _serializers[format].Deserializer def serialize(format, queryset, **options): """ Serialize a queryset (or any iterator that returns database objects) using a certain serializer. """ s = get_serializer(format)() s.serialize(queryset, **options) return s.getvalue() def deserialize(format, stream_or_string, **options): """ Deserialize a stream or a string. Returns an iterator that yields ``(obj, m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* -- object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name : list_of_related_objects}``. """ d = get_deserializer(format) return d(stream_or_string, **options) def _load_serializers(): """ Register built-in and settings-defined serializers. 
This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order. """ global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, "SERIALIZATION_MODULES"): for format in settings.SERIALIZATION_MODULES: register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers) _serializers = serializers def sort_dependencies(app_list): """Sort a list of (app_config, models) pairs into a single list of models. The single list of models is sorted so that any model with a natural key is serialized before a normal model, and any model with a natural key dependency has it's dependencies serialized first. """ # Process the list of models, and get the list of dependencies model_dependencies = [] models = set() for app_config, model_list in app_list: if model_list is None: model_list = app_config.get_models() for model in model_list: models.add(model) # Add any explicitly defined dependencies if hasattr(model, 'natural_key'): deps = getattr(model.natural_key, 'dependencies', []) if deps: deps = [apps.get_model(dep) for dep in deps] else: deps = [] # Now add a dependency for any FK relation with a model that # defines a natural key for field in model._meta.fields: if field.remote_field: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) # Also add a dependency for any simple M2M relation with a model # that defines a natural key. M2M relations with explicit through # models don't count as dependencies. for field in model._meta.many_to_many: if field.remote_field.through._meta.auto_created: rel_model = field.remote_field.model if hasattr(rel_model, 'natural_key') and rel_model != model: deps.append(rel_model) model_dependencies.append((model, deps)) model_dependencies.reverse() # Now sort the models to ensure that dependencies are met. 
This # is done by repeatedly iterating over the input list of models. # If all the dependencies of a given model are in the final list, # that model is promoted to the end of the final list. This process # continues until the input list is empty, or we do a full iteration # over the input models without promoting a model to the final list. # If we do a full iteration without a promotion, that means there are # circular dependencies in the list. model_list = [] while model_dependencies: skipped = [] changed = False while model_dependencies: model, deps = model_dependencies.pop() # If all of the models in the dependency list are either already # on the final model list, or not on the original serialization list, # then we've found another model with all it's dependencies satisfied. found = True for candidate in ((d not in models or d in model_list) for d in deps): if not candidate: found = False if found: model_list.append(model) changed = True else: skipped.append((model, deps)) if not changed: raise RuntimeError("Can't resolve dependencies for %s in serialized app list." % ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name) for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__)) ) model_dependencies = skipped return model_list
bsd-3-clause
smartforceplus/SmartForceplus
openerp/tools/test_config.py
456
1418
# -*- coding: utf-8 -*- """ Tests for the configuration file/command-line arguments. """ # This test should be run from its directory. # TODO A configmanager object cannot parse multiple times a config file # and/or the command line, preventing to 'reload' a configuration. import os import config config_file_00 = os.path.join(os.path.dirname(__file__),'test-config-values-00.conf') # 1. No config file, no command-line arguments (a.k.a. default values) conf = config.configmanager() conf.parse_config() assert conf['osv_memory_age_limit'] == 1.0 assert os.path.join(conf['root_path'], 'addons') == conf['addons_path'] # 2. No config file, some command-line arguments conf = config.configmanager() # mess with the optparse.Option definition to allow an invalid path conf.casts['addons_path'].action = 'store' conf.parse_config(['--addons-path=/xyz/dont-exist', '--osv-memory-age-limit=2.3']) assert conf['osv_memory_age_limit'] == 2.3 assert conf['addons_path'] == '/xyz/dont-exist' # 3. Config file, no command-line arguments conf = config.configmanager() conf.parse_config(['-c', config_file_00]) assert conf['osv_memory_age_limit'] == 3.4 # 4. Config file, and command-line arguments conf = config.configmanager() conf.parse_config(['-c', config_file_00, '--osv-memory-age-limit=2.3']) assert conf['osv_memory_age_limit'] == 2.3 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
danieljaouen/ansible
lib/ansible/module_utils/facts/virtual/openbsd.py
199
2319
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin from ansible.module_utils.facts.utils import get_file_content class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin): """ This is a OpenBSD-specific subclass of Virtual. It defines - virtualization_type - virtualization_role """ platform = 'OpenBSD' DMESG_BOOT = '/var/run/dmesg.boot' def get_virtual_facts(self): virtual_facts = {} # Set empty values as default virtual_facts['virtualization_type'] = '' virtual_facts['virtualization_role'] = '' virtual_product_facts = self.detect_virt_product('hw.product') virtual_facts.update(virtual_product_facts) if virtual_facts['virtualization_type'] == '': virtual_vendor_facts = self.detect_virt_vendor('hw.vendor') virtual_facts.update(virtual_vendor_facts) # Check the dmesg if vmm(4) attached, indicating the host is # capable of virtualization. 
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT) for line in dmesg_boot.splitlines(): match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line) if match: virtual_facts['virtualization_type'] = 'vmm' virtual_facts['virtualization_role'] = 'host' return virtual_facts class OpenBSDVirtualCollector(VirtualCollector): _fact_class = OpenBSDVirtual _platform = 'OpenBSD'
gpl-3.0
EDUlib/edx-platform
scripts/xsslint/xsslint/linters.py
1
66925
""" Linter classes containing logic for checking various filetypes. """ import ast import io import os import re import textwrap from xsslint import visitors from xsslint.reporting import ExpressionRuleViolation, FileResults, RuleViolation from xsslint.rules import RuleSet from xsslint.utils import Expression, ParseString, StringLines, is_skip_dir from xsslint.django_linter import TransExpression, BlockTransExpression, HtmlInterpolateExpression class BaseLinter: """ BaseLinter provides some helper functions that are used by multiple linters. """ LINE_COMMENT_DELIM = None def _is_valid_directory(self, skip_dirs, directory): """ Determines if the provided directory is a directory that could contain a file that needs to be linted. Arguments: skip_dirs: The directories to be skipped. directory: The directory to be linted. Returns: True if this directory should be linted for violations and False otherwise. """ if is_skip_dir(skip_dirs, directory): return False return True def _load_file(self, file_full_path): """ Loads a file into a string. Arguments: file_full_path: The full path of the file to be loaded. Returns: A string containing the files contents. """ with open(file_full_path, 'r') as input_file: file_contents = input_file.read() return file_contents def _load_and_check_file_is_safe(self, file_full_path, lint_function, results): """ Loads the Python file and checks if it is in violation. Arguments: file_full_path: The file to be loaded and linted. lint_function: A function that will lint for violations. It must take two arguments: 1) string contents of the file 2) results object results: A FileResults to be used for this file Returns: The file results containing any violations. 
""" file_contents = self._load_file(file_full_path) lint_function(file_contents, results) return results def _find_closing_char_index( self, start_delim, open_char, close_char, template, start_index, num_open_chars=0, strings=None ): """ Finds the index of the closing char that matches the opening char. For example, this could be used to find the end of a Mako expression, where the open and close characters would be '{' and '}'. Arguments: start_delim: If provided (e.g. '${' for Mako expressions), the closing character must be found before the next start_delim. open_char: The opening character to be matched (e.g '{') close_char: The closing character to be matched (e.g '}') template: The template to be searched. start_index: The start index of the last open char. num_open_chars: The current number of open chars. strings: A list of ParseStrings already parsed Returns: A dict containing the following, or None if unparseable: close_char_index: The index of the closing character strings: a list of ParseStrings """ strings = [] if strings is None else strings # Find start index of an uncommented line. start_index = self._uncommented_start_index(template, start_index) # loop until we found something useful on an uncommented out line while start_index is not None: close_char_index = template.find(close_char, start_index) if close_char_index < 0: # If we can't find a close char, let's just quit. return None open_char_index = template.find(open_char, start_index, close_char_index) parse_string = ParseString(template, start_index, close_char_index) valid_index_list = [close_char_index] if 0 <= open_char_index: valid_index_list.append(open_char_index) if parse_string.start_index is not None: valid_index_list.append(parse_string.start_index) min_valid_index = min(valid_index_list) start_index = self._uncommented_start_index(template, min_valid_index) if start_index == min_valid_index: break if start_index is None: # No uncommented code to search. 
return None if parse_string.start_index == min_valid_index: strings.append(parse_string) if parse_string.end_index is None: return None else: return self._find_closing_char_index( start_delim, open_char, close_char, template, start_index=parse_string.end_index, num_open_chars=num_open_chars, strings=strings ) if open_char_index == min_valid_index: if start_delim is not None: # if we find another starting delim, consider this unparseable start_delim_index = template.find(start_delim, start_index, close_char_index) if 0 <= start_delim_index < open_char_index: return None return self._find_closing_char_index( start_delim, open_char, close_char, template, start_index=open_char_index + 1, num_open_chars=num_open_chars + 1, strings=strings ) if num_open_chars == 0: return { 'close_char_index': close_char_index, 'strings': strings, } else: return self._find_closing_char_index( start_delim, open_char, close_char, template, start_index=close_char_index + 1, num_open_chars=num_open_chars - 1, strings=strings ) def _uncommented_start_index(self, template, start_index): """ Finds the first start_index that is on an uncommented line. Arguments: template: The template to be searched. start_index: The start index of the last open char. Returns: If start_index is on an uncommented out line, returns start_index. Otherwise, returns the start_index of the first line that is uncommented, if there is one. Otherwise, returns None. """ if self.LINE_COMMENT_DELIM is not None: line_start_index = StringLines(template).index_to_line_start_index(start_index) uncommented_line_start_index_regex = re.compile(fr"^(?!\s*{self.LINE_COMMENT_DELIM})", re.MULTILINE) # Finds the line start index of the first uncommented line, including the current line. match = uncommented_line_start_index_regex.search(template, line_start_index) if match is None: # No uncommented lines. return None elif match.start() < start_index: # Current line is uncommented, so return original start_index. 
return start_index else: # Return start of first uncommented line. return match.start() else: # No line comment delimeter, so this acts as a no-op. return start_index class UnderscoreTemplateLinter(BaseLinter): """ The linter for Underscore.js template files. """ ruleset = RuleSet( underscore_not_escaped='underscore-not-escaped', ) def __init__(self, skip_dirs=None): """ Init method. """ super().__init__() self._skip_underscore_dirs = skip_dirs or () def process_file(self, directory, file_name): """ Process file to determine if it is an Underscore template file and if it is safe. Arguments: directory (string): The directory of the file to be checked file_name (string): A filename for a potential underscore file Returns: The file results containing any violations. """ full_path = os.path.normpath(directory + '/' + file_name) results = FileResults(full_path) if not self._is_valid_directory(self._skip_underscore_dirs, directory): return results if not file_name.lower().endswith('.underscore'): return results return self._load_and_check_file_is_safe(full_path, self.check_underscore_file_is_safe, results) def check_underscore_file_is_safe(self, underscore_template, results): """ Checks for violations in an Underscore.js template. Arguments: underscore_template: The contents of the Underscore.js template. results: A file results objects to which violations will be added. """ self._check_underscore_expressions(underscore_template, results) results.prepare_results(underscore_template) def _check_underscore_expressions(self, underscore_template, results): """ Searches for Underscore.js expressions that contain violations. Arguments: underscore_template: The contents of the Underscore.js template. results: A list of results into which violations will be added. 
""" expressions = self._find_unescaped_expressions(underscore_template) for expression in expressions: if not self._is_safe_unescaped_expression(expression): results.violations.append(ExpressionRuleViolation( self.ruleset.underscore_not_escaped, expression )) def _is_safe_unescaped_expression(self, expression): """ Determines whether an expression is safely escaped, even though it is using the expression syntax that doesn't itself escape (i.e. <%= ). In some cases it is ok to not use the Underscore.js template escape (i.e. <%- ) because the escaping is happening inside the expression. Safe examples:: <%= edx.HtmlUtils.ensureHtml(message) %> <%= HtmlUtils.ensureHtml(message) %> <%= _.escape(message) %> Arguments: expression: The Expression being checked. Returns: True if the Expression has been safely escaped, and False otherwise. """ if expression.expression_inner.startswith('edx.HtmlUtils.'): return True if expression.expression_inner.startswith('HtmlUtils.'): return True if expression.expression_inner.startswith('_.escape('): return True return False def _find_unescaped_expressions(self, underscore_template): """ Returns a list of unsafe expressions. At this time all expressions that are unescaped are considered unsafe. Arguments: underscore_template: The contents of the Underscore.js template. Returns: A list of Expressions. """ unescaped_expression_regex = re.compile("<%=.*?%>", re.DOTALL) expressions = [] for match in unescaped_expression_regex.finditer(underscore_template): expression = Expression( match.start(), match.end(), template=underscore_template, start_delim="<%=", end_delim="%>" ) expressions.append(expression) return expressions class JavaScriptLinter(BaseLinter): """ The linter for JavaScript files. 
""" LINE_COMMENT_DELIM = "//" ruleset = RuleSet( javascript_jquery_append='javascript-jquery-append', javascript_jquery_prepend='javascript-jquery-prepend', javascript_jquery_insertion='javascript-jquery-insertion', javascript_jquery_insert_into_target='javascript-jquery-insert-into-target', javascript_jquery_html='javascript-jquery-html', javascript_concat_html='javascript-concat-html', javascript_escape='javascript-escape', ) def __init__(self, underscore_linter, javascript_skip_dirs=None): """ Init method. """ super().__init__() self.underscore_linter = underscore_linter self.ruleset = self.ruleset + self.underscore_linter.ruleset self._skip_javascript_dirs = javascript_skip_dirs or () def process_file(self, directory, file_name): """ Process file to determine if it is a JavaScript file and if it is safe. Arguments: directory (string): The directory of the file to be checked file_name (string): A filename for a potential JavaScript file Returns: The file results containing any violations. """ file_full_path = os.path.normpath(directory + '/' + file_name) results = FileResults(file_full_path) if not results.is_file: return results if file_name.lower().endswith('.js') and not file_name.lower().endswith('.min.js'): skip_dirs = self._skip_javascript_dirs else: return results if not self._is_valid_directory(skip_dirs, directory): return results return self._load_and_check_file_is_safe(file_full_path, self.check_javascript_file_is_safe, results) def check_javascript_file_is_safe(self, file_contents, results): """ Checks for violations in a JavaScript file. Arguments: file_contents: The contents of the JavaScript file. results: A file results objects to which violations will be added. 
""" no_caller_check = None no_argument_check = None self._check_jquery_function( file_contents, "append", self.ruleset.javascript_jquery_append, no_caller_check, self._is_jquery_argument_safe, results ) self._check_jquery_function( file_contents, "prepend", self.ruleset.javascript_jquery_prepend, no_caller_check, self._is_jquery_argument_safe, results ) self._check_jquery_function( file_contents, "unwrap|wrap|wrapAll|wrapInner|after|before|replaceAll|replaceWith", self.ruleset.javascript_jquery_insertion, no_caller_check, self._is_jquery_argument_safe, results ) self._check_jquery_function( file_contents, "appendTo|prependTo|insertAfter|insertBefore", self.ruleset.javascript_jquery_insert_into_target, self._is_jquery_insert_caller_safe, no_argument_check, results ) self._check_jquery_function( file_contents, "html", self.ruleset.javascript_jquery_html, no_caller_check, self._is_jquery_html_argument_safe, results ) self._check_javascript_escape(file_contents, results) self._check_concat_with_html(file_contents, self.ruleset.javascript_concat_html, results) self.underscore_linter.check_underscore_file_is_safe(file_contents, results) results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM) def _get_expression_for_function(self, file_contents, function_start_match): """ Returns an expression that matches the function call opened with function_start_match. Arguments: file_contents: The contents of the JavaScript file. function_start_match: A regex match representing the start of the function call (e.g. ".escape("). Returns: An Expression that best matches the function. 
""" start_index = function_start_match.start() inner_start_index = function_start_match.end() result = self._find_closing_char_index( None, "(", ")", file_contents, start_index=inner_start_index ) if result is not None: end_index = result['close_char_index'] + 1 expression = Expression( start_index, end_index, template=file_contents, start_delim=function_start_match.group(), end_delim=")" ) else: expression = Expression(start_index) return expression def _check_javascript_escape(self, file_contents, results): """ Checks that escape() is not used. escape() is not recommended. ref. https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/escape Arguments: file_contents: The contents of the JavaScript file. results: A file results objects to which violations will be added. """ # Regex to match uses of escape() or window.escape(). regex = re.compile(r"(?:^|(?<=window\.)|(?<![\w.$]))escape\(") for function_match in regex.finditer(file_contents): expression = self._get_expression_for_function(file_contents, function_match) results.violations.append(ExpressionRuleViolation(self.ruleset.javascript_escape, expression)) def _check_jquery_function(self, file_contents, function_names, rule, is_caller_safe, is_argument_safe, results): """ Checks that the JQuery function_names (e.g. append(), prepend()) calls are safe. Arguments: file_contents: The contents of the JavaScript file. function_names: A pipe delimited list of names of the functions (e.g. "wrap|after|before"). rule: The name of the rule to use for validation errors (e.g. self.ruleset.javascript_jquery_append). is_caller_safe: A function to test if caller of the JQuery function is safe. is_argument_safe: A function to test if the argument passed to the JQuery function is safe. results: A file results objects to which violations will be added. 
""" # Ignores calls starting with "HtmlUtils.", because those are safe regex = re.compile(fr"(?<!HtmlUtils).(?:{function_names})\(") for function_match in regex.finditer(file_contents): is_violation = True expression = self._get_expression_for_function(file_contents, function_match) if expression.end_index is not None: start_index = expression.start_index inner_start_index = function_match.end() close_paren_index = expression.end_index - 1 function_argument = file_contents[inner_start_index:close_paren_index].strip() if is_argument_safe is not None and is_caller_safe is None: is_violation = is_argument_safe(function_argument) is False elif is_caller_safe is not None and is_argument_safe is None: line_start_index = StringLines(file_contents).index_to_line_start_index(start_index) caller_line_start = file_contents[line_start_index:start_index] is_violation = is_caller_safe(caller_line_start) is False else: raise ValueError("Must supply either is_argument_safe, or is_caller_safe, but not both.") if is_violation: results.violations.append(ExpressionRuleViolation(rule, expression)) def _is_jquery_argument_safe_html_utils_call(self, argument): """ Checks that the argument sent to a jQuery DOM insertion function is a safe call to HtmlUtils. A safe argument is of the form: - HtmlUtils.xxx(anything).toString() - edx.HtmlUtils.xxx(anything).toString() Arguments: argument: The argument sent to the jQuery function (e.g. append(argument)). Returns: True if the argument is safe, and False otherwise. """ # match on HtmlUtils.xxx().toString() or edx.HtmlUtils match = re.search(r"(?:edx\.)?HtmlUtils\.[a-zA-Z0-9]+\(.*\)\.toString\(\)", argument) return match is not None and match.group() == argument def _is_jquery_argument_safe(self, argument): """ Check the argument sent to a jQuery DOM insertion function (e.g. append()) to check if it is safe. 
Safe arguments include: - the argument can end with ".el", ".$el" (with no concatenation) - the argument can be a single variable ending in "El" or starting with "$". For example, "testEl" or "$test". - the argument can be a single string literal with no HTML tags - the argument can be a call to $() with the first argument a string literal with a single HTML tag. For example, ".append($('<br/>'))" or ".append($('<br/>'))". - the argument can be a call to HtmlUtils.xxx(html).toString() Arguments: argument: The argument sent to the jQuery function (e.g. append(argument)). Returns: True if the argument is safe, and False otherwise. """ match_variable_name = re.search("[_$a-zA-Z]+[_$a-zA-Z0-9]*", argument) if match_variable_name is not None and match_variable_name.group() == argument: if argument.endswith('El') or argument.startswith('$'): return True elif argument.startswith('"') or argument.startswith("'"): # a single literal string with no HTML is ok # 1. it gets rid of false negatives for non-jquery calls (e.g. graph.append("g")) # 2. JQuery will treat this as a plain text string and will escape any & if needed. string = ParseString(argument, 0, len(argument)) if string.string == argument and "<" not in argument: return True elif argument.startswith('$('): # match on JQuery calls with single string and single HTML tag # Examples: # $("<span>") # $("<div/>") # $("<div/>", {...}) match = re.search(r"""\$\(\s*['"]<[a-zA-Z0-9]+\s*[/]?>['"]\s*[,)]""", argument) if match is not None: return True elif self._is_jquery_argument_safe_html_utils_call(argument): return True # check rules that shouldn't use concatenation elif "+" not in argument: if argument.endswith('.el') or argument.endswith('.$el'): return True return False def _is_jquery_html_argument_safe(self, argument): """ Check the argument sent to the jQuery html() function to check if it is safe. Safe arguments to html(): - no argument (i.e. 
getter rather than setter) - empty string is safe - the argument can be a call to HtmlUtils.xxx(html).toString() Arguments: argument: The argument sent to html() in code (i.e. html(argument)). Returns: True if the argument is safe, and False otherwise. """ if argument == "" or argument == "''" or argument == '""': return True elif self._is_jquery_argument_safe_html_utils_call(argument): return True return False def _is_jquery_insert_caller_safe(self, caller_line_start): """ Check that the caller of a jQuery DOM insertion function that takes a target is safe (e.g. thisEl.appendTo(target)). If original line was:: draggableObj.iconEl.appendTo(draggableObj.containerEl); Parameter caller_line_start would be: draggableObj.iconEl Safe callers include: - the caller can be ".el", ".$el" - the caller can be a single variable ending in "El" or starting with "$". For example, "testEl" or "$test". Arguments: caller_line_start: The line leading up to the jQuery function call. Returns: True if the caller is safe, and False otherwise. """ # matches end of line for caller, which can't itself be a function caller_match = re.search(r"(?:\s*|[.])([_$a-zA-Z]+[_$a-zA-Z0-9])*$", caller_line_start) if caller_match is None: return False caller = caller_match.group(1) if caller is None: return False elif caller.endswith('El') or caller.startswith('$'): return True elif caller == 'el' or caller == 'parentNode': return True return False def _check_concat_with_html(self, file_contents, rule, results): """ Checks that strings with HTML are not concatenated Arguments: file_contents: The contents of the JavaScript file. rule: The rule that was violated if this fails. results: A file results objects to which violations will be added. """ lines = StringLines(file_contents) last_expression = None # Match quoted strings that starts with '<' or ends with '>'. regex_string_with_html = r""" {quote} # Opening quote. 
( \s*< # Starts with '<' (ignoring spaces) ([^{quote}]|[\\]{quote})* # followed by anything but a closing quote. | # Or, ([^{quote}]|[\\]{quote})* # Anything but a closing quote >\s* # ending with '>' (ignoring spaces) ) {quote} # Closing quote. """ # Match single or double quote. regex_string_with_html = "({}|{})".format( regex_string_with_html.format(quote="'"), regex_string_with_html.format(quote='"'), ) # Match quoted HTML strings next to a '+'. regex_concat_with_html = re.compile( r"(\+\s*{string_with_html}|{string_with_html}\s*\+)".format( string_with_html=regex_string_with_html, ), re.VERBOSE ) for match in regex_concat_with_html.finditer(file_contents): found_new_violation = False if last_expression is not None: last_line = lines.index_to_line_number(last_expression.start_index) # check if violation should be expanded to more of the same line if last_line == lines.index_to_line_number(match.start()): last_expression = Expression( last_expression.start_index, match.end(), template=file_contents ) else: results.violations.append(ExpressionRuleViolation( rule, last_expression )) found_new_violation = True else: found_new_violation = True if found_new_violation: last_expression = Expression( match.start(), match.end(), template=file_contents ) # add final expression if last_expression is not None: results.violations.append(ExpressionRuleViolation( rule, last_expression )) class PythonLinter(BaseLinter): """ The linter for Python files. The current implementation of the linter does naive Python parsing. It does not use the parser. One known issue is that parsing errors found inside a docstring need to be disabled, rather than being automatically skipped. Skipping docstrings is an enhancement that could be added. 
""" LINE_COMMENT_DELIM = "#" ruleset = RuleSet( python_parse_error='python-parse-error', python_custom_escape='python-custom-escape', # The Visitor classes are python-specific and should be moved into the PythonLinter once they have # been decoupled from the MakoTemplateLinter. ) + visitors.ruleset def __init__(self, skip_dirs=None): """ Init method. """ super().__init__() self._skip_python_dirs = skip_dirs or () def process_file(self, directory, file_name): """ Process file to determine if it is a Python file and if it is safe. Arguments: directory (string): The directory of the file to be checked file_name (string): A filename for a potential Python file Returns: The file results containing any violations. """ file_full_path = os.path.normpath(directory + '/' + file_name) results = FileResults(file_full_path) if not results.is_file: return results if file_name.lower().endswith('.py') is False: return results # skip tests.py files # TODO: Add configuration for files and paths if file_name.lower().endswith('tests.py'): return results # skip this linter code (i.e. xss_linter.py) if file_name == os.path.basename(__file__): return results if not self._is_valid_directory(self._skip_python_dirs, directory): return results return self._load_and_check_file_is_safe(file_full_path, self.check_python_file_is_safe, results) def check_python_file_is_safe(self, file_contents, results): """ Checks for violations in a Python file. Arguments: file_contents: The contents of the Python file. results: A file results objects to which violations will be added. """ root_node = self.parse_python_code(file_contents, results) self.check_python_code_is_safe(file_contents, root_node, results) # Check rules specific to .py files only # Note that in template files, the scope is different, so you can make # different assumptions. 
if root_node is not None: # check format() rules that can be run on outer-most format() calls visitor = visitors.OuterFormatVisitor(file_contents, results) visitor.visit(root_node) results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM) def check_python_code_is_safe(self, python_code, root_node, results): """ Checks for violations in Python code snippet. This can also be used for Python that appears in files other than .py files, like in templates. Arguments: python_code: The contents of the Python code. root_node: The root node of the Python code parsed by AST. results: A file results objects to which violations will be added. """ if root_node is not None: # check illegal concatenation and interpolation visitor = visitors.AllNodeVisitor(python_code, results) visitor.visit(root_node) # check rules parse with regex self._check_custom_escape(python_code, results) def parse_python_code(self, python_code, results): """ Parses Python code. Arguments: python_code: The Python code to be parsed. Returns: The root node that was parsed, or None for SyntaxError. """ python_code = self._strip_file_encoding(python_code) try: return ast.parse(python_code) except SyntaxError as e: if e.offset is None: expression = Expression(0) else: lines = StringLines(python_code) line_start_index = lines.line_number_to_start_index(e.lineno) expression = Expression(line_start_index + e.offset) results.violations.append(ExpressionRuleViolation( self.ruleset.python_parse_error, expression )) return None def _strip_file_encoding(self, file_contents): """ Removes file encoding from file_contents because the file was already read into Unicode, and the AST parser complains. Arguments: file_contents: The Python file contents. Returns: The Python file contents with the encoding stripped. 
""" # PEP-263 Provides Regex for Declaring Encoding # Example: -*- coding: <encoding name> -*- # This is only allowed on the first two lines, and it must be stripped # before parsing, because we have already read into Unicode and the # AST parser complains. encoding_regex = re.compile(r"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)") encoding_match = encoding_regex.search(file_contents) # If encoding comment not found on first line, search second line. if encoding_match is None: lines = StringLines(file_contents) if lines.line_count() >= 2: encoding_match = encoding_regex.search(lines.line_number_to_line(2)) # If encoding was found, strip it if encoding_match is not None: file_contents = file_contents.replace(encoding_match.group(), '#', 1) return file_contents def _check_custom_escape(self, file_contents, results): """ Checks for custom escaping calls, rather than using a standard escaping method. Arguments: file_contents: The contents of the Python file results: A list of results into which violations will be added. """ for match in re.finditer("(<.*&lt;|&lt;.*<)", file_contents): expression = Expression(match.start(), match.end()) results.violations.append(ExpressionRuleViolation( self.ruleset.python_custom_escape, expression )) class MakoTemplateLinter(BaseLinter): """ The linter for Mako template files. 
""" LINE_COMMENT_DELIM = "##" ruleset = RuleSet( mako_missing_default='mako-missing-default', mako_multiple_page_tags='mako-multiple-page-tags', mako_unparseable_expression='mako-unparseable-expression', mako_unwanted_html_filter='mako-unwanted-html-filter', mako_invalid_html_filter='mako-invalid-html-filter', mako_invalid_js_filter='mako-invalid-js-filter', mako_js_missing_quotes='mako-js-missing-quotes', mako_js_html_string='mako-js-html-string', mako_html_entities='mako-html-entities', mako_unknown_context='mako-unknown-context', # NOTE The MakoTemplateLinter directly checks for python_wrap_html and directly # instantiates Visitor instances to check for python issues. This logic should # be moved into the PythonLinter. The MakoTemplateLinter should only check for # Mako-specific issues. python_wrap_html='python-wrap-html', ) + visitors.ruleset def __init__(self, javascript_linter, python_linter, skip_dirs=None): """ Init method. """ super().__init__() self.javascript_linter = javascript_linter self.python_linter = python_linter self.ruleset = self.ruleset + self.javascript_linter.ruleset + self.python_linter.ruleset self._skip_mako_dirs = skip_dirs or () def process_file(self, directory, file_name): """ Process file to determine if it is a Mako template file and if it is safe. Arguments: directory (string): The directory of the file to be checked file_name (string): A filename for a potential Mako file Returns: The file results containing any violations. """ mako_file_full_path = os.path.normpath(directory + '/' + file_name) results = FileResults(mako_file_full_path) if not results.is_file: return results if not self._is_valid_directory(directory): return results # TODO: When safe-by-default is turned on at the platform level, will we: # 1. Turn it on for .html only, or # 2. Turn it on for all files, and have different rulesets that have # different rules of .xml, .html, .js, .txt Mako templates (e.g. use # the n filter to turn off h for some of these)? 
# For now, we only check .html and .xml files if not (file_name.lower().endswith('.html') or file_name.lower().endswith('.xml')): return results return self._load_and_check_file_is_safe(mako_file_full_path, self._check_mako_file_is_safe, results) def _is_valid_directory(self, directory): """ Determines if the provided directory is a directory that could contain Mako template files that need to be linted. Arguments: directory: The directory to be linted. Returns: True if this directory should be linted for Mako template violations and False otherwise. """ if is_skip_dir(self._skip_mako_dirs, directory): return False # TODO: This is an imperfect guess concerning the Mako template # directories. This needs to be reviewed before turning on safe by # default at the platform level. if ('/templates/' in directory) or directory.endswith('/templates'): return True return False def _check_mako_file_is_safe(self, mako_template, results): """ Checks for violations in a Mako template. Arguments: mako_template: The contents of the Mako template. results: A file results objects to which violations will be added. """ if self._is_django_template(mako_template): return has_page_default = self._has_page_default(mako_template, results) self._check_mako_expressions(mako_template, has_page_default, results) self._check_mako_python_blocks(mako_template, has_page_default, results) results.prepare_results(mako_template, line_comment_delim=self.LINE_COMMENT_DELIM) def _is_django_template(self, mako_template): """ Determines if the template is actually a Django template. Arguments: mako_template: The template code. Returns: True if this is really a Django template, and False otherwise. """ if re.search('({%.*%})|({{.*}})|({#.*#})', mako_template) is not None: return True return False def _get_page_tag_count(self, mako_template): """ Determines the number of page expressions in the Mako template. Ignores page expressions that are commented out. 
Arguments: mako_template: The contents of the Mako template. Returns: The number of page expressions """ count = len(re.findall('<%page ', mako_template, re.IGNORECASE)) count_commented = len(re.findall(r'##\s+<%page ', mako_template, re.IGNORECASE)) return max(0, count - count_commented) def _has_page_default(self, mako_template, results): """ Checks if the Mako template contains the page expression marking it as safe by default. Arguments: mako_template: The contents of the Mako template. results: A list of results into which violations will be added. Side effect: Adds violations regarding page default if necessary Returns: True if the template has the page default, and False otherwise. """ page_tag_count = self._get_page_tag_count(mako_template) # check if there are too many page expressions if 2 <= page_tag_count: results.violations.append(RuleViolation(self.ruleset.mako_multiple_page_tags)) return False # make sure there is exactly 1 page expression, excluding commented out # page expressions, before proceeding elif page_tag_count != 1: results.violations.append(RuleViolation(self.ruleset.mako_missing_default)) return False # check that safe by default (h filter) is turned on page_h_filter_regex = re.compile('<%page[^>]*expression_filter=(?:"h"|\'h\')[^>]*/>') page_match = page_h_filter_regex.search(mako_template) if not page_match: results.violations.append(RuleViolation(self.ruleset.mako_missing_default)) return page_match def _check_mako_expressions(self, mako_template, has_page_default, results): """ Searches for Mako expressions and then checks if they contain violations, including checking JavaScript contexts for JavaScript violations. Arguments: mako_template: The contents of the Mako template. has_page_default: True if the page is marked as default, False otherwise. results: A list of results into which violations will be added. 
""" expressions = self._find_mako_expressions(mako_template) contexts = self._get_contexts(mako_template) self._check_javascript_contexts(mako_template, contexts, results) for expression in expressions: if expression.end_index is None: results.violations.append(ExpressionRuleViolation( self.ruleset.mako_unparseable_expression, expression )) continue context = self._get_context(contexts, expression.start_index) self._check_expression_and_filters(mako_template, expression, context, has_page_default, results) def _check_javascript_contexts(self, mako_template, contexts, results): """ Lint the JavaScript contexts for JavaScript violations inside a Mako template. Arguments: mako_template: The contents of the Mako template. contexts: A list of context dicts with 'type' and 'index'. results: A list of results into which violations will be added. Side effect: Adds JavaScript violations to results. """ javascript_start_index = None for context in contexts: if context['type'] == 'javascript': if javascript_start_index is None: javascript_start_index = context['index'] else: if javascript_start_index is not None: javascript_end_index = context['index'] javascript_code = mako_template[javascript_start_index:javascript_end_index] self._check_javascript_context(javascript_code, javascript_start_index, results) javascript_start_index = None if javascript_start_index is not None: javascript_code = mako_template[javascript_start_index:] self._check_javascript_context(javascript_code, javascript_start_index, results) def _check_javascript_context(self, javascript_code, start_offset, results): """ Lint a single JavaScript context for JavaScript violations inside a Mako template. Arguments: javascript_code: The template contents of the JavaScript context. start_offset: The offset of the JavaScript context inside the original Mako template. results: A list of results into which violations will be added. Side effect: Adds JavaScript violations to results. 
""" javascript_results = FileResults("") self.javascript_linter.check_javascript_file_is_safe(javascript_code, javascript_results) self._shift_and_add_violations(javascript_results, start_offset, results) def _check_mako_python_blocks(self, mako_template, has_page_default, results): """ Searches for Mako python blocks and checks if they contain violations. Arguments: mako_template: The contents of the Mako template. has_page_default: True if the page is marked as default, False otherwise. results: A list of results into which violations will be added. """ # Finds Python blocks such as <% ... %>, skipping other Mako start tags # such as <%def> and <%page>. python_block_regex = re.compile(r'<%\s(?P<code>.*?)%>', re.DOTALL) for python_block_match in python_block_regex.finditer(mako_template): self._check_expression_python( python_code=python_block_match.group('code'), start_offset=(python_block_match.start() + len('<% ')), has_page_default=has_page_default, results=results ) def _check_expression_python(self, python_code, start_offset, has_page_default, results): """ Lint the Python inside a single Python expression in a Mako template. Arguments: python_code: The Python contents of an expression. start_offset: The offset of the Python content inside the original Mako template. has_page_default: True if the page is marked as default, False otherwise. results: A list of results into which violations will be added. Side effect: Adds Python violations to results. """ python_results = FileResults("") # Dedent expression internals so it is parseable. # Note that the final columns reported could be off somewhat. 
adjusted_python_code = textwrap.dedent(python_code) first_letter_match = re.search(r'\w', python_code) adjusted_first_letter_match = re.search(r'\w', adjusted_python_code) if first_letter_match is not None and adjusted_first_letter_match is not None: start_offset += (first_letter_match.start() - adjusted_first_letter_match.start()) python_code = adjusted_python_code root_node = self.python_linter.parse_python_code(python_code, python_results) self.python_linter.check_python_code_is_safe(python_code, root_node, python_results) # Check mako expression specific Python rules. if root_node is not None: visitor = visitors.HtmlStringVisitor(python_code, python_results, True) visitor.visit(root_node) for unsafe_html_string_node in visitor.unsafe_html_string_nodes: python_results.violations.append(ExpressionRuleViolation( self.ruleset.python_wrap_html, visitor.node_to_expression(unsafe_html_string_node) )) if has_page_default: for over_escaped_entity_string_node in visitor.over_escaped_entity_string_nodes: python_results.violations.append(ExpressionRuleViolation( self.ruleset.mako_html_entities, visitor.node_to_expression(over_escaped_entity_string_node) )) python_results.prepare_results(python_code, line_comment_delim=self.LINE_COMMENT_DELIM) self._shift_and_add_violations(python_results, start_offset, results) def _shift_and_add_violations(self, other_linter_results, start_offset, results): """ Adds results from a different linter to the Mako results, after shifting the offset into the original Mako template. Arguments: other_linter_results: Results from another linter. start_offset: The offset of the linted code, a part of the template, inside the original Mako template. results: A list of results into which violations will be added. Side effect: Adds violations to results. 
""" # translate the violations into the proper location within the original # Mako template for violation in other_linter_results.violations: expression = violation.expression expression.start_index += start_offset if expression.end_index is not None: expression.end_index += start_offset results.violations.append(ExpressionRuleViolation(violation.rule, expression)) def _check_expression_and_filters(self, mako_template, expression, context, has_page_default, results): """ Checks that the filters used in the given Mako expression are valid for the given context. Adds violation to results if there is a problem. Arguments: mako_template: The contents of the Mako template. expression: A Mako Expression. context: The context of the page in which the expression was found (e.g. javascript, html). has_page_default: True if the page is marked as default, False otherwise. results: A list of results into which violations will be added. """ if context == 'unknown': results.violations.append(ExpressionRuleViolation( self.ruleset.mako_unknown_context, expression )) return # Example: finds "| n, h}" when given "${x | n, h}" filters_regex = re.compile(r'\|([.,\w\s]*)\}') filters_match = filters_regex.search(expression.expression) # Check Python code inside expression. if filters_match is None: python_code = expression.expression[2:-1] else: python_code = expression.expression[2:filters_match.start()] self._check_expression_python(python_code, expression.start_index + 2, has_page_default, results) # Check filters. 
if filters_match is None: if context == 'javascript': results.violations.append(ExpressionRuleViolation( self.ruleset.mako_invalid_js_filter, expression )) return filters = filters_match.group(1).replace(" ", "").split(",") if filters == ['n', 'decode.utf8']: # {x | n, decode.utf8} is valid in any context pass elif context == 'html': if filters == ['h']: if has_page_default: # suppress this violation if the page default hasn't been set, # otherwise the template might get less safe results.violations.append(ExpressionRuleViolation( self.ruleset.mako_unwanted_html_filter, expression )) elif filters == ['n', 'strip_all_tags_but_br']: # {x | n, strip_all_tags_but_br} is valid in html context pass else: results.violations.append(ExpressionRuleViolation( self.ruleset.mako_invalid_html_filter, expression )) elif context == 'javascript': self._check_js_expression_not_with_html(mako_template, expression, results) if filters == ['n', 'dump_js_escaped_json']: # {x | n, dump_js_escaped_json} is valid pass elif filters == ['n', 'js_escaped_string']: # {x | n, js_escaped_string} is valid, if surrounded by quotes self._check_js_string_expression_in_quotes(mako_template, expression, results) else: results.violations.append(ExpressionRuleViolation( self.ruleset.mako_invalid_js_filter, expression )) def _check_js_string_expression_in_quotes(self, mako_template, expression, results): """ Checks that a Mako expression using js_escaped_string is surrounded by quotes. Arguments: mako_template: The contents of the Mako template. expression: A Mako Expression. results: A list of results into which violations will be added. 
""" parse_string = self._find_string_wrapping_expression(mako_template, expression) if parse_string is None: results.violations.append(ExpressionRuleViolation( self.ruleset.mako_js_missing_quotes, expression )) def _check_js_expression_not_with_html(self, mako_template, expression, results): """ Checks that a Mako expression in a JavaScript context does not appear in a string that also contains HTML. Arguments: mako_template: The contents of the Mako template. expression: A Mako Expression. results: A list of results into which violations will be added. """ parse_string = self._find_string_wrapping_expression(mako_template, expression) if parse_string is not None and re.search('[<>]', parse_string.string) is not None: results.violations.append(ExpressionRuleViolation( self.ruleset.mako_js_html_string, expression )) def _find_string_wrapping_expression(self, mako_template, expression): """ Finds the string wrapping the Mako expression if there is one. Arguments: mako_template: The contents of the Mako template. expression: A Mako Expression. Returns: ParseString representing a scrubbed version of the wrapped string, where the Mako expression was replaced with "${...}", if a wrapped string was found. Otherwise, returns None if none found. """ lines = StringLines(mako_template) start_index = lines.index_to_line_start_index(expression.start_index) if expression.end_index is not None: end_index = lines.index_to_line_end_index(expression.end_index) else: return None # scrub out the actual expression so any code inside the expression # doesn't interfere with rules applied to the surrounding code (i.e. # checking JavaScript). 
scrubbed_lines = "".join(( mako_template[start_index:expression.start_index], "${...}", mako_template[expression.end_index:end_index] )) adjusted_start_index = expression.start_index - start_index start_index = 0 while True: parse_string = ParseString(scrubbed_lines, start_index, len(scrubbed_lines)) # check for validly parsed string if (parse_string.start_index is not None and parse_string.end_index is not None) \ and (0 <= parse_string.start_index < parse_string.end_index): # check if expression is contained in the given string if parse_string.start_index < adjusted_start_index < parse_string.end_index: return parse_string else: # move to check next string start_index = parse_string.end_index else: break return None def _get_contexts(self, mako_template): """ Returns a data structure that represents the indices at which the template changes from HTML context to JavaScript and back. Return: A list of dicts where each dict contains: - index: the index of the context. - type: the context type (e.g. 'html' or 'javascript'). 
""" contexts_re = re.compile( r""" <script.*?(?<!/)> | # script tag start </script> | # script tag end <%static:require_module(_async)?.*?(?<!/)> | # require js script tag start (optionally the _async version) </%static:require_module(_async)?> | # require js script tag end (optionally the _async version) <%static:webpack.*(?<!/)> | # webpack script tag start </%static:webpack> | # webpack script tag end <%static:studiofrontend.*?(?<!/)> | # studiofrontend script tag start </%static:studiofrontend> | # studiofrontend script tag end <%block[ ]*name=['"]requirejs['"]\w*(?<!/)> | # require js tag start </%block> # require js tag end """, re.VERBOSE | re.IGNORECASE ) media_type_re = re.compile(r"""type=['"].*?['"]""", re.IGNORECASE) contexts = [{'index': 0, 'type': 'html'}] javascript_types = [ 'text/javascript', 'text/ecmascript', 'application/ecmascript', 'application/javascript', 'text/x-mathjax-config', 'json/xblock-args', 'application/json', ] html_types = ['text/template'] for context in contexts_re.finditer(mako_template): match_string = context.group().lower() if match_string.startswith("<script"): match_type = media_type_re.search(match_string) context_type = 'javascript' if match_type is not None: # get media type (e.g. get text/javascript from # type="text/javascript") match_type = match_type.group()[6:-1].lower() if match_type in html_types: context_type = 'html' elif match_type not in javascript_types: context_type = 'unknown' contexts.append({'index': context.end(), 'type': context_type}) elif match_string.startswith("</"): contexts.append({'index': context.start(), 'type': 'html'}) else: contexts.append({'index': context.end(), 'type': 'javascript'}) return contexts def _get_context(self, contexts, index): """ Gets the context (e.g. javascript, html) of the template at the given index. Arguments: contexts: A list of dicts where each dict contains the 'index' of the context and the context 'type' (e.g. 'html' or 'javascript'). 
index: The index for which we want the context. Returns: The context (e.g. javascript or html) for the given index. """ current_context = contexts[0]['type'] for context in contexts: if context['index'] <= index: current_context = context['type'] else: break return current_context def _find_mako_expressions(self, mako_template): """ Finds all the Mako expressions in a Mako template and creates a list of dicts for each expression. Arguments: mako_template: The content of the Mako template. Returns: A list of Expressions. """ start_delim = '${' start_index = 0 expressions = [] while True: start_index = mako_template.find(start_delim, start_index) if start_index < 0: break # If start of mako expression is commented out, skip it. uncommented_start_index = self._uncommented_start_index(mako_template, start_index) if uncommented_start_index != start_index: start_index = uncommented_start_index continue result = self._find_closing_char_index( start_delim, '{', '}', mako_template, start_index=start_index + len(start_delim) ) if result is None: expression = Expression(start_index) # for parsing error, restart search right after the start of the # current expression start_index = start_index + len(start_delim) else: close_char_index = result['close_char_index'] expression = mako_template[start_index:close_char_index + 1] expression = Expression( start_index, end_index=close_char_index + 1, template=mako_template, start_delim=start_delim, end_delim='}', strings=result['strings'], ) # restart search after the current expression start_index = expression.end_index expressions.append(expression) return expressions class DjangoTemplateLinter(BaseLinter): """ The linter for Django template files """ LINE_COMMENT_DELIM = "{#" ruleset = RuleSet( django_trans_missing_escape='django-trans-missing-escape', django_trans_invalid_escape_filter='django-trans-invalid-escape-filter', django_trans_escape_variable_mismatch='django-trans-escape-variable-mismatch', 
        django_blocktrans_missing_escape_filter='django-blocktrans-missing-escape-filter',
        django_blocktrans_parse_error='django-blocktrans-parse-error',
        django_blocktrans_escape_filter_parse_error='django-blocktrans-escape-filter-parse-error',
        django_html_interpolation_missing_safe_filter='django-html-interpolation-missing-safe-filter',
        django_html_interpolation_missing='django-html-interpolation-missing',
        django_html_interpolation_invalid_tag='django-html-interpolation-invalid-tag',
    )

    def __init__(self, skip_dirs=None):
        """
        Init method.

        Arguments:
            skip_dirs: Iterable of directories that should not be linted.
                Defaults to an empty tuple.
        """
        super().__init__()
        self._skip_django_dirs = skip_dirs or ()

    def process_file(self, directory, file_name):
        """
        Process file to determine if it is a Django template file and
        if it is safe.

        Arguments:
            directory (string): The directory of the file to be checked
            file_name (string): A filename for a potential Django file

        Returns:
            The file results containing any violations.

        """
        django_file_full_path = os.path.normpath(directory + '/' + file_name)
        results = FileResults(django_file_full_path)

        if not results.is_file:
            return results

        if not self._is_valid_directory(directory):
            return results

        # Only .html files are checked for Django template violations.
        if not (file_name.lower().endswith('.html')):
            return results

        return self._load_and_check_file_is_safe(django_file_full_path, self._check_django_file_is_safe, results)

    def _is_valid_directory(self, directory):
        """
        Determines if the provided directory is a directory that could contain
        Django template files that need to be linted.

        Arguments:
            directory: The directory to be linted.

        Returns:
            True if this directory should be linted for Django template
            violations and False otherwise.
        """
        if is_skip_dir(self._skip_django_dirs, directory):
            return False

        # Templates conventionally live under a 'templates' directory.
        if ('/templates/' in directory) or directory.endswith('/templates'):
            return True

        return False

    def _is_django_template(self, django_template):
        """
        Determines if the template is actually a Django template.

        Arguments:
            django_template: The template code.

        Returns:
            True if this is really a Django template, and False otherwise.
        """
        # The presence of any Django tag/variable/comment syntax marks this
        # as a Django template.
        if re.search('({%.*%})|({{.*}})|({#.*#})', django_template) is not None:
            return True
        return False

    def _check_django_file_is_safe(self, django_template, results):
        """
        Checks for violations in a Django template.

        Arguments:
            django_template: The contents of the Django template.
            results: A file results object to which violations will be added.
        """
        if not self._is_django_template(django_template):
            return
        self._check_django_expression(django_template, results)
        results.prepare_results(django_template, line_comment_delim=self.LINE_COMMENT_DELIM)

    def _check_django_expression(self, django_template, results):
        """
        Searches for django trans and blocktrans expression and then checks if they
        contain violations

        Arguments:
            django_template: The contents of the Django template.
            results: A list of results into which violations will be added.

        """
        expressions = []
        self._find_django_expressions(django_template, results, expressions)

        # Each expression validates itself and records any violations in results.
        for expr in expressions:
            expr.validate_expression(django_template, expressions)

    def _find_django_expressions(self, django_template, results, expressions):
        """
        Finds all the Django trans/blocktrans/interpolate_html expressions in a
        Django template, skipping any that are commented out, and appends an
        expression object for each one to the supplied ``expressions`` list.

        Arguments:
            django_template: The content of the Django template.
            results: A file results object passed through to each expression.
            expressions: A list to which the found Expressions are appended.
""" comments = list(re.finditer(r'{% comment .*%}', django_template, re.I)) endcomments = list(re.finditer(r'{% endcomment .*%}', django_template, re.I)) trans_iterator = re.finditer(r'{% trans .*?%}', django_template, re.I) for t in trans_iterator: if self._check_expression_not_commented(t, comments, endcomments): continue trans_expr = TransExpression(self.ruleset, results, t.start(), t.end(), start_delim='{%', end_delim='%}', template=django_template) if trans_expr: expressions.append(trans_expr) block_trans_iterator = re.finditer(r'{% blocktrans .*?%}', django_template, re.I) for bt in block_trans_iterator: if self._check_expression_not_commented(bt, comments, endcomments): continue trans_expr = BlockTransExpression(self.ruleset, results, bt.start(), bt.end(), start_delim='{%', end_delim='%}', template=django_template) if trans_expr: expressions.append(trans_expr) interpolation_iterator = re.finditer(r'{% interpolate_html .*?%}', django_template, re.I) for it in interpolation_iterator: if self._check_expression_not_commented(it, comments, endcomments): continue trans_expr = HtmlInterpolateExpression(self.ruleset, results, it.start(), it.end(), start_delim='{%', end_delim='%}', template=django_template) if trans_expr: expressions.append(trans_expr) def _check_expression_not_commented(self, expr, comments, endcomments): for i in range(len(endcomments)): start_comment = comments[i] end_comment = endcomments[i] if (expr.start() >= start_comment.start()) and \ (expr.start() <= end_comment.start()): return True
agpl-3.0
taion/flask-jsonapiview
tests/test_misc.py
2
5596
import pytest
from marshmallow import Schema, fields
from sqlalchemy import Column, Integer

from flask_resty import Api, GenericModelView, StrictRule
from flask_resty.testing import assert_response

# -----------------------------------------------------------------------------


# pytest.yield_fixture has been deprecated since pytest 3.0 (and was removed
# in pytest 6.2); a plain pytest.fixture supports yield-style teardown.
@pytest.fixture
def models(db):
    """Create the Widget model and its table; drop the table on teardown."""

    class Widget(db.Model):
        __tablename__ = "widgets"

        id = Column(Integer, primary_key=True)

    db.create_all()

    yield {"widget": Widget}

    db.drop_all()


@pytest.fixture
def schemas():
    """Provide the serialization schema for widgets."""

    class WidgetSchema(Schema):
        id = fields.Integer(as_string=True)

    return {"widget": WidgetSchema()}


@pytest.fixture
def views(models, schemas):
    """Provide list, detail, and customized views over the Widget model."""

    class WidgetViewBase(GenericModelView):
        model = models["widget"]
        schema = schemas["widget"]

    class WidgetListView(WidgetViewBase):
        def get(self):
            return self.list()

        def post(self):
            # allow_client_id lets the POST body supply the primary key.
            return self.create(allow_client_id=True)

    class WidgetView(WidgetViewBase):
        def get(self, id):
            return self.retrieve(id)

    class CustomWidgetView(WidgetViewBase):
        def delete(self, id):
            return self.destroy(id)

        def update_item_raw(self, widget, data):
            return self.model(id=9)

        def delete_item_raw(self, widget):
            return self.model(id=9)

        def make_deleted_response(self, widget):
            return self.make_item_response(widget)

    return {
        "widget_list": WidgetListView,
        "widget": WidgetView,
        "custom_widget": CustomWidgetView,
    }


@pytest.fixture(autouse=True)
def data(db, models):
    """Seed one widget (id 1) so GET endpoints have something to return."""
    db.session.add(models["widget"]())
    db.session.commit()


# -----------------------------------------------------------------------------


def test_api_prefix(app, views, client, base_client):
    api = Api(app, "/api")
    api.add_resource("/widgets", views["widget_list"])

    response = client.get("/widgets")
    assert_response(response, 200, [{"id": "1"}])

    response = base_client.get("/api/widgets")
    assert_response(response, 200, [{"id": "1"}])


def test_rule_without_slash(app, views, client):
    api = Api(app, "/api")
    api.add_resource("/widgets", views["widget_list"])

    response = client.get("/widgets")
    assert_response(response, 200)
response = client.get("/widgets/") assert_response(response, 404) def test_rule_with_slash(app, views, client): api = Api(app, "/api") api.add_resource("/widgets/", views["widget_list"]) response = client.get("/widgets") assert_response(response, 308) response = client.get("/widgets/") assert_response(response, 200) def test_no_append_slash(monkeypatch, app, views, client): monkeypatch.setattr(app, "url_rule_class", StrictRule) api = Api(app, "/api") api.add_resource("/widgets/", views["widget_list"]) response = client.get("/widgets") assert_response(response, 404) response = client.get("/widgets/") assert_response(response, 200) def test_create_client_id(app, views, client): api = Api(app) api.add_resource("/widgets", views["widget_list"], views["widget"]) response = client.post("/widgets", data={"id": "100"}) assert response.headers["Location"] == "http://localhost/widgets/100" assert_response(response, 201, {"id": "100"}) def test_create_no_location(app, views, client): views["widget_list"].get_location = lambda self, item: None api = Api(app) api.add_resource("/widgets", views["widget_list"], views["widget"]) response = client.post("/widgets", data={}) assert "Location" not in response.headers assert_response(response, 201, {"id": "2"}) def test_training_slash(app, views, client): api = Api(app) api.add_resource( "/widgets/", views["widget_list"], views["widget"], id_rule="<id>/" ) response = client.post("/widgets/", data={"id": "100"}) assert response.headers["Location"] == "http://localhost/widgets/100/" assert_response(response, 201, {"id": "100"}) response = client.get("/widgets/100/") assert response.status_code == 200 def test_resource_rules(app, views, client): api = Api(app) api.add_resource( base_rule="/widget/<id>", base_view=views["widget"], alternate_rule="/widgets", alternate_view=views["widget_list"], ) get_response = client.get("/widget/1") assert_response(get_response, 200, {"id": "1"}) post_response = client.post("/widgets", data={}) assert 
post_response.headers["Location"] == "http://localhost/widget/2" assert_response(post_response, 201, {"id": "2"}) def test_factory_pattern(app, views, client): api = Api() api.init_app(app) with pytest.raises(AssertionError, match="no application specified"): api.add_resource("/widgets", views["widget_list"]) api.add_resource("/widgets", views["widget_list"], app=app) response = client.get("/widgets") assert_response(response, 200, [{"id": "1"}]) def test_view_func_wrapper(app, views): api = Api(app) api.add_resource("/widgets", views["widget_list"], views["widget"]) # This is really a placeholder for asserting that e.g. custom New Relic # view information gets passed through. assert app.view_functions["WidgetView"].__name__ == "WidgetView" def test_delete_return_item(app, views, client): api = Api(app) api.add_resource("/widgets/<int:id>", views["custom_widget"]) response = client.delete("/widgets/1") assert_response(response, 200, {"id": "9"})
mit