repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
MobSF/Mobile-Security-Framework-MobSF | mobsf/DynamicAnalyzer/views/android/frida_server_download.py | 2 | 2270 | # -*- coding: utf_8 -*-
"""Download Frida Server."""
import logging
from pathlib import Path
from lzma import LZMAFile
from shutil import copyfileobj
import requests
from django.conf import settings
from mobsf.MobSF.utils import (
is_internet_available,
upstream_proxy,
)
logger = logging.getLogger(__name__)
def clean_up_old_binaries(dirc, version):
    """Remove stale frida-server binaries from *dirc*.

    Every regular file whose name starts with ``frida-server`` and does
    not contain *version* is deleted; files of the current version and
    unrelated files are left untouched.  Deletion failures are ignored
    (best-effort cleanup).
    """
    for entry in Path(dirc).iterdir():
        if not entry.is_file():
            continue
        name = entry.name
        if name.startswith('frida-server') and version not in name:
            try:
                entry.unlink()
            except Exception:
                # Best-effort: a locked/busy binary is simply left behind.
                pass
def download_frida_server(url, version, fname):
    """Download and decompress a frida-server binary.

    Streams the xz-compressed binary from *url*, decompresses it into
    ``settings.DWD_DIR`` under *fname*, then deletes binaries of other
    versions.

    :param url: direct download URL of the ``.xz`` asset.
    :param version: frida version string, used to keep the new binary
        during cleanup.
    :param fname: target file name for the decompressed binary.
    :returns: ``True`` on success, ``False`` on any failure.
    """
    try:
        download_dir = Path(settings.DWD_DIR)
        logger.info('Downloading binary %s', fname)
        dwd_loc = download_dir / fname
        # timeout guards against a hung connection; the original call
        # could block forever.
        with requests.get(url, stream=True, timeout=300) as r:
            # Fail fast on HTTP errors instead of feeding an error page
            # to the LZMA decompressor (original skipped this check).
            r.raise_for_status()
            with LZMAFile(r.raw) as f:
                with open(dwd_loc, 'wb') as flip:
                    copyfileobj(f, flip)
        clean_up_old_binaries(download_dir, version)
        return True
    except Exception:
        logger.exception('[ERROR] Downloading Frida Server Binary')
        return False
def update_frida_server(arch, version):
    """Ensure the frida-server binary for *arch*/*version* is available.

    Checks the download directory first; if absent and the internet is
    reachable, queries the frida release API for the matching ``.xz``
    asset and downloads it.

    :param arch: Android ABI string (e.g. ``x86_64``).
    :param version: frida version to fetch.
    :returns: ``True`` if the binary exists or was downloaded,
        ``False`` otherwise.
    """
    download_dir = Path(settings.DWD_DIR)
    fserver = f'frida-server-{version}-android-{arch}'
    frida_bin = download_dir / fserver
    if frida_bin.is_file():
        # Already downloaded; nothing to do.
        return True
    if not is_internet_available():
        return False
    # Bug fix: the original referenced proxies/verify even when
    # upstream_proxy() raised, producing a NameError.  Default to a
    # direct, verified connection in that case.
    proxies, verify = {}, True
    try:
        proxies, verify = upstream_proxy('https')
    except Exception:
        logger.exception('[ERROR] Setting upstream proxy')
    try:
        response = requests.get(f'{settings.FRIDA_SERVER}{version}',
                                timeout=3,
                                proxies=proxies,
                                verify=verify)
        # Surface HTTP errors explicitly rather than failing on .json().
        response.raise_for_status()
        for item in response.json()['assets']:
            if item['name'] == f'{fserver}.xz':
                url = item['browser_download_url']
                return download_frida_server(url, version, fserver)
        # No matching asset for this arch/version.
        return False
    except Exception:
        logger.exception('[ERROR] Fetching Frida Server Release')
        return False
| gpl-3.0 |
knehez/edx-platform | lms/djangoapps/shoppingcart/tests/test_payment_fake.py | 147 | 4130 | """
Tests for the fake payment page used in acceptance tests.
"""
from django.test import TestCase
from shoppingcart.processors.CyberSource2 import sign, verify_signatures
from shoppingcart.processors.exceptions import CCProcessorSignatureException
from shoppingcart.tests.payment_fake import PaymentFakeView
from collections import OrderedDict
class PaymentFakeViewTest(TestCase):
    """
    Test that the fake payment view interacts
    correctly with the shopping cart.
    """
    # Representative CyberSource POST parameters.  An OrderedDict keeps
    # the field order deterministic so signatures are reproducible.
    CLIENT_POST_PARAMS = OrderedDict([
        ('amount', '25.00'),
        ('currency', 'usd'),
        ('transaction_type', 'sale'),
        ('orderNumber', '33'),
        ('access_key', '123456789'),
        ('merchantID', 'edx'),
        ('djch', '012345678912'),
        ('orderPage_version', 2),
        ('orderPage_serialNumber', '1234567890'),
        ('profile_id', "00000001"),
        ('reference_number', 10),
        ('locale', 'en'),
        ('signed_date_time', '2014-08-18T13:59:31Z'),
    ])
    def setUp(self):
        super(PaymentFakeViewTest, self).setUp()
        # Reset the (class-level) view state so tests are order-independent.
        PaymentFakeView.PAYMENT_STATUS_RESPONSE = "success"
    def test_accepts_client_signatures(self):
        """A correctly signed POST is served the payment form page."""
        post_params = sign(self.CLIENT_POST_PARAMS)
        # Simulate a POST request from the payment workflow
        # page to the fake payment page.
        resp = self.client.post(
            '/shoppingcart/payment_fake', dict(post_params)
        )
        self.assertEqual(resp.status_code, 200)
        # Expect that we were served the payment page
        # (not the error page)
        self.assertIn("Payment Form", resp.content)
    def test_rejects_invalid_signature(self):
        """A tampered signature must produce the error page."""
        post_params = sign(self.CLIENT_POST_PARAMS)
        # Tamper with the signature
        post_params['signature'] = "invalid"
        resp = self.client.post(
            '/shoppingcart/payment_fake', dict(post_params)
        )
        # Expect that we got an error
        self.assertIn("Error", resp.content)
    def test_sends_valid_signature(self):
        """Response params generated by the fake view verify client-side."""
        post_params = sign(self.CLIENT_POST_PARAMS)
        # Get the POST params that the view would send back to us
        resp_params = PaymentFakeView.response_post_params(post_params)
        # Check that the client accepts these
        try:
            verify_signatures(resp_params)
        except CCProcessorSignatureException:
            self.fail("Client rejected signatures.")
    def _configure_status_and_check(self, status, expected_decision):
        """PUT *status* to the fake view, then assert the response decision.

        Helper extracted from test_set_payment_status, which repeated the
        same PUT/check sequence three times.
        """
        resp = self.client.put(
            '/shoppingcart/payment_fake',
            data=status, content_type='text/plain'
        )
        self.assertEqual(resp.status_code, 200)
        post_params = sign(self.CLIENT_POST_PARAMS)
        resp_params = PaymentFakeView.response_post_params(post_params)
        self.assertEqual(resp_params.get('decision'), expected_decision)
    def test_set_payment_status(self):
        """The PUT interface switches the decision returned by the view."""
        self._configure_status_and_check("decline", "DECLINE")
        self._configure_status_and_check("failure", "REJECT")
        self._configure_status_and_check("success", "ACCEPT")
| agpl-3.0 |
andreif/django | django/contrib/messages/storage/cookie.py | 471 | 6545 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
    """
    JSON encoder that flattens ``Message`` instances into compact lists.
    """
    # Marker placed at index 0 so the decoder can recognize our lists.
    message_key = '__json_message'
    def default(self, obj):
        if not isinstance(obj, Message):
            # Anything else falls back to the standard encoder (raises
            # TypeError for unsupported types, as usual).
            return super(MessageEncoder, self).default(obj)
        # 0/1 rather than False/True keeps the serialized form shorter.
        safe_flag = 1 if isinstance(obj.message, SafeData) else 0
        encoded = [self.message_key, safe_flag, obj.level, obj.message]
        if obj.extra_tags:
            encoded.append(obj.extra_tags)
        return encoded
class MessageDecoder(json.JSONDecoder):
    """
    JSON decoder that rebuilds ``Message`` instances serialized by
    ``MessageEncoder``.
    """
    def process_messages(self, obj):
        # Walk the decoded structure, converting tagged lists back into
        # Message objects and recursing into containers.
        if isinstance(obj, list) and obj:
            if obj[0] != MessageEncoder.message_key:
                return [self.process_messages(item) for item in obj]
            if len(obj) == 3:
                # Compatibility with previously-encoded messages
                return Message(*obj[1:])
            if obj[1]:
                # The is_safedata flag was set: restore safe-string status.
                obj[3] = mark_safe(obj[3])
            return Message(*obj[2:])
        if isinstance(obj, dict):
            return {key: self.process_messages(value)
                    for key, value in six.iteritems(obj)}
        return obj
    def decode(self, s, **kwargs):
        parsed = super(MessageDecoder, self).decode(s, **kwargs)
        return self.process_messages(parsed)
class CookieStorage(BaseStorage):
    """
    Stores messages in a cookie.
    """
    cookie_name = 'messages'
    # uwsgi's default configuration enforces a maximum size of 4kb for all the
    # HTTP headers. In order to leave some room for other cookies and headers,
    # restrict the session cookie to 1/2 of 4kb. See #18781.
    max_cookie_size = 2048
    # Sentinel appended to the stored list when not every message fit in
    # the cookie; _get() strips it and reports partial retrieval.
    not_finished = '__messagesnotfinished__'
    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the messages cookie. If the
        not_finished sentinel value is found at the end of the message list,
        remove it and return a result indicating that not all messages were
        retrieved by this storage.
        """
        data = self.request.COOKIES.get(self.cookie_name)
        # _decode() returns None if the cookie is absent, tampered with,
        # or otherwise malformed.
        messages = self._decode(data)
        all_retrieved = not (messages and messages[-1] == self.not_finished)
        if messages and not all_retrieved:
            # remove the sentinel value
            messages.pop()
        return messages, all_retrieved
    def _update_cookie(self, encoded_data, response):
        """
        Either sets the cookie with the encoded data if there is any data to
        store, or deletes the cookie.
        """
        if encoded_data:
            response.set_cookie(self.cookie_name, encoded_data,
                                domain=settings.SESSION_COOKIE_DOMAIN,
                                secure=settings.SESSION_COOKIE_SECURE or None,
                                httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        else:
            # No messages left: remove the cookie entirely.
            response.delete_cookie(self.cookie_name,
                                   domain=settings.SESSION_COOKIE_DOMAIN)
    def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
        """
        Stores the messages to a cookie, returning a list of any messages which
        could not be stored.
        If the encoded data is larger than ``max_cookie_size``, removes
        messages until the data fits (these are the messages which are
        returned), and add the not_finished sentinel value to indicate as much.
        """
        unstored_messages = []
        encoded_data = self._encode(messages)
        if self.max_cookie_size:
            # data is going to be stored eventually by SimpleCookie, which
            # adds its own overhead, which we must account for.
            cookie = SimpleCookie()  # create outside the loop
            def stored_length(val):
                # Size of the value as SimpleCookie would serialize it.
                return len(cookie.value_encode(val)[1])
            while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
                # Drop one message per iteration until the payload fits.
                if remove_oldest:
                    unstored_messages.append(messages.pop(0))
                else:
                    unstored_messages.insert(0, messages.pop())
                # Re-encode with the sentinel appended so readers know the
                # list is truncated; encode even an empty list once
                # messages have been dropped.
                encoded_data = self._encode(messages + [self.not_finished],
                                            encode_empty=unstored_messages)
        self._update_cookie(encoded_data, response)
        return unstored_messages
    def _hash(self, value):
        """
        Creates an HMAC/SHA1 hash based on the value and the project setting's
        SECRET_KEY, modified to make it unique for the present purpose.
        """
        key_salt = 'django.contrib.messages'
        return salted_hmac(key_salt, value).hexdigest()
    def _encode(self, messages, encode_empty=False):
        """
        Returns an encoded version of the messages list which can be stored as
        plain text.
        Since the data will be retrieved from the client-side, the encoded data
        also contains a hash to ensure that the data was not tampered with.
        """
        if messages or encode_empty:
            encoder = MessageEncoder(separators=(',', ':'))
            value = encoder.encode(messages)
            # Stored format is "<hmac>$<json>" so _decode() can verify it.
            return '%s$%s' % (self._hash(value), value)
    def _decode(self, data):
        """
        Safely decodes an encoded text stream back into a list of messages.
        If the encoded text stream contained an invalid hash or was in an
        invalid format, ``None`` is returned.
        """
        if not data:
            return None
        bits = data.split('$', 1)
        if len(bits) == 2:
            hash, value = bits
            # Constant-time comparison avoids leaking HMAC info via timing.
            if constant_time_compare(hash, self._hash(value)):
                try:
                    # If we get here (and the JSON decode works), everything is
                    # good. In any other case, drop back and return None.
                    return json.loads(value, cls=MessageDecoder)
                except ValueError:
                    pass
        # Mark the data as used (so it gets removed) since something was wrong
        # with the data.
        self.used = True
        return None
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/lxml/ElementInclude.py | 121 | 7641 | #
# ElementTree
# $Id: ElementInclude.py 1862 2004-06-18 07:31:02Z Fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
Limited XInclude support for the ElementTree package.
While lxml.etree has full support for XInclude (see
`etree.ElementTree.xinclude()`), this module provides a simpler, pure
Python, ElementTree compatible implementation that supports a simple
form of custom URL resolvers.
"""
from lxml import etree
import copy
try:
from urlparse import urljoin
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.parse import urljoin
from urllib.request import urlopen
try:
set
except NameError:
# Python 2.3
from sets import Set as set
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(etree.LxmlSyntaxError):
    """Raised when an XInclude directive cannot be resolved: unloadable
    resource, recursive include, unknown parse type, or a malformed
    element in the XInclude namespace."""
    pass
##
# ET compatible default loader.
# This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
    """Load an included resource from disk (ET-compatible default loader).

    :param href: filesystem path of the resource.
    :param parse: ``"xml"`` to parse the file into an element, or
        ``"text"`` to return its decoded contents.
    :param encoding: text encoding for ``parse="text"`` (default utf-8).
    :returns: the root element for ``"xml"``, a unicode string for
        ``"text"``.
    :raises IOError: if the resource cannot be read.
    """
    # Bug fix: the original leaked the file handle when parsing or
    # decoding raised; ``with`` guarantees closure on every path.
    with open(href, 'rb') as file:
        if parse == "xml":
            data = etree.parse(file).getroot()
        else:
            data = file.read()
            if not encoding:
                encoding = 'utf-8'
            data = data.decode(encoding)
    return data
##
# Default loader used by lxml.etree - handles custom resolvers properly
#
def _lxml_default_loader(href, parse, encoding=None, parser=None):
if parse == "xml":
data = etree.parse(href, parser).getroot()
else:
if "://" in href:
f = urlopen(href)
else:
f = open(href, 'rb')
data = f.read()
f.close()
if not encoding:
encoding = 'utf-8'
data = data.decode(encoding)
return data
##
# Wrapper for ET compatibility - drops the parser
def _wrap_et_loader(loader):
def load(href, parse, encoding=None, parser=None):
return loader(href, parse, encoding)
return load
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
# @returns the node or its replacement if it was an XInclude node
def include(elem, loader=None, base_url=None):
    """Expand XInclude directives beneath *elem* in place.

    *elem* may be an element or an ElementTree; when *base_url* is not
    given it is derived from the tree's docinfo where available.
    *loader* is an optional ET-style resource loader (defaults to the
    lxml-aware loader).
    """
    if base_url is None:
        if hasattr(elem, 'getroot'):
            # An ElementTree was passed in; work on its root element.
            tree, elem = elem, elem.getroot()
        else:
            tree = elem.getroottree()
        if hasattr(tree, 'docinfo'):
            # Use the document's own URL as the base for relative hrefs.
            base_url = tree.docinfo.URL
    elif hasattr(elem, 'getroot'):
        elem = elem.getroot()
    _include(elem, loader, base_url=base_url)
def _include(elem, loader=None, _parent_hrefs=None, base_url=None):
    """Recursive worker behind include().

    Expands xi:include / xi:fallback elements beneath *elem* in place and
    returns *elem* -- or its replacement when *elem* itself was an
    xi:include node (an element for parse="xml", a string for
    parse="text").  ``_parent_hrefs`` carries the set of hrefs already
    being expanded on the current path, used to detect recursive includes.
    """
    if loader is not None:
        # Adapt an ElementTree-style loader (which has no parser arg).
        load_include = _wrap_et_loader(loader)
    else:
        load_include = _lxml_default_loader
    if _parent_hrefs is None:
        _parent_hrefs = set()
    parser = elem.getroottree().parser
    # Materialize the iterator first: the tree is mutated while looping.
    include_elements = list(
        elem.iter('{http://www.w3.org/2001/XInclude}*'))
    for e in include_elements:
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = urljoin(base_url, e.get("href"))
            parse = e.get("parse", "xml")
            parent = e.getparent()
            if parse == "xml":
                if href in _parent_hrefs:
                    raise FatalIncludeError(
                        "recursive include of %r detected" % href
                    )
                _parent_hrefs.add(href)
                node = load_include(href, parse, parser=parser)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                # Recursively expand includes inside the loaded subtree.
                node = _include(node, loader, _parent_hrefs)
                if e.tail:
                    # Preserve trailing text of the directive element.
                    node.tail = (node.tail or "") + e.tail
                if parent is None:
                    return node  # replaced the root node!
                parent.replace(e, node)
            elif parse == "text":
                text = load_include(href, parse, encoding=e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                predecessor = e.getprevious()
                # Splice the loaded text into the surrounding text nodes.
                if predecessor is not None:
                    predecessor.tail = (predecessor.tail or "") + text
                elif parent is None:
                    return text  # replaced the root node!
                else:
                    parent.text = (parent.text or "") + text + (e.tail or "")
                    parent.remove(e)
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                )
        elif e.tag == XINCLUDE_FALLBACK:
            parent = e.getparent()
            # Fallbacks are only valid directly inside an xi:include.
            if parent is not None and parent.tag != XINCLUDE_INCLUDE:
                raise FatalIncludeError(
                    "xi:fallback tag must be child of xi:include (%r)" % e.tag
                )
        else:
            raise FatalIncludeError(
                "Invalid element found in XInclude namespace (%r)" % e.tag
            )
    return elem
| mit |
Habitissimo/vespapp-web | vespapp/settings.py | 1 | 4155 | """
Django settings for vespapp project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from decouple import config
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; it should be read
# from the environment (e.g. config('SECRET_KEY')) before production use.
SECRET_KEY = '@7xvv!(2p469*!j7d)g^tc-e*$spkkj+g#rly6m+b7%p3hift&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Django's built-in apps.
BASE_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
# Third-party apps.
VENDOR_APPS = (
    'rest_framework',
)
# Project apps.
MY_APPS = (
    'web',
    'api',
)
INSTALLED_APPS = BASE_APPS + VENDOR_APPS + MY_APPS
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vespapp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'vespapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Production uses PostgreSQL configured via environment variables
# (python-decouple); development (DEBUG) falls back to local SQLite.
if not DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config("DB_NAME"),
            'USER': config("DB_USER"),
            'PASSWORD': config("DB_PASSWORD", default='supersecret'),
            'HOST': config("DB_HOST"),
            'PORT': config("DB_PORT", default="5432"),
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, 'staticfiles'),
)
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
MEDIA_URL = '/media/'
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    )
}
| gpl-3.0 |
brandones/shell | docs/conf.py | 3 | 6993 | # -*- coding: utf-8 -*-
#
# shell documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 2 12:36:25 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Sphinx build configuration for the ``shell`` project documentation.
# PEP 8 fix: one import per statement (was ``import sys, os``).
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'shell'
copyright = u'2013, Daniel Lindsley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'shelldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'shell.tex', u'shell Documentation',
     u'Daniel Lindsley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'shell', u'shell Documentation',
     [u'Daniel Lindsley'], 1)
]
| bsd-3-clause |
chongtianfeiyu/kbengine | kbe/src/lib/python/Lib/encodings/cp857.py | 272 | 33908 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless one-shot encoder/decoder pair for the cp857 charmap."""

    def encode(self, input, errors='strict'):
        # Translate each character through the cp857 encoding table.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # Translate each byte through the cp857 decoding table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp857 encoder (charmap encoding is stateless)."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, consumed); only the bytes matter.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp857 decoder (charmap decoding is stateless)."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, consumed); only the text matters.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer for cp857: all encoding logic lives in Codec, so no
    # extra state or overrides are needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader for cp857: all decoding logic lives in Codec, so no
    # extra state or overrides are needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'cp857'."""
    return codecs.CodecInfo(
        name='cp857',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# One character per byte value; index == cp857 code point.  '\ufffe' marks
# byte values that are undefined in cp857 (0xd5, 0xe7, 0xf2).
#
# This module was originally emitted by gencodec.py with the decoding map and
# the encoding map spelled out as literal dicts.  Both are pure derivations of
# this table, so they are now computed from it below; the three structures can
# no longer drift out of sync.
decoding_table = (
    # 0x00-0x7f: identical to ASCII.
    ''.join(map(chr, range(0x80))) +
    # 0x80-0x8f: C-cedilla, accented vowels, dotless i, A-diaeresis/ring.
    '\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7\xea\xeb\xe8\xef\xee\u0131\xc4\xc5'
    # 0x90-0x9f: E-acute, ae ligatures, o/u accents, I-dot, pound, S-cedilla.
    '\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9\u0130\xd6\xdc\xf8\xa3\xd8\u015e\u015f'
    # 0xa0-0xaf: acute vowels, n-tilde, G-breve, punctuation, guillemets.
    '\xe1\xed\xf3\xfa\xf1\xd1\u011e\u011f\xbf\xae\xac\xbd\xbc\xa1\xab\xbb'
    # 0xb0-0xbf: shades, light/double box drawing, A accents, cent/yen.
    '\u2591\u2592\u2593\u2502\u2524\xc1\xc2\xc0\xa9\u2563\u2551\u2557\u255d\xa2\xa5\u2510'
    # 0xc0-0xcf: box drawing (light and double), a/A-tilde, currency sign.
    '\u2514\u2534\u252c\u251c\u2500\u253c\xe3\xc3\u255a\u2554\u2569\u2566\u2560\u2550\u256c\xa4'
    # 0xd0-0xdf: ordinal indicators, E/I accents, blocks; 0xd5 undefined.
    '\xba\xaa\xca\xcb\xc8\ufffe\xcd\xce\xcf\u2518\u250c\u2588\u2584\xa6\xcc\u2580'
    # 0xe0-0xef: O/U accents, sharp s, micro, multiplication; 0xe7 undefined.
    '\xd3\xdf\xd4\xd2\xf5\xd5\xb5\ufffe\xd7\xda\xdb\xd9\xec\xff\xaf\xb4'
    # 0xf0-0xff: signs, superscripts, black square, NBSP; 0xf2 undefined.
    '\xad\xb1\ufffe\xbe\xb6\xa7\xf7\xb8\xb0\xa8\xb7\xb9\xb3\xb2\u25a0\xa0'
)

### Decoding Map
# Legacy mapping {byte: unicode ordinal or None}; unused by this module but
# kept for backward compatibility with code that imports it.  None marks an
# undefined byte value (matching the old hand-written literal exactly).
decoding_map = {
    byte: (None if char == '\ufffe' else ord(char))
    for byte, char in enumerate(decoding_table)
}

### Encoding Map
# Inverse of decoding_table: {unicode ordinal: cp857 byte}.  Undefined slots
# are simply absent, which makes charmap_encode raise for unmappable input.
encoding_map = {
    ord(char): byte
    for byte, char in enumerate(decoding_table)
    if char != '\ufffe'
}
| lgpl-3.0 |
wemanuel/smry | ee/ee_number.py | 9 | 1597 | #!/usr/bin/env python
"""A wrapper for numbers."""
import numbers
import apifunction
import computedobject
import ee_exception
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
class Number(computedobject.ComputedObject):
  """An object to represent numbers."""

  # Whether the server-side Number API functions have been imported onto
  # this class (see initialize()/reset()).
  _initialized = False

  def __init__(self, number):
    """Construct a number wrapper.

    This constructor accepts the following args:
      1) A bare number.
      2) A ComputedObject returning a number.

    Args:
      number: The number to wrap.

    Raises:
      EEException: if `number` is neither a bare number nor a
          ComputedObject.
    """
    self.initialize()
    if isinstance(number, numbers.Number):
      # A constant: no server-side function/args; keep the raw value.
      super(Number, self).__init__(None, None)
      self._number = number
    elif isinstance(number, computedobject.ComputedObject):
      # A computed value: re-wrap the existing invocation; no constant held.
      super(Number, self).__init__(number.func, number.args, number.varName)
      self._number = None
    else:
      raise ee_exception.EEException(
          'Invalid argument specified for ee.Number(): %s' % number)

  @classmethod
  def initialize(cls):
    """Imports API functions to this class."""
    if not cls._initialized:
      apifunction.ApiFunction.importApi(cls, 'Number', 'Number')
      cls._initialized = True

  @classmethod
  def reset(cls):
    """Removes imported API functions from this class."""
    apifunction.ApiFunction.clearApi(cls)
    cls._initialized = False

  @staticmethod
  def name():
    """Return the name of this wrapper type as known to the API."""
    return 'Number'

  def encode(self, opt_encoder=None):
    # Constants serialize to the bare number itself; computed values defer
    # to the standard ComputedObject encoding.
    if isinstance(self._number, numbers.Number):
      return self._number
    else:
      return super(Number, self).encode(opt_encoder)
| apache-2.0 |
slohse/ansible | lib/ansible/modules/cloud/vmware/vmware_host_service_facts.py | 22 | 4673 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_service_facts
short_description: Gathers facts about an ESXi host's services
description:
- This module can be used to gather facts about an ESXi host's services.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Service facts about each ESXi server will be returned for given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Service facts about this ESXi server will be returned.
- If C(cluster_name) is not given, this parameter is required.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather facts about all ESXi Host in given Cluster
vmware_host_service_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
delegate_to: localhost
register: cluster_host_services
- name: Gather facts about ESXi Host
vmware_host_service_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: host_services
'''
RETURN = r'''
host_service_facts:
description:
- dict with hostname as key and dict with host service config facts
returned: always
type: dict
sample: {
"10.76.33.226": [
{
"key": "DCUI",
"label": "Direct Console UI",
"policy": "on",
"required": false,
"running": true,
"uninstallable": false
},
{
"key": "TSM",
"label": "ESXi Shell",
"policy": "off",
"required": false,
"running": false,
"uninstallable": false
},
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VmwareServiceManager(PyVmomi):
    """Collects service configuration facts from one or more ESXi hosts."""

    def __init__(self, module):
        """Resolve the target host list from the module parameters.

        Either ``cluster_name`` (all hosts of that cluster) or
        ``esxi_hostname`` (a single host) selects the hosts to query.
        """
        super(VmwareServiceManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)

    def gather_host_facts(self):
        """Return {hostname: [service fact dict, ...]} for every target host."""
        hosts_facts = {}
        for host in self.hosts:
            host_service_facts = []
            # Guard: configManager.serviceSystem may be falsy; such a host
            # still appears in the result, with an empty service list.
            host_service_system = host.configManager.serviceSystem
            if host_service_system:
                services = host_service_system.serviceInfo.service
                for service in services:
                    host_service_facts.append(
                        dict(
                            key=service.key,
                            label=service.label,
                            required=service.required,
                            uninstallable=service.uninstallable,
                            running=service.running,
                            policy=service.policy,
                            # sourcePackage can be absent; fall back to 'NA'.
                            source_package_name=service.sourcePackage.sourcePackageName if service.sourcePackage else 'NA',
                            source_package_desc=service.sourcePackage.description if service.sourcePackage else 'NA',
                        )
                    )
            hosts_facts[host.name] = host_service_facts
        return hosts_facts
def main():
    """Ansible entry point: gather ESXi host service facts and exit."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )
    # Exactly one of cluster_name / esxi_hostname must be supplied.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )
    manager = VmwareServiceManager(module)
    module.exit_json(changed=False,
                     host_service_facts=manager.gather_host_facts())


if __name__ == "__main__":
    main()
| gpl-3.0 |
iam-TJ/node-gyp | gyp/test/configurations/invalid/gyptest-configurations.py | 57 | 1029 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
test = TestGyp.TestGyp()
if test.format == 'scons':
test.skip_test('TODO: http://code.google.com/p/gyp/issues/detail?id=176\n')
for test_key in invalid_configuration_keys:
test.run_gyp('%s.gyp' % test_key, status=1, stderr=None)
expect = ['%s not allowed in the Debug configuration, found in target '
'%s.gyp:configurations#target' % (test_key, test_key)]
test.must_contain_all_lines(test.stderr(), expect)
test.pass_test()
| mit |
masschallenge/django-accelerator | accelerator/tests/contexts/judge_feedback_context.py | 1 | 13749 | from accelerator_abstract.models import (
FORM_ELEM_FEEDBACK_TO_MC,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_OVERALL_RECOMMENDATION,
)
from accelerator.models import (
ACTIVE_PROGRAM_STATUS,
ASSIGNED_PANEL_ASSIGNMENT_STATUS,
COMPLETE_PANEL_ASSIGNMENT_STATUS,
FEEDBACK_DISPLAY_DISABLED as DISABLED,
FEEDBACK_DISPLAY_ENABLED as ENABLED,
IN_PERSON_JUDGING_ROUND_TYPE,
ONLINE_JUDGING_ROUND_TYPE,
JUDGING_FEEDBACK_STATUS_INCOMPLETE as INCOMPLETE,
PREVIEW_PANEL_STATUS,
SUBMITTED_APP_STATUS,
UserRole,
)
from accelerator.tests.factories import (
ApplicationAnswerFactory,
ApplicationFactory,
ApplicationPanelAssignmentFactory,
ExpertFactory,
JudgeApplicationFeedbackFactory,
JudgeFeedbackComponentFactory,
JudgePanelAssignmentFactory,
JudgeRoundCommitmentFactory,
JudgingFormElementFactory,
PanelFactory,
ProgramCycleFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
ScenarioFactory,
StartupCycleInterestFactory,
StartupProgramInterestFactory,
)
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from .judging_round_context import JudgingRoundContext
ELEMENT_NAMES = [
FORM_ELEM_OVERALL_RECOMMENDATION,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_FEEDBACK_TO_MC,
]
_round_type = {True: ONLINE_JUDGING_ROUND_TYPE,
False: IN_PERSON_JUDGING_ROUND_TYPE}
class JudgeFeedbackContext:
def __init__(self,
application=None,
num_components=1,
complete=True,
panel_status=PREVIEW_PANEL_STATUS,
display_feedback=False,
merge_feedback_with=None,
cycle_based_round=False,
online_round=True,
is_active=True,
judge_capacity=10,
program_status=ACTIVE_PROGRAM_STATUS):
self.judging_capacity = 0
if application:
self.application = application
self.cycle = application.cycle
else:
self.cycle = ProgramCycleFactory()
self.application = ApplicationFactory(
application_status=SUBMITTED_APP_STATUS,
application_type=self.cycle.default_application_type,
cycle=self.cycle)
self.application_type = self.application.application_type
self.applications = [self.application]
self.startup = self.application.startup
self.industry = self.startup.primary_industry
feedback_display = ENABLED if display_feedback else DISABLED
jr_kwargs = {
'program__cycle': self.cycle,
'round_type': _round_type[online_round],
'feedback_display': feedback_display,
'cycle_based_round': cycle_based_round,
'application_type': self.application_type,
'is_active': False,
'program__program_status': program_status,
}
if merge_feedback_with:
jr_kwargs['feedback_merge_with'] = merge_feedback_with
self.judging_round = JudgingRoundContext(**jr_kwargs).judging_round
self.program = self.judging_round.program
self.panel = PanelFactory(status=panel_status,
panel_time__judging_round=self.judging_round)
self.scenario = ScenarioFactory(judging_round=self.judging_round)
user_role = get_user_role_by_name(UserRole.JUDGE)
self.judge_role = ProgramRoleFactory(program=self.program,
user_role=user_role)
self.judges = []
self.judge = self.add_judge(complete=complete,
capacity=judge_capacity)
self.feedback = JudgeApplicationFeedbackFactory(
judge=self.judge,
application=self.application,
panel=self.panel,
form_type=self.judging_round.judging_form)
self.judging_form = self.feedback.form_type
self.application_assignment = ApplicationPanelAssignmentFactory(
application=self.application,
panel=self.panel,
scenario=self.scenario)
cycle_interest = StartupCycleInterestFactory(cycle=self.program.cycle,
startup=self.startup)
StartupProgramInterestFactory(program=self.program,
startup=self.startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
self.components = []
self.elements = []
self.application_questions = []
self.application_answers = []
for element_name in ELEMENT_NAMES:
self.add_component(element_name=element_name)
if complete:
self.feedback.save()
for _ in range(num_components):
self.add_component()
else:
for _ in range(num_components):
self.add_element()
self.judging_round.is_active = is_active
self.judging_round.save()
def add_application_answer(self, question=None, answer_text=None):
question = question or self.application_questions[0]
kwargs = {"application_question": question,
"application": self.application}
if answer_text:
kwargs["answer_text"] = answer_text
app_answer = ApplicationAnswerFactory(**kwargs)
self.application_answers.append(app_answer)
return app_answer
def add_component(self, element_name=None,
feedback_element=None,
add_answer=True,
answer_text=None):
factory_params = {
"judge_feedback": self.feedback, }
if feedback_element is None:
app_type_key = "__".join(["feedback_element",
"application_question",
"application_type"])
factory_params.update(
{
"feedback_element__form_type": self.judging_form,
"feedback_element__element_type": "feedback",
"feedback_element__mandatory": True,
"feedback_element__sharing": "share-with-startup",
app_type_key: self.application_type}
)
if element_name:
factory_params['feedback_element__element_name'] = element_name
else:
factory_params.update({"feedback_element": feedback_element})
if answer_text:
factory_params["answer_text"] = answer_text
component = JudgeFeedbackComponentFactory(
**factory_params)
self.components.append(component)
question = component.feedback_element.application_question
self.application_questions.append(question)
if add_answer:
app_answer = ApplicationAnswerFactory(
application_question=question,
application=self.application)
self.application_answers.append(app_answer)
if feedback_element is None:
self.elements.append(component.feedback_element)
self.feedback.save()
return component
def add_element(self,
feedback_type="",
element_type="feedback",
choice_layout="",
mandatory=True,
text_minimum=0,
text_minimum_units="",
answer_text=None,
text_limit=0,
text_limit_units=""):
element = JudgingFormElementFactory(
form_type=self.judging_form,
mandatory=mandatory,
element_type=element_type,
feedback_type=feedback_type,
choice_layout=choice_layout,
sharing="share-with-startup",
application_question__application_type=self.application_type,
text_minimum=text_minimum,
text_minimum_units=text_minimum_units,
text_limit=text_limit,
text_limit_units=text_limit_units,
)
application_question = element.application_question
self.application_questions.append(application_question)
answer_kwargs = {"application_question": application_question,
"application": self.application}
if answer_text:
answer_kwargs["answer_text"] = answer_text
application_answer = ApplicationAnswerFactory(**answer_kwargs)
self.application_answers.append(application_answer)
self.elements.append(element)
self.feedback.save()
return element
def add_extra_scenario(self):
return ScenarioFactory(judging_round=self.judging_round)
def add_panel(self):
return PanelFactory(
panel_time__judging_round=self.judging_round,
panel_type__judging_round=self.judging_round,
location__judging_round=self.judging_round)
def add_assignment(self,
judge=None,
panel=None,
scenario=None):
scenario = scenario or self.scenario
judge = judge or self.judge
panel = panel or self.panel
return JudgePanelAssignmentFactory(
judge=judge,
panel=panel,
scenario=scenario)
def add_feedback(self,
application=None,
judge=None,
panel=None,
feedback_status=INCOMPLETE):
judge = judge or self.judge
application = application or self.application
panel = panel or self.panel
if not panel.applicationpanelassignment_set.filter(
application=application).exists():
ApplicationPanelAssignmentFactory(
application=application,
panel=panel,
scenario=self.scenario)
return JudgeApplicationFeedbackFactory(
feedback_status=feedback_status,
judge=judge,
application=application,
panel=panel,
form_type=self.judging_round.judging_form)
def add_application(self,
application=None,
field=None,
option=None,
program=None):
program = program or self.program
if application is None:
fields = {
"application_status": SUBMITTED_APP_STATUS,
"application_type": self.application_type,
}
if field:
fields[field] = option
application = ApplicationFactory(**fields)
self.applications.append(application)
startup = application.startup
cycle_interest = StartupCycleInterestFactory(cycle=program.cycle,
startup=startup)
StartupProgramInterestFactory(program=program,
startup=startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
return application
def add_applications(self, count, field=None, options=[], programs=[]):
result = []
option_count = len(options)
option = None
program_count = len(programs)
program = None
for i in range(count):
if option_count > 0:
option = options[i % option_count]
if program_count > 0:
program = programs[i % program_count]
result.append(self.add_application(field=field,
option=option,
program=program))
return result
def add_judge(self,
assigned=True,
complete=True,
judge=None,
panel=None,
capacity=10):
if judge is None:
judge = ExpertFactory(
profile__primary_industry=self.industry,
profile__home_program_family=self.program.program_family)
ProgramRoleGrantFactory(person=judge, program_role=self.judge_role)
self.judging_round.confirmed_judge_label.users.add(judge)
JudgeRoundCommitmentFactory(judging_round=self.judging_round,
judge=judge,
capacity=10,
commitment_state=True)
self.judging_capacity += capacity
if assigned:
if complete:
status = COMPLETE_PANEL_ASSIGNMENT_STATUS
else:
status = ASSIGNED_PANEL_ASSIGNMENT_STATUS
JudgePanelAssignmentFactory(
judge=judge,
assignment_status=status,
panel=panel or self.panel,
scenario=self.scenario)
self.judges.append(judge)
return judge
@classmethod
def create_batch(cls, qty, *args, **kwargs):
if 'merge_feedback' in kwargs:
merge_feedback = kwargs.pop('merge_feedback')
else:
merge_feedback = False
contexts = [cls(*args, **kwargs)]
if merge_feedback:
kwargs['merge_feedback_with'] = contexts[0].judging_round
for _ in range(1, qty):
contexts.append(cls(*args, **kwargs))
return contexts
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.5.0/Lib/test/test_json/test_encode_basestring_ascii.py | 19 | 2266 | from collections import OrderedDict
from test.test_json import PyTest, CTest
from test.support import bigaddrspacetest
CASES = [
('/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
('controls', '"controls"'),
('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
('{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(' s p a c e d ', '" s p a c e d "'),
('\U0001d120', '"\\ud834\\udd20"'),
('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
("`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBasestringAscii:
def test_encode_basestring_ascii(self):
fname = self.json.encoder.encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = self.json.encoder.encode_basestring_ascii(input_string)
self.assertEqual(result, expect,
'{0!r} != {1!r} for {2}({3!r})'.format(
result, expect, fname, input_string))
def test_ordered_dict(self):
# See issue 6105
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = self.dumps(OrderedDict(items))
self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
def test_sorted_dict(self):
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = self.dumps(dict(items), sort_keys=True)
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
class TestPyEncodeBasestringAscii(TestEncodeBasestringAscii, PyTest): pass
class TestCEncodeBasestringAscii(TestEncodeBasestringAscii, CTest):
@bigaddrspacetest
def test_overflow(self):
size = (2**32)//6 + 1
s = "\x00"*size
with self.assertRaises(OverflowError):
self.json.encoder.encode_basestring_ascii(s)
| mit |
jeffrey4l/nova | nova/tests/unit/virt/hyperv/test_vmutilsv2.py | 22 | 11681 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vmutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_DEFINE_SYSTEM = 'DefineSystem'
_DESTROY_SYSTEM = 'DestroySystem'
_DESTROY_SNAPSHOT = 'DestroySnapshot'
_ADD_RESOURCE = 'AddResourceSettings'
_REMOVE_RESOURCE = 'RemoveResourceSettings'
_SETTING_TYPE = 'VirtualSystemType'
_VM_GEN = constants.VM_GEN_2
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
def test_create_vm(self):
super(VMUtilsV2TestCase, self).test_create_vm()
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData.new()
self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2,
mock_vssd.VirtualSystemSubType)
self.assertFalse(mock_vssd.SecureBootEnabled)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
@mock.patch.object(vmutilsv2, 'wmi', create=True)
@mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
mock_add_virt_res):
self._lookup_vm()
fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_disk.path_.return_value = self._FAKE_RES_PATH
mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
fake_metric_def_paths = ['fake_0', 'fake_0', None]
fake_metric_resource_paths = [self._FAKE_VM_PATH,
self._FAKE_VM_PATH,
self._FAKE_RES_PATH]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = [mock.call(Name=def_name)
for def_name in [self._vmutils._METRIC_AGGR_CPU_AVG,
self._vmutils._METRIC_AGGR_MEMORY_AVG]]
self._vmutils._conn.CIM_BaseMetricDefinition.assert_has_calls(calls)
calls = []
for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
Subject=fake_metric_resource_paths[i],
Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH])
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
vs.configure_mock(**attrs)
vs2 = mock.MagicMock(ElementName='fake_name2', Notes=None)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs,
vs2]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
vm_path, dynamic_memory_ratio=1.0):
mock_vs_man_svc = mock.MagicMock()
mock_vs_data = mock.MagicMock()
mock_job = mock.MagicMock()
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
fake_vm_name = 'fake_vm_name'
_conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
mock_check_ret_val.return_value = mock_job
_conn.new.return_value = mock_vs_data
mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
vm_path,
fake_ret_val)
mock_job.associators.return_value = ['fake vm path']
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name=fake_vm_name,
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=dynamic_memory_ratio,
instance_path=mock.sentinel.instance_path)
if not vm_path:
mock_job.associators.assert_called_once_with(
self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_data.ElementName, fake_vm_name)
mock_vs_man_svc.DefineSystem.assert_called_once_with(
ResourceSettings=[], ReferenceConfiguration=None,
SystemSettings=mock_vs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
if dynamic_memory_ratio > 1:
self.assertFalse(mock_vs_data.VirtualNumaEnabled)
mock_get_wmi_obj.assert_called_with('fake vm path')
self.assertEqual(mock_vs_data.Notes, 'fake notes')
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.ConfigurationDataRoot)
self.assertEqual(mock.sentinel.instance_path, mock_vs_data.LogDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SnapshotDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SuspendDataRoot)
self.assertEqual(mock.sentinel.instance_path,
mock_vs_data.SwapFileDataRoot)
self.assertEqual(response, mock_get_wmi_obj())
def test_create_vm_obj(self):
self._test_create_vm_obj(vm_path='fake vm path')
def test_create_vm_obj_no_vm_path(self):
self._test_create_vm_obj(vm_path=None)
def test_create_vm_obj_dynamic_memory(self):
self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"res_sub_type_dvd":
self._vmutils._DVD_DRIVE_RES_SUB_TYPE,
"parent": mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def test_get_vm_dvd_disk_paths(self):
mock_vm = self._lookup_vm()
mock_sasd1 = mock.MagicMock(
ResourceSubType=self._vmutils._DVD_DISK_RES_SUB_TYPE,
HostResource=[mock.sentinel.FAKE_DVD_PATH1])
mock_settings = mock.MagicMock()
mock_settings.associators.return_value = [mock_sasd1]
mock_vm.associators.return_value = [mock_settings]
ret_val = self._vmutils.get_vm_dvd_disk_paths(self._FAKE_VM_NAME)
self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0])
| apache-2.0 |
mapennell/ansible | plugins/inventory/windows_azure.py | 129 | 8636 | #!/usr/bin/env python
'''
Windows Azure external inventory script
=======================================
Generates inventory that Ansible can understand by making API request to
Windows Azure using the azure python library.
NOTE: This script assumes Ansible is being executed where azure is already
installed.
pip install azure
Adapted from the ansible Linode plugin by Dan Slimmon.
'''
# (c) 2013, John Whitbeck
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Standard imports
import re
import sys
import argparse
import os
from urlparse import urlparse
from time import time
try:
import json
except ImportError:
import simplejson as json
try:
import azure
from azure import WindowsAzureError
from azure.servicemanagement import ServiceManagementService
except ImportError as e:
print "failed=True msg='`azure` library required for this script'"
sys.exit(1)
# Imports for ansible
import ConfigParser
class AzureInventory(object):
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = {}
# Index of deployment name -> host
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.read_environment()
self.parse_cli_args()
# Initialize Azure ServiceManagementService
self.sms = ServiceManagementService(self.subscription_id, self.cert_path)
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
if self.args.list_images:
data_to_print = self.json_format_dict(self.get_images(), True)
elif self.args.list:
# Display list of nodes for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def get_images(self):
images = []
for image in self.sms.list_os_images():
if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
images.append(vars(image))
return json.loads(json.dumps(images, default=lambda o: o.__dict__))
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')
# Credentials related
if config.has_option('azure', 'subscription_id'):
self.subscription_id = config.get('azure', 'subscription_id')
if config.has_option('azure', 'cert_path'):
self.cert_path = config.get('azure', 'cert_path')
# Cache related
if config.has_option('azure', 'cache_path'):
cache_path = config.get('azure', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-azure.cache"
self.cache_path_index = cache_path + "/ansible-azure.index"
if config.has_option('azure', 'cache_max_age'):
self.cache_max_age = config.getint('azure', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Credentials
if os.getenv("AZURE_SUBSCRIPTION_ID"): self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
if os.getenv("AZURE_CERT_PATH"): self.cert_path = os.getenv("AZURE_CERT_PATH")
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Azure')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--list-images', action='store',
help='Get all available images.')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Azure (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.add_cloud_services()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def add_cloud_services(self):
"""Makes an Azure API call to get the list of cloud services."""
try:
for cloud_service in self.sms.list_hosted_services():
self.add_deployments(cloud_service)
except WindowsAzureError as e:
print "Looks like Azure's API is down:"
print
print e
sys.exit(1)
def add_deployments(self, cloud_service):
"""Makes an Azure API call to get the list of virtual machines associated with a cloud service"""
try:
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
if deployment.deployment_slot == "Production":
self.add_deployment(cloud_service, deployment)
except WindowsAzureError as e:
print "Looks like Azure's API is down:"
print
print e
sys.exit(1)
def add_deployment(self, cloud_service, deployment):
"""Adds a deployment to the inventory and index"""
dest = urlparse(deployment.url).hostname
# Add to index
self.index[dest] = deployment.name
# List of all azure deployments
self.push(self.inventory, "azure", dest)
# Inventory: Group by service name
self.push(self.inventory, self.to_safe(cloud_service.service_name), dest)
# Inventory: Group by region
self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), dest)
def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element);
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
"""Escapes any characters that would be invalid in an ansible group name."""
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
AzureInventory()
| gpl-3.0 |
samanehsan/osf.io | tests/webtest_tests.py | 5 | 35201 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import httplib as http
import logging
import mock
import re
import unittest
import markupsafe
from nose.tools import * # flake8: noqa (PEP8 asserts)
from framework.mongo.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase, fake
from tests.factories import (UserFactory, AuthUserFactory, ProjectFactory,
WatchConfigFactory,
NodeFactory, NodeWikiFactory, RegistrationFactory,
UnregUserFactory, UnconfirmedUserFactory,
PrivateLinkFactory)
from tests.test_features import requires_piwik
from website import settings, language
from website.security import random_string
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project.model import ensure_schemas
from website.util import web_url_for, api_url_for
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_401(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
class TestAUser(OsfTestCase):
    """Pages, permissions, and wiki behaviour as seen by a logged-in user."""

    def setUp(self):
        super(TestAUser, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_can_see_profile_url(self):
        res = self.app.get(self.user.url).maybe_follow()
        assert_in(self.user.url, res)

    def test_can_see_homepage(self):
        # Goes to homepage
        res = self.app.get('/').maybe_follow()  # Redirects
        assert_equal(res.status_code, 200)

    def test_is_redirected_to_dashboard_already_logged_in_at_login_page(self):
        res = self.app.get('/login/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        res = res.follow(auth=self.user.auth)
        assert_equal(res.request.path, '/dashboard/')

    def test_sees_projects_in_her_dashboard(self):
        # the user already has a project
        project = ProjectFactory(creator=self.user)
        project.add_contributor(self.user)
        project.save()
        # Goes to homepage, already logged in
        res = self.app.get('/', auth=self.user.auth).follow(auth=self.user.auth)
        # Clicks Dashboard link in navbar
        res = res.click('My Dashboard', index=0, auth=self.user.auth)
        assert_in('Projects', res)  # Projects heading

    def test_does_not_see_osffiles_in_user_addon_settings(self):
        res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
        assert_not_in('OSF Storage', res)

    def test_sees_osffiles_in_project_addon_settings(self):
        project = ProjectFactory(creator=self.user)
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        res = self.app.get('/{0}/settings/'.format(project._primary_key), auth=self.auth, auto_follow=True)
        assert_in('OSF Storage', res)

    @unittest.skip("Can't test this, since logs are dynamically loaded")
    def test_sees_log_events_on_watched_projects(self):
        # Another user has a public project
        u2 = UserFactory(username='bono@u2.com', fullname='Bono')
        project = ProjectFactory(creator=u2, is_public=True)
        project.add_contributor(u2)
        auth = Auth(user=u2)
        project.save()
        # User watches the project
        watch_config = WatchConfigFactory(node=project)
        self.user.watch(watch_config)
        self.user.save()
        # Goes to her dashboard, already logged in
        res = self.app.get('/dashboard/', auth=self.auth, auto_follow=True)
        # Sees logs for the watched project
        assert_in('Watched Projects', res)  # Watched Projects header
        # The log action is in the feed
        assert_in(project.title, res)

    def test_sees_correct_title_home_page(self):
        # User goes to homepage
        res = self.app.get('/', auto_follow=True)
        title = res.html.title.string
        # page title is correct
        assert_equal('OSF | Home', title)

    def test_sees_correct_title_on_dashboard(self):
        # User goes to dashboard
        res = self.app.get('/dashboard/', auth=self.auth, auto_follow=True)
        title = res.html.title.string
        assert_equal('OSF | Dashboard', title)

    def test_can_see_make_public_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Public', res)

    def test_cant_see_make_public_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=['read', 'write'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Public', res)

    def test_can_see_make_private_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=['read', 'write', 'admin'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Private', res)

    def test_cant_see_make_private_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=['read', 'write'],
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Private', res)

    def test_sees_logs_on_a_project(self):
        project = ProjectFactory(is_public=True)
        # User goes to the project's page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        # Can see log event
        assert_in('created', res)

    def test_no_wiki_content_message(self):
        project = ProjectFactory(creator=self.user)
        # Goes to project's wiki, where there is no content
        res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
        # Sees a message indicating no content
        assert_in('No wiki content', res)

    def test_wiki_content(self):
        project = ProjectFactory(creator=self.user)
        wiki_page = 'home'
        wiki_content = 'Kittens'
        NodeWikiFactory(user=self.user, node=project, content=wiki_content, page_name=wiki_page)
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            wiki_page,
        ), auth=self.auth)
        assert_not_in('No wiki content', res)
        assert_in(wiki_content, res)

    def test_wiki_page_name_non_ascii(self):
        project = ProjectFactory(creator=self.user)
        non_ascii = to_mongo_key('WöRlÐé')
        self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            non_ascii
        ), auth=self.auth, expect_errors=True)
        project.update_node_wiki(non_ascii, 'new content', Auth(self.user))
        assert_in(non_ascii, project.wiki_pages_current)

    def test_noncontributor_cannot_see_wiki_if_no_content(self):
        user2 = UserFactory()
        # user2 creates a public project and adds no wiki content
        project = ProjectFactory(creator=user2, is_public=True)
        # self navigates to project
        res = self.app.get(project.url).maybe_follow()
        # Should not see wiki widget (since non-contributor and no content)
        assert_not_in('No wiki content', res)

    def test_wiki_does_not_exist(self):
        project = ProjectFactory(creator=self.user)
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            'not a real page yet',
        ), auth=self.auth, expect_errors=True)
        assert_in('No wiki content', res)

    def test_sees_own_profile(self):
        res = self.app.get('/profile/', auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, self.user.display_absolute_url)

    def test_sees_another_profile(self):
        user2 = UserFactory()
        res = self.app.get(user2.url, auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, user2.display_absolute_url)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1320
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_reset_password(self, mock_send_mail):
        # A registered user
        user = UserFactory()
        # goes to the login page
        url = web_url_for('forgot_password_get')
        res = self.app.get(url)
        # and fills out forgot password form
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = user.username
        # submits
        res = form.submit()
        # mail was sent
        # NOTE(review): missing parentheses — this is attribute access only
        # and never actually asserts; should probably be assert_called().
        mock_send_mail.assert_called
        # gets 200 response
        assert_equal(res.status_code, 200)
        # URL is /forgotpassword
        assert_equal(res.request.path, web_url_for('forgot_password_post'))
class TestComponents(OsfTestCase):
    """Component pages: creation links, parent display, settings permissions."""

    def setUp(self):
        super(TestComponents, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
        self.component = NodeFactory(
            category='hypothesis',
            creator=self.user,
            parent=self.project,
        )
        self.component.save()
        # Toggle privacy so the component accumulates some log entries.
        self.component.set_privacy('public', self.consolidate_auth)
        self.component.set_privacy('private', self.consolidate_auth)
        self.project.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_can_create_component_from_a_project(self):
        res = self.app.get(self.project.url, auth=self.user.auth).maybe_follow()
        assert_in('Add Component', res)

    def test_can_create_component_from_a_component(self):
        res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
        assert_in('Add Component', res)

    def test_sees_parent(self):
        res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
        parent_title = res.html.find_all('h2', class_='node-parent-title')
        assert_equal(len(parent_title), 1)
        assert_in(self.project.title, parent_title[0].text)  # Bs4 will handle unescaping HTML here

    def test_delete_project(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth
        ).maybe_follow()
        assert_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_cant_delete_project_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_can_configure_comments_if_admin(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth,
        ).maybe_follow()
        assert_in('Configure Commenting', res)

    def test_cant_configure_comments_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        # Fixed case: the page renders 'Configure Commenting' (as checked by
        # the admin test); the lower-case 'Configure commenting' could never
        # appear, so the old assertion passed vacuously.
        assert_not_in('Configure Commenting', res)

    def test_components_should_have_component_list(self):
        res = self.app.get(self.component.url, auth=self.user.auth)
        assert_in('Components', res)
class TestPrivateLinkView(OsfTestCase):
    """View-only (private) link behaviour, including anonymized links."""

    def setUp(self):
        super(TestPrivateLinkView, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory(anonymous=True)
        self.link.nodes.append(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_anonymous_link_hide_contributor(self):
        # Anonymized links must not reveal contributor names.
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_in("Anonymous Contributors", res.body)
        assert_not_in(self.user.fullname, res)

    def test_anonymous_link_hides_citations(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_not_in('Citation:', res)

    def test_no_warning_for_read_only_user_with_valid_link(self):
        link2 = PrivateLinkFactory(anonymous=False)
        link2.nodes.append(self.project)
        link2.save()
        self.project.add_contributor(
            self.user,
            permissions=['read'],
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': link2.key},
                           auth=self.user.auth)
        # Contributors should not see the view-only-link warning banner.
        assert_not_in(
            "is being viewed through a private, view-only link. "
            "Anyone with the link can view this project. Keep "
            "the link safe.",
            res.body
        )

    def test_no_warning_for_read_only_user_with_invalid_link(self):
        self.project.add_contributor(
            self.user,
            permissions=['read'],
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': "not_valid"},
                           auth=self.user.auth)
        assert_not_in(
            "is being viewed through a private, view-only link. "
            "Anyone with the link can view this project. Keep "
            "the link safe.",
            res.body
        )
class TestMergingAccounts(OsfTestCase):
    """Merging a duplicate user account into a master account."""

    def setUp(self):
        super(TestMergingAccounts, self).setUp()
        self.user = UserFactory.build()
        self.user.fullname = "tess' test string"
        self.user.set_password('science')
        self.user.save()
        self.dupe = UserFactory.build()
        self.dupe.set_password('example')
        self.dupe.save()

    def test_merged_user_is_not_shown_as_a_contributor(self):
        project = ProjectFactory(is_public=True)
        # Both the master and dupe are contributors
        project.add_contributor(self.dupe, log=False)
        project.add_contributor(self.user, log=False)
        project.save()
        # At the project page, both are listed as contributors
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_in_html(self.dupe.fullname, res)
        # The accounts are merged
        self.user.merge_user(self.dupe)
        self.user.save()
        # Now only the master user is shown at the project page
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_true(self.dupe.is_merged)
        assert_not_in(self.dupe.fullname, res)

    def test_merged_user_has_alert_message_on_profile(self):
        # Master merges dupe
        self.user.merge_user(self.dupe)
        self.user.save()
        # At the dupe user's profile there is an alert message at the top
        # indicating that the user is merged
        res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
        assert_in('This account has been merged', res)
# FIXME: These affect search in development environment. So need to migrate solr after running.
# # Remove this side effect.
@unittest.skipIf(not settings.SEARCH_ENGINE, 'Skipping because search is disabled')
class TestSearching(OsfTestCase):
    '''Test searching using the search bar. NOTE: These may affect the
    Solr database. May need to migrate after running these.
    '''

    def setUp(self):
        super(TestSearching, self).setUp()
        # Imported lazily so the module loads even when search is disabled.
        import website.search.search as search
        search.delete_all()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    @unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
    def test_a_user_from_home_page(self):
        user = UserFactory()
        # Goes to home page
        res = self.app.get('/').maybe_follow()
        # Fills search form
        form = res.forms['searchBar']
        form['q'] = user.fullname
        res = form.submit().maybe_follow()
        # The username shows as a search result
        assert_in(user.fullname, res)

    @unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
    def test_a_public_project_from_home_page(self):
        project = ProjectFactory(title='Foobar Project', is_public=True)
        # Searches a part of the name
        res = self.app.get('/').maybe_follow()
        project.reload()
        form = res.forms['searchBar']
        form['q'] = 'Foobar'
        res = form.submit().maybe_follow()
        # A link to the project is shown as a result
        assert_in('Foobar Project', res)

    @unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
    def test_a_public_component_from_home_page(self):
        component = NodeFactory(title='Foobar Component', is_public=True)
        # Searches a part of the name
        res = self.app.get('/').maybe_follow()
        component.reload()
        form = res.forms['searchBar']
        form['q'] = 'Foobar'
        res = form.submit().maybe_follow()
        # A link to the component is shown as a result
        assert_in('Foobar Component', res)
class TestShortUrls(OsfTestCase):
    """Short ('deep') URLs must render the same page body as canonical URLs."""

    def setUp(self):
        super(TestShortUrls, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        # A non-project component
        self.component = NodeFactory(category='hypothesis', creator=self.user)
        self.project.nodes.append(self.component)
        self.component.save()
        # Hack: Add some logs to component; should be unnecessary pending
        # improvements to factories from @rliebz
        self.component.set_privacy('public', auth=self.consolidate_auth)
        self.component.set_privacy('private', auth=self.consolidate_auth)
        self.wiki = NodeWikiFactory(user=self.user, node=self.component)

    def _url_to_body(self, url):
        # Fetch *url* (following redirects) and return the normalized body
        # so two routes can be compared for identical rendered content.
        return self.app.get(
            url,
            auth=self.auth
        ).maybe_follow(
            auth=self.auth,
        ).normal_body

    def test_project_url(self):
        assert_equal(
            self._url_to_body(self.project.deep_url),
            self._url_to_body(self.project.url),
        )

    def test_component_url(self):
        assert_equal(
            self._url_to_body(self.component.deep_url),
            self._url_to_body(self.component.url),
        )

    def test_wiki_url(self):
        assert_equal(
            self._url_to_body(self.wiki.deep_url),
            self._url_to_body(self.wiki.url),
        )
@requires_piwik
class TestPiwik(OsfTestCase):
    """Piwik analytics embed on the project statistics page."""

    def setUp(self):
        super(TestPiwik, self).setUp()
        # users[0]: creator/admin, users[1]: contributor, users[2]: outsider
        self.users = [
            AuthUserFactory()
            for _ in range(3)
        ]
        self.consolidate_auth = Auth(user=self.users[0])
        self.project = ProjectFactory(creator=self.users[0], is_public=True)
        self.project.add_contributor(contributor=self.users[1])
        self.project.save()

    def test_contains_iframe_and_src(self):
        res = self.app.get(
            '/{0}/statistics/'.format(self.project._primary_key),
            auth=self.users[0].auth
        ).maybe_follow()
        assert_in('iframe', res)
        assert_in('src', res)
        assert_in(settings.PIWIK_HOST, res)

    def test_anonymous_no_token(self):
        # Non-contributors get the anonymous Piwik token.
        res = self.app.get(
            '/{0}/statistics/'.format(self.project._primary_key),
            auth=self.users[2].auth
        ).maybe_follow()
        assert_in('token_auth=anonymous', res)

    def test_contributor_token(self):
        res = self.app.get(
            '/{0}/statistics/'.format(self.project._primary_key),
            auth=self.users[1].auth
        ).maybe_follow()
        assert_in(self.users[1].piwik_token, res)

    def test_no_user_token(self):
        res = self.app.get(
            '/{0}/statistics/'.format(self.project._primary_key)
        ).maybe_follow()
        assert_in('token_auth=anonymous', res)

    def test_private_alert(self):
        self.project.set_privacy('private', auth=self.consolidate_auth)
        self.project.save()
        res = self.app.get(
            '/{0}/statistics/'.format(self.project._primary_key),
            auth=self.users[0].auth
        ).maybe_follow().normal_body
        assert_in(
            'Usage statistics are collected only for public resources.',
            res
        )
class TestClaiming(OsfTestCase):
    """Claiming unregistered-contributor accounts via claim URLs."""

    def setUp(self):
        super(TestClaiming, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)

    def test_correct_name_shows_in_contributor_list(self):
        name1, email = fake.name(), fake.email()
        UnregUserFactory(fullname=name1, email=email)
        name2, email = fake.name(), fake.email()
        # Added with different name
        self.project.add_unregistered_contributor(fullname=name2,
            email=email, auth=Auth(self.referrer))
        self.project.save()
        res = self.app.get(self.project.url, auth=self.referrer.auth)
        # Correct name is shown
        assert_in_html(name2, res)
        assert_not_in(name1, res)

    def test_user_can_set_password_on_claim_page(self):
        name, email = fake.name(), fake.email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        #form['username'] = new_user.username #Removed as long as E-mail can't be updated.
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().follow()
        new_user.reload()
        assert_true(new_user.check_password('killerqueen'))

    def test_sees_is_redirected_if_user_already_logged_in(self):
        name, email = fake.name(), fake.email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        existing = AuthUserFactory()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        # a user is already logged in
        res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
        assert_equal(res.status_code, 302)

    def test_unregistered_users_names_are_project_specific(self):
        name1, name2, email = fake.name(), fake.name(), fake.email()
        project2 = ProjectFactory(creator=self.referrer)
        # different projects use different names for the same unreg contributor
        self.project.add_unregistered_contributor(
            email=email,
            fullname=name1,
            auth=Auth(self.referrer)
        )
        self.project.save()
        project2.add_unregistered_contributor(
            email=email,
            fullname=name2,
            auth=Auth(self.referrer)
        )
        project2.save()
        self.app.authenticate(*self.referrer.auth)
        # Each project displays a different name in the contributor list
        res = self.app.get(self.project.url)
        assert_in_html(name1, res)
        res2 = self.app.get(project2.url)
        assert_in_html(name2, res2)

    @unittest.skip("as long as E-mails cannot be changed")
    def test_cannot_set_email_to_a_user_that_already_exists(self):
        reg_user = UserFactory()
        name, email = fake.name(), fake.email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        # Goes to claim url and successfully claims account
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        # Fills out an email that is the username of another user
        form['username'] = reg_user.username
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().maybe_follow(expect_errors=True)
        assert_in(
            language.ALREADY_REGISTERED.format(email=reg_user.username),
            res
        )

    def test_correct_display_name_is_shown_at_claim_page(self):
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.referrer),
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        # Correct name (different_name) should be on page
        assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
    """Email confirmation links and the update_user email endpoint."""

    def setUp(self):
        super(TestConfirmingEmail, self).setUp()
        self.user = UnconfirmedUserFactory()
        self.confirmation_url = self.user.get_confirmation_url(
            self.user.username,
            external=False,
        )
        self.confirmation_token = self.user.get_confirmation_token(
            self.user.username
        )

    def test_cannot_remove_another_user_email(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        url = api_url_for('update_user')
        header = {'id': user1.username, 'emails': [{'address': user1.username}]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_make_primary_email_for_another_user(self):
        # Renamed from 'test_cannnot_...' (identifier typo).
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        user1.emails.append(email)
        user1.save()
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
                             {'address': email, 'primary': True, 'confirmed': True}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_add_email_for_another_user(self):
        # Renamed from 'test_cannnot_...' (identifier typo).
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
                             {'address': email, 'primary': False, 'confirmed': False}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_error_page_if_confirm_link_is_used(self):
        # A confirmation link must be single-use.
        self.user.confirm_email(self.confirmation_token)
        self.user.save()
        res = self.app.get(self.confirmation_url, expect_errors=True)
        assert_in(auth_exc.InvalidTokenError.message_short, res)
        assert_equal(res.status_code, http.BAD_REQUEST)
class TestClaimingAsARegisteredUser(OsfTestCase):
    """Claiming an unregistered contributor record while already registered."""

    def setUp(self):
        super(TestClaimingAsARegisteredUser, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        name, email = fake.name(), fake.email()
        self.user = self.project.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    def test_claim_user_registered_with_correct_password(self):
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'password'
        url = self.user.get_claim_url(self.project._primary_key)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body)
        form = res.forms['claimContributorForm']
        form['password'] = 'password'
        res = form.submit(auth=reg_user.auth).follow(auth=reg_user.auth)
        self.project.reload()
        self.user.reload()
        # user is now a contributor to the project
        assert_in(reg_user._primary_key, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor, and their
        assert_not_in(self.user._primary_key, self.project.contributors)
        # unclaimed record for the project has been deleted
        assert_not_in(self.project._primary_key, self.user.unclaimed_records)
class TestExplorePublicActivity(OsfTestCase):
    """Public activity feed: shows public projects/registrations, hides private ones."""

    def setUp(self):
        super(TestExplorePublicActivity, self).setUp()
        self.project = ProjectFactory(is_public=True)
        self.registration = RegistrationFactory(project=self.project)
        self.private_project = ProjectFactory(title="Test private project")

    def test_newest_public_project_and_registrations_show_in_explore_activity(self):
        url = self.project.web_url_for('activity')
        res = self.app.get(url)
        assert_in(str(self.project.title), res)
        assert_in(str(self.project.date_created.date()), res)
        assert_in(str(self.registration.title), res)
        assert_in(str(self.registration.registered_date.date()), res)
        assert_not_in(str(self.private_project.title), res)
class TestForgotAndResetPasswordViews(OsfTestCase):
    """Password reset flow driven by a pre-set verification key."""

    def setUp(self):
        super(TestForgotAndResetPasswordViews, self).setUp()
        self.user = AuthUserFactory()
        self.key = random_string(20)
        # manually set verification key
        self.user.verification_key = self.key
        self.user.save()
        self.url = web_url_for('reset_password', verification_key=self.key)

    def test_reset_password_view_returns_200(self):
        res = self.app.get(self.url)
        assert_equal(res.status_code, 200)

    def test_can_reset_password_if_form_success(self):
        res = self.app.get(self.url)
        form = res.forms['resetPasswordForm']
        form['password'] = 'newpassword'
        form['password2'] = 'newpassword'
        res = form.submit()
        # password was updated
        self.user.reload()
        assert_true(self.user.check_password('newpassword'))

    @unittest.skip('TODO: Get this working with CAS setup')
    def test_reset_password_logs_out_user(self):
        another_user = AuthUserFactory()
        # visits reset password link while another user is logged in
        res = self.app.get(self.url, auth=another_user.auth)
        assert_equal(res.status_code, 200)
        # We check if another_user is logged in by checking if
        # their full name appears on the page (it should be in the navbar).
        # Yes, this is brittle.
        assert_not_in(another_user.fullname, res)
        # make sure the form is on the page
        assert_true(res.forms['resetPasswordForm'])
class TestAUserProfile(OsfTestCase):
    """Public project/component listings on user profile pages."""

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.me = AuthUserFactory()
        self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
        self.component = NodeFactory(creator=self.me, project=self.project, is_public=True, title=fake.bs())

    # regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
    def test_has_public_projects_and_components(self):
        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.me.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)
        # Another user can also see my public project and component
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.user.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

    def test_user_no_public_projects_or_components(self):
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._primary_key)
        # User has no public components/projects
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public projects', res)
        assert_in('This user has no public components', res)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_strftime.py | 56 | 6967 | """
Unittest for time.strftime
"""
import calendar
import sys
import os
import re
from test import test_support
import time
import unittest
# helper functions
def fixasctime(s):
    """Zero-pad the day-of-month field of an asctime()-style string.

    asctime() blank-pads single-digit days ('Sun Jun  6 ...'); the
    strftime comparisons in this module expect zero padding instead.
    """
    return s[:8] + '0' + s[9:] if s[8] == ' ' else s
def escapestr(text, ampm):
    """
    Escape *text* for use as a regex while preserving the regex syntax
    that the tests deliberately embed.

    Parameters:
        text: the literal expected output, possibly containing *ampm*.
        ampm: a regex alternation such as '(AM|am)' that must survive
            escaping verbatim so it still matches either case.
    """
    new_text = re.escape(text)
    # Restore the pieces that should remain regex syntax.  The backslash
    # is written explicitly ('\\%') rather than as '\%', which is an
    # invalid escape sequence (DeprecationWarning on modern Pythons);
    # the two-character string value is identical.
    new_text = new_text.replace(re.escape(ampm), ampm)
    new_text = new_text.replace('\\%', '%')
    new_text = new_text.replace('\\:', ':')
    new_text = new_text.replace('\\?', '?')
    return new_text
class StrftimeTest(unittest.TestCase):
def __init__(self, *k, **kw):
unittest.TestCase.__init__(self, *k, **kw)
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if test_support.verbose:
print "Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0])
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if test_support.verbose:
print "strftime test for", time.ctime(now)
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# musn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError, error:
print "Standard '%s' format gaver error:" % (e[0], error)
continue
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
print "Does not support standard '%s' format (%s)" % \
(e[0], e[2])
else:
print "Conflict for %s (%s):" % (e[0], e[2])
print " Expected %s, but got %s" % (e[1], result)
def strftest2(self, now):
nowsecs = str(long(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError, result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if test_support.verbose:
print msg
continue
if re.match(escapestr(e[1], self.ampm), result):
if test_support.verbose:
print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
elif not result or result[0] == '%':
if test_support.verbose:
print "Does not appear to support '%s' format (%s)" % \
(e[0], e[2])
else:
if test_support.verbose:
print "Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2])
print " Expected %s, but got %s" % (e[1], result)
def test_main():
    """Entry point for Python's regression-test driver."""
    test_support.run_unittest(StrftimeTest)


if __name__ == '__main__':
    test_main()
| apache-2.0 |
ingadhoc/odoo-addons | stock_transfer_restrict_lot/__openerp__.py | 4 | 1586 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP 8.0 addon manifest: declarative metadata only, no logic.
{
    'name': 'Stock Transfer Restrict Lot',
    # Version scheme: <Odoo series>.<module major.minor.patch>
    'version': '8.0.1.0.0',
    'category': 'Warehouse Management',
    'sequence': 14,
    'summary': '',
    'description': """
Stock Transfer Restrict Lot
===========================
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    'depends': [
        'stock',
    ],
    # XML views loaded on module install/upgrade.
    'data': [
        'stock_view.xml'
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
doomsterinc/odoo | addons/account/wizard/account_report_general_ledger.py | 267 | 3191 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
    """Transient wizard collecting the options for the General Ledger report.

    Inherits the common account-report wizard and adds ledger-specific
    options (landscape mode, initial balances, currency column, sort order
    and the journals to include).
    """
    _inherit = "account.common.account.report"
    _name = "account.report.general.ledger"
    _description = "General Ledger Report"
    _columns = {
        'landscape': fields.boolean("Landscape Mode"),
        'initial_balance': fields.boolean('Include Initial Balances',
                                          help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
        'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
        'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'landscape': True,
        'amount_currency': True,
        'sortby': 'sort_date',
        'initial_balance': False,
    }

    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
        """Clear the initial-balance flag when no fiscal year is selected.

        Returns an onchange dict ({'value': {...}}) or {} when a fiscal
        year is set.
        """
        res = {}
        if not fiscalyear:
            res['value'] = {'initial_balance': False}
        return res

    def _print_report(self, cr, uid, ids, data, context=None):
        """Merge the wizard values into the report payload and launch it."""
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
        if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
            data['form'].update({'initial_balance': False})
        # Only propagate the landscape flag when it is actually set.
        if data['form']['landscape'] is False:
            data['form'].pop('landscape')
        else:
            context['landscape'] = data['form']['landscape']
        return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tomcounsell/Cobra | apps/public/urls.py | 2 | 2201 | from django.conf.urls import url
from views import home, checkout, product, store, custom_order, cart, manual_checkout
# Public-site URL routes. NOTE: Django matches patterns top-down, so the
# order of entries below is significant — do not reorder.
urlpatterns = [
    #homepage url defined in anou/urls.py
    url(r'^load_products$', home.loadProducts, name='load products'),
    url(r'^about$', home.about, name='about'),

    # PRODUCT PAGE at /product/123-slug
    url(r'^product/(?P<product_id>\d+)-(?P<slug>\S+)$', product.home,
        name='product_w_slug'),
    url(r'^product/(?P<product_id>\d+)$', product.home, name='product'),

    # COMMISSIONS (COMMANDS / CUSTOM ORDERS)
    url(r'^product/custom_order_estimate$', custom_order.estimate,
        name='custom order estimate'),
    url(r'^product/custom_order_request$', custom_order.request,
        name='custom order request'),

    # STORE PAGE at /store/123 represents a seller profile
    # NOTE(review): pattern below has no trailing '$' anchor, so it also
    # matches any longer path with this prefix — confirm that is intended.
    url(r'^store/(?P<seller_id>\d+)-(?P<slug>\S+)', store.home,
        name='store_w_slug'),
    url(r'^store/(?P<seller_id>\d+)$', store.home, name='store'),

    # CHECKOUT PAGES
    url(r'^checkout/cart$', cart.cart, name='cart'),
    url(r'^checkout/cart-add/(?P<product_id>\d+)$',
        cart.cartAdd, name='cart add'),
    url(r'^checkout/cart-remove/(?P<product_id>\d+)$',
        cart.cartRemove, name='cart remove'),
    url(r'^checkout/ajax/cart_save$', cart.cartSave, name='cart save'),
    url(r'^checkout/stripe_checkout$',
        checkout.stripe_checkout, name='stripe checkout'),
    url(r'^checkout/confirmation/(?P<checkout_id>\w+)?$',
        checkout.confirmation, name='confirmation'),

    # ADMIN MANUAL ORDER CHECKOUT
    url(r'^checkout/ajax/admin_create_checkout$',
        manual_checkout.createCheckout, name='manual checkout create'),
    url(r'^checkout/manual_checkout/(?P<checkout_public_id>\w+)$',
        manual_checkout.editCheckout, name='manual checkout edit'),
    # NOTE(review): unanchored pattern (no '$') — matches any suffix.
    url(r'^checkout/ajax/save_checkout',
        manual_checkout.saveCheckout, name='manual checkout save'),

    #temporary
    url(r'^commonthread$', home.commonthread, name='commonthread'),
    url(r'^commonthread/buy/(?P<rug_name>\S+)$', home.commonthreadAddToCart,
        name='buy commonthread rug'),

    #testing
    # NOTE(review): unanchored pattern (no '$') — matches any suffix.
    url(r'^product_data', product.product_data, name='product_data'),
    url(r'^test_meta$', home.test_meta, name='test meta'),
]
| gpl-2.0 |
peterayeni/libforensics | code/lf/win/guid.py | 13 | 1647 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with UUIDs (Deprecated)."""
# stdlib imports
from uuid import UUID
__docformat__ = "restructuredtext en"
__all__ = [
"guid_to_uuid"
]
def guid_to_uuid(data1, data2, data3, data4):
    """Build a :class:`uuid.UUID` from the four fields of a Microsoft GUID
    (Deprecated).

    :parameters:
        data1
            The 32-bit Data1 field (first 8 hexadecimal digits).

        data2
            The 16-bit Data2 field (next group of 4 hexadecimal digits).

        data3
            The 16-bit Data3 field (second group of 4 hexadecimal digits).

        data4
            A sequence of the final 8 bytes.

    :rtype: UUID
    :returns: The Python UUID equivalent of the GUID.
    """

    # The last six bytes of data4 form the 48-bit node field,
    # most-significant byte first.
    node = 0
    for octet in data4[2:8]:
        node = (node << 8) | octet

    return UUID(fields=(data1, data2, data3, data4[0], data4[1], node))
# end def guid_to_uuid
superbatlc/dtailweb | phonegroups/migrations/0001_initial.py | 1 | 1870 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the phonegroups app.

    Creates Phonegroup (a self-referencing tree via `parent`),
    PhonegroupCall (link table to calls.Call) and PhonegroupExtension
    (extension numbers per group and system).
    """

    dependencies = [
        ('systems', '0001_initial'),
        ('calls', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Phonegroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name=b'Nome')),
                ('code', models.CharField(max_length=10, verbose_name=b'Codice')),
                ('parent', models.ForeignKey(related_name='child_phonegroup_set', blank=True, to='phonegroups.Phonegroup', help_text=b'The father of this group', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PhonegroupCall',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('call', models.ForeignKey(to='calls.Call')),
                ('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PhonegroupExtension',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('extension', models.CharField(max_length=4)),
                ('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
                ('system', models.ForeignKey(to='systems.System')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| gpl-2.0 |
hiraditya/fool | tensorflow/scaling-up-ml-using-cmle.py | 1 | 6449 | '''
In this lab, you will learn how to:
Package up TensorFlow model
Run training locally
Run training on cloud
Deploy model to cloud
Invoke model to carry out predictions
'''
'''
Scaling up ML using Cloud ML Engine
In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud MLE. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud ML.
Later in the course, we will look at ways to make a more effective machine learning model.
Environment variables for project and bucket
Note that:
Your project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: Project ID: cloud-training-demos
Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket.
Change the cell below to reflect your Project ID and bucket name.
'''
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7' # Tensorflow version
%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
'''
Packaging up the code
Take your code and put into a standard Python package structure. model.py and task.py contain the Tensorflow code from earlier (explore the directory structure).
'''
!find taxifare
!cat taxifare/trainer/model.py
'''
Find absolute paths to your data
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you
'''
%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv
'''
Running the Python module from the command-line
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 --job-dir=./tmp
%bash
ls $PWD/taxi_trained/export/exporter/
%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=./test.json
'''
Running locally using gcloud
'''
%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained
'''
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.
'''
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print 'Stopped TensorBoard with pid {}'.format(pid)
'''
If the above step (to stop TensorBoard) appears stalled, just move on to the next step. You don't need to wait for it to return.
'''
!ls $PWD/taxi_trained
'''
Submit training job using gcloud
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select Machine Learning | Jobs to monitor progress.
Note: Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.
https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
'''
%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
Job [lab3a_180607_192245] submitted successfully.
Your job is still active. You may view the status of your job with the command (on google cloud consile)
$ gcloud ml-engine jobs describe lab3a_180607_192245
or continue streaming the logs with the command
$ gcloud ml-engine jobs stream-logs lab3a_180607_192245
Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.
| mit |
wangyixiaohuihui/spark2-annotation | python/pyspark/mllib/stat/_statistics.py | 1 | 13703 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
    """
    Trait for multivariate statistical summary of a data matrix.

    Every accessor forwards to the wrapped JVM summary object via
    ``self.call``; vector-valued statistics are converted to numpy arrays
    with ``toArray()``.
    """

    def mean(self):
        # Column-wise mean of the input vectors.
        return self.call("mean").toArray()

    def variance(self):
        # Column-wise variance.
        return self.call("variance").toArray()

    def count(self):
        # Number of input vectors.
        return int(self.call("count"))

    def numNonzeros(self):
        # Per-column count of non-zero entries.
        return self.call("numNonzeros").toArray()

    def max(self):
        # Column-wise maximum.
        return self.call("max").toArray()

    def min(self):
        # Column-wise minimum.
        return self.call("min").toArray()

    def normL1(self):
        # Column-wise L1 norm (sum of absolute values).
        return self.call("normL1").toArray()

    def normL2(self):
        # Column-wise Euclidean (L2) norm.
        return self.call("normL2").toArray()
class Statistics(object):
    """Static statistical routines (summaries, correlation, hypothesis
    tests) operating on RDDs, delegating computation to MLlib's JVM side."""

    @staticmethod
    def colStats(rdd):
        """
        Computes column-wise summary statistics for the input RDD[Vector].

        :param rdd: an RDD[Vector] for which column-wise summary statistics
                    are to be computed.
        :return: :class:`MultivariateStatisticalSummary` object containing
                 column-wise summary statistics.

        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
        ...                       Vectors.dense([4, 5, 0, 3]),
        ...                       Vectors.dense([6, 7, 0, 8])])
        >>> cStats = Statistics.colStats(rdd)
        >>> cStats.mean()
        array([ 4., 4., 0., 3.])
        >>> cStats.variance()
        array([ 4., 13., 0., 25.])
        >>> cStats.count()
        3
        >>> cStats.numNonzeros()
        array([ 3., 2., 0., 3.])
        >>> cStats.max()
        array([ 6., 7., 0., 8.])
        >>> cStats.min()
        array([ 2., 0., 0., -2.])
        """
        cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
        return MultivariateStatisticalSummary(cStats)

    @staticmethod
    def corr(x, y=None, method=None):
        """
        Compute the correlation (matrix) for the input RDD(s) using the
        specified method.
        Methods currently supported: I{pearson (default), spearman}.

        If a single RDD of Vectors is passed in, a correlation matrix
        comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
        If two RDDs of floats are passed in, a single float is returned.

        :param x: an RDD of vector for which the correlation matrix is to be computed,
                  or an RDD of float of the same cardinality as y when y is specified.
        :param y: an RDD of float of the same cardinality as x.
        :param method: String specifying the method to use for computing correlation.
                       Supported: `pearson` (default), `spearman`
        :return: Correlation matrix comparing columns in x.

        >>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
        >>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
        >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
        >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
        True
        >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
        True
        >>> Statistics.corr(x, y, "spearman")
        0.5
        >>> from math import isnan
        >>> isnan(Statistics.corr(x, zeros))
        True
        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
        ...                       Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
        >>> pearsonCorr = Statistics.corr(rdd)
        >>> print(str(pearsonCorr).replace('nan', 'NaN'))
        [[ 1. 0.05564149 NaN 0.40047142]
        [ 0.05564149 1. NaN 0.91359586]
        [ NaN NaN 1. NaN]
        [ 0.40047142 0.91359586 NaN 1. ]]
        >>> spearmanCorr = Statistics.corr(rdd, method="spearman")
        >>> print(str(spearmanCorr).replace('nan', 'NaN'))
        [[ 1. 0.10540926 NaN 0.4 ]
        [ 0.10540926 1. NaN 0.9486833 ]
        [ NaN NaN 1. NaN]
        [ 0.4 0.9486833 NaN 1. ]]
        >>> try:
        ...     Statistics.corr(rdd, "spearman")
        ...     print("Method name as second argument without 'method=' shouldn't be allowed.")
        ... except TypeError:
        ...     pass
        """
        # Check inputs to determine whether a single value or a matrix is needed for output.
        # Since it's legal for users to use the method name as the second argument, we need to
        # check if y is used to specify the method name instead.
        if type(y) == str:
            raise TypeError("Use 'method=' to specify method name.")

        if not y:
            return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
        else:
            return callMLlibFunc("corr", x.map(float), y.map(float), method)

    @staticmethod
    @ignore_unicode_prefix
    def chiSqTest(observed, expected=None):
        """
        If `observed` is Vector, conduct Pearson's chi-squared goodness
        of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
        having an expected frequency of `1 / len(observed)`.

        If `observed` is matrix, conduct Pearson's independence test on the
        input contingency matrix, which cannot contain negative entries or
        columns or rows that sum up to 0.

        If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
        test for every feature against the label across the input RDD.
        For each feature, the (feature, label) pairs are converted into a
        contingency matrix for which the chi-squared statistic is computed.
        All label and feature values must be categorical.

        .. note:: `observed` cannot contain negative values

        :param observed: it could be a vector containing the observed categorical
                         counts/relative frequencies, or the contingency matrix
                         (containing either counts or relative frequencies),
                         or an RDD of LabeledPoint containing the labeled dataset
                         with categorical features. Real-valued features will be
                         treated as categorical for each distinct value.
        :param expected: Vector containing the expected categorical counts/relative
                         frequencies. `expected` is rescaled if the `expected` sum
                         differs from the `observed` sum.
        :return: ChiSquaredTest object containing the test statistic, degrees
                 of freedom, p-value, the method used, and the null hypothesis.

        >>> from pyspark.mllib.linalg import Vectors, Matrices
        >>> observed = Vectors.dense([4, 6, 5])
        >>> pearson = Statistics.chiSqTest(observed)
        >>> print(pearson.statistic)
        0.4
        >>> pearson.degreesOfFreedom
        2
        >>> print(round(pearson.pValue, 4))
        0.8187
        >>> pearson.method
        u'pearson'
        >>> pearson.nullHypothesis
        u'observed follows the same distribution as expected.'

        >>> observed = Vectors.dense([21, 38, 43, 80])
        >>> expected = Vectors.dense([3, 5, 7, 20])
        >>> pearson = Statistics.chiSqTest(observed, expected)
        >>> print(round(pearson.pValue, 4))
        0.0027

        >>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
        >>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
        >>> print(round(chi.statistic, 4))
        21.9958

        >>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
        ...         LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
        ...         LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
        ...         LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
        >>> rdd = sc.parallelize(data, 4)
        >>> chi = Statistics.chiSqTest(rdd)
        >>> print(chi[0].statistic)
        0.75
        >>> print(chi[1].statistic)
        1.5
        """
        # RDD input: one independence test per feature, one result per feature.
        if isinstance(observed, RDD):
            if not isinstance(observed.first(), LabeledPoint):
                raise ValueError("observed should be an RDD of LabeledPoint")
            jmodels = callMLlibFunc("chiSqTest", observed)
            return [ChiSqTestResult(m) for m in jmodels]

        if isinstance(observed, Matrix):
            jmodel = callMLlibFunc("chiSqTest", observed)
        else:
            if expected and len(expected) != len(observed):
                raise ValueError("`expected` should have same length with `observed`")
            jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
        return ChiSqTestResult(jmodel)

    @staticmethod
    @ignore_unicode_prefix
    def kolmogorovSmirnovTest(data, distName="norm", *params):
        """
        Performs the Kolmogorov-Smirnov (KS) test for data sampled from
        a continuous distribution. It tests the null hypothesis that
        the data is generated from a particular distribution.

        The given data is sorted and the Empirical Cumulative
        Distribution Function (ECDF) is calculated
        which for a given point is the number of points having a CDF
        value lesser than it divided by the total number of points.

        Since the data is sorted, this is a step function
        that rises by (1 / length of data) for every ordered point.

        The KS statistic gives us the maximum distance between the
        ECDF and the CDF. Intuitively if this statistic is large, the
        probability that the null hypothesis is true becomes small.
        For specific details of the implementation, please have a look
        at the Scala documentation.

        :param data: RDD, samples from the data
        :param distName: string, currently only "norm" is supported.
                         (Normal distribution) to calculate the
                         theoretical distribution of the data.
        :param params: additional values which need to be provided for
                       a certain distribution.
                       If not provided, the default values are used.
        :return: KolmogorovSmirnovTestResult object containing the test
                 statistic, degrees of freedom, p-value,
                 the method used, and the null hypothesis.

        >>> kstest = Statistics.kolmogorovSmirnovTest
        >>> data = sc.parallelize([-1.0, 0.0, 1.0])
        >>> ksmodel = kstest(data, "norm")
        >>> print(round(ksmodel.pValue, 3))
        1.0
        >>> print(round(ksmodel.statistic, 3))
        0.175
        >>> ksmodel.nullHypothesis
        u'Sample follows theoretical distribution'

        >>> data = sc.parallelize([2.0, 3.0, 4.0])
        >>> ksmodel = kstest(data, "norm", 3.0, 1.0)
        >>> print(round(ksmodel.pValue, 3))
        1.0
        >>> print(round(ksmodel.statistic, 3))
        0.175
        """
        if not isinstance(data, RDD):
            raise TypeError("data should be an RDD, got %s." % type(data))
        if not isinstance(distName, basestring):
            raise TypeError("distName should be a string, got %s." % type(distName))

        # Distribution parameters must be floats for the JVM call.
        params = [float(param) for param in params]
        return KolmogorovSmirnovTestResult(
            callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
The given data is sorted and the Empirical Cumulative
Distribution Function (ECDF) is calculated
which for a given point is the number of points having a CDF
value lesser than it divided by the total number of points.
Since the data is sorted, this is a step function
that rises by (1 / length of data) for every ordered point.
The KS statistic gives us the maximum distance between the
ECDF and the CDF. Intuitively if this statistic is large, the
probabilty that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
:return: KolmogorovSmirnovTestResult object containing the test
statistic, degrees of freedom, p-value,
the method used, and the null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
    """Run this module's doctests against a local 4-core SparkSession.

    Exits the interpreter with a non-zero status when any doctest fails,
    so CI treats failures as errors.
    """
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.stat.statistics tests")\
        .getOrCreate()
    # The doctests reference `sc` directly.
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # sys.exit rather than the site-module exit() helper, which is not
        # available under `python -S` or frozen interpreters.
        sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Avinash-Raj/appengine-django-skeleton | lib/django/contrib/gis/db/backends/spatialite/introspection.py | 391 | 3131 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import (
DatabaseIntrospection, FlexibleFieldLookupDict,
)
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that updates the `base_data_types_reverse` dict
    for geometry field types.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    # Every SpatiaLite geometry column type maps to the generic
    # 'GeometryField'; the precise OGC type is recovered later by
    # SpatiaLiteIntrospection.get_geometry_type().
    base_data_types_reverse.update(
        {'point': 'GeometryField',
         'linestring': 'GeometryField',
         'polygon': 'GeometryField',
         'multipoint': 'GeometryField',
         'multilinestring': 'GeometryField',
         'multipolygon': 'GeometryField',
         'geometrycollection': 'GeometryField',
         })
class SpatiaLiteIntrospection(DatabaseIntrospection):
    """SQLite introspection extended with SpatiaLite geometry metadata."""
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for a geometry column by
        consulting SpatiaLite's `geometry_columns` metadata table.

        Raises an Exception when no metadata row exists for the column.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            # SpatiaLite renamed the column 'type' -> 'geometry_type' in 4.0.
            type_col = 'type' if self.connection.ops.spatial_version < (4, 0, 0) else 'geometry_type'
            cursor.execute('SELECT coord_dimension, srid, %s '
                           'FROM geometry_columns '
                           'WHERE f_table_name=%%s AND f_geometry_column=%%s' % type_col,
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            ogr_type = row[2]
            if isinstance(ogr_type, six.integer_types) and ogr_type > 1000:
                # Spatialite versions >= 4 use the new SFSQL 1.2 offsets
                # 1000 (Z), 2000 (M), and 3000 (ZM) to indicate the presence of
                # higher dimensional coordinates (M not yet supported by Django).
                ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit
            field_type = OGRGeomType(ogr_type).django

            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            if (isinstance(dim, six.string_types) and 'Z' in dim) or dim == 3:
                field_params['dim'] = 3
        finally:
            cursor.close()

        return field_type, field_params

    def get_indexes(self, cursor, table_name):
        """Extend the base index map with SpatiaLite spatial indexes."""
        indexes = super(SpatiaLiteIntrospection, self).get_indexes(cursor, table_name)
        cursor.execute('SELECT f_geometry_column '
                       'FROM geometry_columns '
                       'WHERE f_table_name=%s AND spatial_index_enabled=1', (table_name,))
        for row in cursor.fetchall():
            indexes[row[0]] = {'primary_key': False, 'unique': False}
        return indexes
| bsd-3-clause |
xin3liang/platform_external_chromium-trace | trace-viewer/third_party/python_gflags/setup.py | 376 | 1991 | #!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
# Packaging metadata for the python-gflags distribution; installs the
# gflags/gflags_validators modules and the gflags2man.py helper script.
setup(name='python-gflags',
      version='2.0',
      description='Google Commandline Flags Module',
      license='BSD',
      author='Google Inc. and others',
      author_email='google-gflags@googlegroups.com',
      url='http://code.google.com/p/python-gflags',
      py_modules=["gflags", "gflags_validators"],
      data_files=[("bin", ["gflags2man.py"])],
      include_package_data=True,
      )
| bsd-3-clause |
kost/volatility | volatility/plugins/mac/procdump.py | 12 | 3039 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.pstasks as mac_tasks
import volatility.plugins.mac.common as mac_common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_procdump(mac_tasks.mac_tasks):
""" Dumps the executable of a process """
def __init__(self, config, *args, **kwargs):
mac_tasks.mac_tasks.__init__(self, config, *args, **kwargs)
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'Output directory', action = 'store', type = 'str')
def unified_output(self, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
return TreeGrid([("Task", str),
("Pid", int),
("Address", Address),
("Path", str),
], self.generator(data))
def generator(self, data):
for proc in data:
exe_address = proc.text_start()
if exe_address:
file_path = mac_common.write_macho_file(self._config.DUMP_DIR, proc, exe_address)
yield (0, [
str(proc.p_comm),
int(proc.p_pid),
Address(exe_address),
str(file_path),
])
def render_text(self, outfd, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
self.table_header(outfd, [("Task", "25"),
("Pid", "6"),
("Address", "[addrpad]"),
("Path", "")])
for proc in data:
exe_address = proc.text_start()
if exe_address:
file_path = mac_common.write_macho_file(self._config.DUMP_DIR, proc, exe_address)
self.table_row(outfd, proc.p_comm, proc.p_pid, exe_address, file_path)
| gpl-2.0 |
e-koch/VLA_Lband | 14B-088/Cal_Scripts/EVLA_pipeline1.3.0/EVLA_pipe_testBPdcals.py | 2 | 21896 | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
logprint ("Starting EVLA_pipe_testBPdcals.py", logfileout='logs/testBPdcals.log')
time_list=runtiming('testBPdcals', 'start')
QA2_testBPdcals='Pass'
# INITIAL TEST CALIBRATIONS USING BANDPASS AND DELAY CALIBRATORS
logprint ("Finding a reference antenna", logfileout='logs/testBPdcals.log')
refantspw=''
refantfield=calibrator_field_select_string
findrefant=RefAntHeuristics(vis=ms_active,field=refantfield,geometry=True,flagging=True)
RefAntOutput=findrefant.calculate()
refAnt=str(RefAntOutput[0])
logprint ("The pipeline will use antenna "+refAnt+" as the reference", logfileout='logs/testBPdcals.log')
logprint ("Doing test calibrations", logfileout='logs/testBPdcals.log')
# Do initial phase solutions on the delay calibrator
syscommand='rm -rf testdelayinitialgain.g'
os.system(syscommand)
if (cal3C84_d == True):
default('gaincal')
vis = ms_active
caltable = 'testdelayinitialgain.g'
field = delay_field_select_string
spw = tst_delay_spw
intent = ''
selectdata = True
uvrange = uvrange3C84
scan = delay_scan_select_string
solint = 'int'
combine = 'scan'
preavg = -1.0
refant = refAnt
minblperant = minBL_for_cal
minsnr = 3.0
solnorm = False
gaintype = 'G'
smodel = []
calmode = 'p'
append = False
docallib = False
gaintable = priorcals
gainfield = ['']
interp = ['']
spwmap = []
parang = False
async = False
gaincal()
else:
default('gaincal')
vis = ms_active
caltable = 'testdelayinitialgain.g'
field = delay_field_select_string
spw = ''
intent = ''
selectdata = True
uvrange = ''
scan = delay_scan_select_string
solint = 'int'
combine = 'scan'
preavg = -1.0
refant = refAnt
minblperant = minBL_for_cal
minsnr = 3.0
solnorm = False
gaintype = 'G'
smodel = []
calmode = 'p'
append = False
docallib = False
gaintable = priorcals
gainfield = ['']
interp = ['']
spwmap = []
parang = False
async = False
gaincal()
logprint ("Initial phase calibration on delay calibrator complete", logfileout='logs/testBPdcals.log')
# Do initial test delay calibration ("test" because more flagging may be
# needed for the final version)
# For the future: investigate multiband delay
syscommand='rm -rf testdelay.k'
os.system(syscommand)
flaggedSolnResult=testdelays(ms_active,'testdelay.k',delay_field_select_string,delay_scan_select_string,refAnt,minBL_for_cal,priorcals,cal3C84_d,uvrange3C84)
logprint("Fraction of flagged solutions = "+str(flaggedSolnResult['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult['all']['total'] > 0):
fracFlaggedSolns=flaggedSolnResult['antmedian']['fraction']
else:
fracFlaggedSolns=1.0
# NB: in case the reference antenna has a bad baseband/IF, check
# a couple of reference antennas if there is a high fraction of
# flagged solutions
if (fracFlaggedSolns > critfrac):
logprint ("Not enough good solutions, trying a different reference antenna", logfileout='logs/testBPdcals.log')
refAnt=str(RefAntOutput[1])
logprint ("The pipeline will use antenna "+refAnt+" as the reference", logfileout='logs/testBPdcals.log')
flaggedSolnResult=testdelays(ms_active,'testdelay.k',delay_field_select_string,delay_scan_select_string,refAnt,minBL_for_cal,priorcals,cal3C84_d,uvrange3C84)
logprint("Fraction of flagged solutions = "+str(flaggedSolnResult['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult['all']['total'] > 0):
fracFlaggedSolns=flaggedSolnResult['antmedian']['fraction']
else:
fracFlaggedSolns=1.0
if (fracFlaggedSolns > critfrac):
logprint ("Not enough good solutions, trying a different reference antenna", logfileout='logs/testBPdcals.log')
refAnt=str(RefAntOutput[2])
logprint ("The pipeline will use antenna "+refAnt+" as the reference", logfileout='logs/testBPdcals.log')
flaggedSolnResult=testdelays(ms_active,'testdelay.k',delay_field_select_string,delay_scan_select_string,refAnt,minBL_for_cal,priorcals,cal3C84_d,uvrange3C84)
logprint("Fraction of flagged solutions = "+str(flaggedSolnResult['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult['all']['total'] > 0):
fracFlaggedSolns=flaggedSolnResult['antmedian']['fraction']
else:
fracFlaggedSolns=1.0
if (fracFlaggedSolns > critfrac):
logprint ("Not enough good solutions, trying a different reference antenna", logfileout='logs/testBPdcals.log')
refAnt=str(RefAntOutput[3])
logprint ("The pipeline will use antenna "+refAnt+" as the reference", logfileout='logs/testBPdcals.log')
flaggedSolnResult=testdelays(ms_active,'testdelay.k',delay_field_select_string,delay_scan_select_string,refAnt,minBL_for_cal,priorcals,cal3C84_d,uvrange3C84)
logprint("Fraction of flagged solutions = "+str(flaggedSolnResult['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult['all']['total'] > 0):
fracFlaggedSolns=flaggedSolnResult['antmedian']['fraction']
else:
fracFlaggedSolns=1.0
if (fracFlaggedSolns > critfrac):
logprint ("WARNING, tried several reference antennas, there might be something wrong with your data", logfileout='logs/testBPdcals.log')
logprint ("Plotting test delays", logfileout='logs/testBPdcals.log')
nplots=int(numAntenna/3)
if ((numAntenna%3)>0):
nplots = nplots + 1
for ii in range(nplots):
filename='testdelay'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
antPlot=str(ii*3)+'~'+str(ii*3+2)
default('plotcal')
caltable='testdelay.k'
xaxis='freq'
yaxis='delay'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[]
showflags=False
plotsymbol='o'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
async=False
plotcal()
# Do initial amplitude and phase gain solutions on the BPcalibrator and delay
# calibrator; the amplitudes are used for flagging; only phase
# calibration is applied in final BP calibration, so that solutions are
# not normalized per spw and take out the baseband filter shape
# Try running with solint of int_time, 3*int_time, and 10*int_time.
# If there is still a large fraction of failed solutions with
# solint=10*int_time the source may be too weak, and calibration via the
# pipeline has failed; will need to implement a mode to cope with weak
# calibrators (later)
if (delay_scan_select_string == bandpass_scan_select_string):
testgainscans=bandpass_scan_select_string
else:
testgainscans=bandpass_scan_select_string+','+delay_scan_select_string
if ((cal3C84_d == True) or (cal3C84_bp == True)):
cal3C84=True
else:
cal3C84=False
syscommand='rm -rf testBPdinitialgain.g'
os.system(syscommand)
soltime=int_time
solint='int'
flaggedSolnResult1=testBPdgains(ms_active,'testBPdinitialgain.g',tst_bpass_spw,testgainscans,solint,refAnt,minBL_for_cal,priorcals,cal3C84,uvrange3C84)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult1['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult1['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult1['all']['total'] > 0):
fracFlaggedSolns1=flaggedSolnResult1['antmedian']['fraction']
else:
fracFlaggedSolns1=1.0
gain_solint1=solint
shortsol1=soltime
if (fracFlaggedSolns1 > 0.05):
soltime=3.0*int_time
solint=str(soltime)+'s'
flaggedSolnResult3=testBPdgains(ms_active,'testBPdinitialgain3.g',tst_bpass_spw,testgainscans,solint,refAnt,minBL_for_cal,priorcals,cal3C84,uvrange3C84)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult3['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult3['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult3['all']['total'] > 0):
fracFlaggedSolns3=flaggedSolnResult3['antmedian']['fraction']
else:
fracFlaggedSolns3=1.0
if (fracFlaggedSolns3 < fracFlaggedSolns1):
gain_solint1=solint
shortsol1=soltime
syscommand='rm -rf testBPdinitialgain.g'
os.system(syscommand)
syscommand='mv testBPdinitialgain3.g testBPdinitialgain.g'
os.system(syscommand)
if (fracFlaggedSolns3 > 0.05):
soltime=10.0*int_time
solint=str(soltime)+'s'
flaggedSolnResult10=testBPdgains(ms_active,'testBPdinitialgain10.g',tst_bpass_spw,testgainscans,solint,refAnt,minBL_for_cal,priorcals,cal3C84,uvrange3C84)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult10['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult10['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
if (flaggedSolnResult10['all']['total'] > 0):
fracFlaggedSolns10=flaggedSolnResult10['antmedian']['fraction']
else:
fracFlaggedSolns10=1.0
if (fracFlaggedSolns10 < fracFlaggedSolns3):
gain_solint1=solint
shortsol1=soltime
syscommand='rm -rf testBPdinitialgain.g'
os.system(syscommand)
syscommand='mv testBPdinitialgain10.g testBPdinitialgain.g'
os.system(syscommand)
if (fracFlaggedSolns10 > 0.05):
logprint ("WARNING, large fraction of flagged solutions, there might be something wrong with your data", logfileout='logs/testBPdcals.log')
logprint ("Test amp and phase calibration on delay and bandpass calibrators complete", logfileout='logs/testBPdcals.log')
logprint ("Using short solint = "+gain_solint1, logfileout='logs/testBPdcals.log')
# Plot amplitude gain solutions
logprint ("Plotting amplitude gain solutions", logfileout='logs/testBPdcals.log')
tb.open('testBPdinitialgain.g')
cpar=tb.getcol('CPARAM')
flgs=tb.getcol('FLAG')
tb.close()
amps=np.abs(cpar)
good=np.logical_not(flgs)
maxamp=np.max(amps[good])
plotmax=maxamp
for ii in range(nplots):
filename='testBPdinitialgainamp'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testBPdinitialgain.g'
xaxis='time'
yaxis='amp'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[0,0,0,plotmax]
showflags=False
plotsymbol='o'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
async=False
plotcal()
# Plot phase gain solutions
logprint ("Plotting phase gain solutions", logfileout='logs/testBPdcals.log')
for ii in range(nplots):
filename='testBPdinitialgainphase'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testBPdinitialgain.g'
xaxis='time'
yaxis='phase'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[0,0,-180,180]
showflags=False
plotsymbol='o-'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
async=False
plotcal()
# Now do test BPcal
logprint ("Doing test bandpass calibration", logfileout='logs/testBPdcals.log')
syscommand='rm -rf testBPcal.b'
os.system(syscommand)
BPGainTables=copy.copy(priorcals)
BPGainTables.append('testdelay.k')
BPGainTables.append('testBPdinitialgain.g')
if (cal3C84_bp == True):
default('bandpass')
vis=ms_active
caltable='testBPcal.b'
field=bandpass_field_select_string
spw=''
intent=''
selectdata=True
uvrange=uvrange3C84
scan=bandpass_scan_select_string
solint='inf'
combine='scan'
refant=refAnt
minblperant=minBL_for_cal
minsnr=5.0
solnorm=False
bandtype='B'
fillgaps=0
smodel=[]
append=False
docallib=False
gaintable=BPGainTables
gainfield=['']
interp=['']
spwmap=[]
parang=False
async=False
bandpass()
else:
default('bandpass')
vis=ms_active
caltable='testBPcal.b'
field=bandpass_field_select_string
spw=''
intent=''
selectdata=True
uvrange=''
scan=bandpass_scan_select_string
solint='inf'
combine='scan'
refant=refAnt
minblperant=minBL_for_cal
minsnr=5.0
solnorm=False
bandtype='B'
fillgaps=0
smodel=[]
append=False
docallib=False
gaintable=BPGainTables
gainfield=['']
interp=['']
spwmap=[]
parang=False
async=False
bandpass()
logprint ("Test bandpass calibration complete", logfileout='logs/testBPdcals.log')
flaggedSolnResult=getCalFlaggedSoln('testBPcal.b')
logprint("Fraction of flagged solutions = "+str(flaggedSolnResult['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
# Plot BP solutions and check for missing spws, antennas, etc.
tb.open('testBPcal.b')
dataVarCol = tb.getvarcol('CPARAM')
flagVarCol = tb.getvarcol('FLAG')
tb.close()
rowlist = dataVarCol.keys()
nrows = len(rowlist)
maxmaxamp = 0.0
maxmaxphase = 0.0
for rrow in rowlist:
dataArr = dataVarCol[rrow]
flagArr = flagVarCol[rrow]
amps=np.abs(dataArr)
phases=np.arctan2(np.imag(dataArr),np.real(dataArr))
good=np.logical_not(flagArr)
tmparr=amps[good]
if (len(tmparr)>0):
maxamp=np.max(amps[good])
if (maxamp>maxmaxamp):
maxmaxamp=maxamp
tmparr=np.abs(phases[good])
if (len(tmparr)>0):
maxphase=np.max(np.abs(phases[good]))*180./pi
if (maxphase>maxmaxphase):
maxmaxphase=maxphase
ampplotmax=maxmaxamp
phaseplotmax=maxmaxphase
for ii in range(nplots):
filename='testBPcal_amp'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testBPcal.b'
xaxis='freq'
yaxis='amp'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[0,0,0,ampplotmax]
showflags=False
plotsymbol='o'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
async=False
plotcal()
for ii in range(nplots):
filename='testBPcal_phase'+str(ii)+'.png'
syscommand='rm -rf '+filename
os.system(syscommand)
#
antPlot=str(ii*3)+'~'+str(ii*3+2)
#
default('plotcal')
caltable='testBPcal.b'
xaxis='freq'
yaxis='phase'
poln=''
field=''
antenna=antPlot
spw=''
timerange=''
subplot=311
overplot=False
clearpanel='Auto'
iteration='antenna'
plotrange=[0,0,-phaseplotmax,phaseplotmax]
showflags=False
plotsymbol='o'
plotcolor='blue'
markersize=5.0
fontsize=10.0
showgui=False
figfile=filename
async=False
plotcal()
logprint ("Plotting of test bandpass solutions complete", logfileout='logs/testBPdcals.log')
# Do blcal to take out closure errors from source structure for plotting
# NB: would be good to be able to specify smodel=[1,0,0,0] here since
# otherwise this only works for sources that are not primary calibrators
# NB: blcal crashes if a spw is missing from gaintable, can't use this
# for now
#default('blcal')
#vis=ms_active
#caltable='testBPblcal.bl'
#field=''
#spw=''
#selectdata=True
#scans=testgainscans
#solint='30min'
#combine='scan'
#freqdep=False
#calmode='ap'
#solnorm=False
#gaintable=[priorcals,'testdelay.k','testBPdinitialgain.g','testBPcal.b']
#gainfield=['']
#interp=['']
#spwmap=[]
#parang=False
#async=False
#blcal()
# NB: level of blcal corrections are an indicator of pointy-ness of
# BPcal and delay cal and/or system health
# Apply gain and bandpass solutions and inspect calibrated BP and delay
# calibrator data for RFI or other problems
logprint ("Applying test calibrations to BP and delay calibrators", logfileout='logs/testBPdcals.log')
AllCalTables=copy.copy(priorcals)
AllCalTables.append('testdelay.k')
AllCalTables.append('testBPdinitialgain.g')
AllCalTables.append('testBPcal.b')
ntables=len(AllCalTables)
default('applycal')
vis=ms_active
field=''
spw=''
intent=''
selectdata=True
scan=testgainscans
docallib=False
gaintable=AllCalTables
interp=['']
spwmap=[]
calwt=[False]*ntables
parang=False
flagbackup=False
async=False
applycal()
logprint ("Plot calibrated bandpass and delay calibrators", logfileout='logs/testBPdcals.log')
syscommand='rm -rf testcalibratedBPcal.png'
os.system(syscommand)
default('plotms')
vis=ms_active
xaxis='freq'
yaxis='amp'
ydatacolumn='corrected'
selectdata=True
field=bandpass_field_select_string
scan=bandpass_scan_select_string
correlation=corrstring
averagedata=True
avgtime='1e8s'
avgscan=True
transform=False
extendflag=False
iteraxis=''
coloraxis='antenna2'
plotrange=[]
title=''
xlabel=''
ylabel=''
showmajorgrid=False
showminorgrid=False
plotfile='testcalibratedBPcal.png'
overwrite=True
showgui=False
async=False
plotms()
# Plot calibrated delay calibrator, if different from BP cal
if (delay_scan_select_string != bandpass_scan_select_string):
syscommand='rm -rf testcalibrated_delaycal.png'
os.system(syscommand)
default('plotms')
vis=ms_active
xaxis='freq'
yaxis='amp'
ydatacolumn='corrected'
selectdata=True
scan=delay_scan_select_string
correlation=corrstring
averagedata=True
avgtime='1e8s'
avgscan=True
transform=False
extendflag=False
iteraxis=''
coloraxis='antenna2'
plotrange=[]
title=''
xlabel=''
ylabel=''
showmajorgrid=False
showminorgrid=False
plotfile='testcalibrated_delaycal.png'
overwrite=True
showgui=False
async=False
plotms()
# Calculate fractions of flagged solutions for final QA2
flaggedDelaySolns=getCalFlaggedSoln('testdelay.k')
flaggedGainSolns=getCalFlaggedSoln('testBPdinitialgain.g')
flaggedBPSolns=getCalFlaggedSoln('testBPcal.b')
if (flaggedDelaySolns['all']['total'] > 0):
if (flaggedDelaySolns['antmedian']['fraction'] > critfrac):
QA2_delay='Partial'
else:
QA2_delay='Pass'
else:
QA2_delay='Fail'
logprint ("QA2_delay: "+QA2_delay, logfileout='logs/testBPdcals.log')
if (flaggedGainSolns['all']['total'] > 0):
if (flaggedGainSolns['antmedian']['fraction'] > 0.1):
QA2_gain='Partial'
else:
QA2_gain='Pass'
else:
QA2_gain='Fail'
logprint ("QA2_gain: "+QA2_gain, logfileout='logs/testBPdcals.log')
if (flaggedBPSolns['all']['total'] > 0):
if (flaggedBPSolns['antmedian']['fraction'] > 0.2):
QA2_BP='Partial'
else:
QA2_BP='Pass'
else:
QA2_BP='Fail'
logprint ("QA2_BP: "+QA2_BP, logfileout='logs/testBPdcals.log')
if (QA2_delay=='Fail' or QA2_gain=='Fail' or QA2_BP=='Fail'):
QA2_testBPdcals='Fail'
elif (QA2_delay=='Partial' or QA2_gain=='Partial' or QA2_BP=='Partial'):
QA2_testBPdcals='Partial'
logprint ("QA2 score: "+QA2_testBPdcals, logfileout='logs/testBPdcals.log')
logprint ("Finished EVLA_pipe_testBPdcals.py", logfileout='logs/testBPdcals.log')
time_list=runtiming('testBPdcals', 'end')
pipeline_save()
| mit |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/lead_form_desired_intent.py | 1 | 1209 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'LeadFormDesiredIntentEnum',
},
)
class LeadFormDesiredIntentEnum(proto.Message):
r"""Describes the desired level of intent of generated leads. """
class LeadFormDesiredIntent(proto.Enum):
r"""Enum describing the desired level of intent of generated
leads.
"""
UNSPECIFIED = 0
UNKNOWN = 1
LOW_INTENT = 2
HIGH_INTENT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
Asana/boto | tests/integration/s3/test_bucket.py | 11 | 12500 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Bucket
"""
from mock import patch, Mock
import unittest
import time
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.s3.bucketlogging import BucketLogging
from boto.s3.lifecycle import Lifecycle
from boto.s3.lifecycle import Transition
from boto.s3.lifecycle import Expiration
from boto.s3.lifecycle import Rule
from boto.s3.acl import Grant
from boto.s3.tagging import Tags, TagSet
from boto.s3.website import RedirectLocation
from boto.compat import unquote_str
class S3BucketTest (unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'bucket-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_next_marker(self):
expected = ["a/", "b", "c"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# Normal list of first 2 keys will have
# no NextMarker set, so we use last key to iterate
# last element will be "b" so no issue.
rs = self.bucket.get_all_keys(max_keys=2)
for element in rs:
pass
self.assertEqual(element.name, "b")
self.assertEqual(rs.next_marker, None)
# list using delimiter of first 2 keys will have
# a NextMarker set (when truncated). As prefixes
# are grouped together at the end, we get "a/" as
# last element, but luckily we have next_marker.
rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
for element in rs:
pass
self.assertEqual(element.name, "a/")
self.assertEqual(rs.next_marker, "b")
# ensure bucket.list() still works by just
# popping elements off the front of expected.
rs = self.bucket.list()
for element in rs:
self.assertEqual(element.name, expected.pop(0))
self.assertEqual(expected, [])
def test_list_with_url_encoding(self):
expected = [u"α", u"β", u"γ"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# ensure bucket.list() still works by just
# popping elements off the front of expected.
orig_getall = self.bucket._get_all
getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k)
with patch.object(self.bucket, '_get_all', getall):
rs = self.bucket.list(encoding_type="url")
for element in rs:
name = unquote_str(element.name)
self.assertEqual(name, expected.pop(0))
self.assertEqual(expected, [])
def test_logging(self):
# use self.bucket as the target bucket so that teardown
# will delete any log files that make it into the bucket
# automatically and all we have to do is delete the
# source bucket.
sb_name = "src-" + self.bucket_name
sb = self.conn.create_bucket(sb_name)
# grant log write perms to target bucket using canned-acl
self.bucket.set_acl("log-delivery-write")
target_bucket = self.bucket_name
target_prefix = u"jp/ログ/"
# Check existing status is disabled
bls = sb.get_logging_status()
self.assertEqual(bls.target, None)
# Create a logging status and grant auth users READ PERM
authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
authr = Grant(permission="READ", type="Group", uri=authuri)
sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
# Check the status and confirm its set.
bls = sb.get_logging_status()
self.assertEqual(bls.target, target_bucket)
self.assertEqual(bls.prefix, target_prefix)
self.assertEqual(len(bls.grants), 1)
self.assertEqual(bls.grants[0].type, "Group")
self.assertEqual(bls.grants[0].uri, authuri)
# finally delete the src bucket
sb.delete()
def test_tagging(self):
tagging = """
<Tagging>
<TagSet>
<Tag>
<Key>tagkey</Key>
<Value>tagvalue</Value>
</Tag>
</TagSet>
</Tagging>
"""
self.bucket.set_xml_tags(tagging)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'tagkey')
self.assertEqual(response[0][0].value, 'tagvalue')
self.bucket.delete_tags()
try:
self.bucket.get_tags()
except S3ResponseError as e:
self.assertEqual(e.code, 'NoSuchTagSet')
except Exception as e:
self.fail("Wrong exception raised (expected S3ResponseError): %s"
% e)
else:
self.fail("Expected S3ResponseError, but no exception raised.")
def test_tagging_from_objects(self):
"""Create tags from python objects rather than raw xml."""
t = Tags()
tag_set = TagSet()
tag_set.add_tag('akey', 'avalue')
tag_set.add_tag('anotherkey', 'anothervalue')
t.add_tag_set(tag_set)
self.bucket.set_tags(t)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'akey')
self.assertEqual(response[0][0].value, 'avalue')
self.assertEqual(response[0][1].key, 'anotherkey')
self.assertEqual(response[0][1].value, 'anothervalue')
def test_website_configuration(self):
response = self.bucket.configure_website('index.html')
self.assertTrue(response)
config = self.bucket.get_website_configuration()
self.assertEqual(config, {'WebsiteConfiguration':
{'IndexDocument': {'Suffix': 'index.html'}}})
config2, xml = self.bucket.get_website_configuration_with_xml()
self.assertEqual(config, config2)
self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml)
def test_website_redirect_all_requests(self):
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {
'RedirectAllRequestsTo': {
'HostName': 'example.com'}}})
# Can configure the protocol as well.
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com', 'https'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {'RedirectAllRequestsTo': {
'HostName': 'example.com',
'Protocol': 'https',
}}}
)
def test_lifecycle(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertEqual(actual_lifecycle.id, 'myid')
self.assertEqual(actual_lifecycle.prefix, '')
self.assertEqual(actual_lifecycle.status, 'Enabled')
self.assertEqual(actual_lifecycle.transition, None)
def test_lifecycle_with_glacier_transition(self):
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
transition = response[0].transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(transition.date, None)
def test_lifecycle_multi(self):
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
readlifecycle = self.bucket.get_lifecycle_config();
for rule in readlifecycle:
if rule.id == "1":
self.assertEqual(rule.prefix, "1/")
self.assertEqual(rule.expiration.days, 1)
elif rule.id == "2":
self.assertEqual(rule.prefix, "2/")
self.assertEqual(rule.expiration.days, 2)
elif rule.id == "3":
self.assertEqual(rule.prefix, "3/")
self.assertEqual(rule.expiration.date, date)
elif rule.id == "4":
self.assertEqual(rule.prefix, "4/")
self.assertEqual(rule.transition.days, 4)
self.assertEqual(rule.transition.storage_class, sc)
elif rule.id == "5":
self.assertEqual(rule.prefix, "5/")
self.assertEqual(rule.transition.date, date)
self.assertEqual(rule.transition.storage_class, sc)
else:
self.fail("unexpected id %s" % rule.id)
def test_lifecycle_jp(self):
    """Lifecycle rule with a non-ASCII (Japanese) prefix round-trips."""
    # test lifecycle with Japanese prefix
    name = "Japanese files"
    prefix = "日本語/"
    days = 30
    lifecycle = Lifecycle()
    lifecycle.add_rule(name, prefix, "Enabled", days)
    # set the lifecycle
    self.bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    readlifecycle = self.bucket.get_lifecycle_config()
    # Guard against a vacuous pass: the loop below asserts nothing if the
    # service returned no rules at all.
    self.assertTrue(readlifecycle)
    for rule in readlifecycle:
        self.assertEqual(rule.id, name)
        self.assertEqual(rule.expiration.days, days)
        # Note: Boto seems correct? AWS seems broken?
        # self.assertEqual(rule.prefix, prefix)
def test_lifecycle_with_defaults(self):
    """Omitting id and prefix still yields a valid single-rule config."""
    config = Lifecycle()
    config.add_rule(expiration=30)
    self.assertTrue(self.bucket.configure_lifecycle(config))
    rules = self.bucket.get_lifecycle_config()
    self.assertEqual(len(rules), 1)
    rule = rules[0]
    # An id is auto-generated when none is supplied.
    self.assertNotEqual(len(rule.id), 0)
    self.assertEqual(rule.prefix, '')
def test_lifecycle_rule_xml(self):
    """Serializing a Rule built with defaults omits <ID> and emits an empty <Prefix>."""
    # create a rule directly with id, prefix defaults
    rule = Rule(status='Enabled', expiration=30)
    s = rule.to_xml()
    # Confirm no ID is set in the rule (assertNotIn reads better than
    # comparing str.find() against -1 and produces a clearer failure).
    self.assertNotIn("<ID>", s)
    # Confirm Prefix is '' and not set to 'None'
    self.assertIn("<Prefix></Prefix>", s)
| mit |
GioneeDevTeam/android_kernel_gionee_msm8974 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# Fail early with a clear message when wxPython is not installed.
try:
    import wx
except ImportError:
    # Parenthesized raise form: valid on both Python 2 and Python 3.
    # The original "raise ImportError, msg" syntax is a SyntaxError on
    # Python 3 and would prevent the module from even being parsed.
    raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
    # Top-level window rendering the scheduler trace as rows of rectangles
    # inside a scrollable, zoomable drawing area.

    # Layout constants (pixels).
    Y_OFFSET = 100            # vertical space reserved above the first row
    RECT_HEIGHT = 100         # height of one rectangle row
    RECT_SPACE = 50           # vertical gap between rows
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip atop a rectangle

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        # sched_tracer: provides interval(), nr_rectangles(), fill_zone(),
        # mouse_down() and receives set_root_win(self) so it can call back.
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5          # horizontal scale factor (px per time unit)
        self.scroll_scale = 20   # scrollbar step, in pixels per scroll unit
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        # NOTE(review): this file uses Python 2 syntax elsewhere, so the "/"
        # divisions below are integer divisions -- confirm before porting.
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        # Bind the same handlers on the container too, so events are caught
        # whether focus is on the panel or the scrolled window.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None   # summary StaticText, created lazily by update_summary()

        self.Show(True)

    def us_to_px(self, val):
        # Convert a timestamp delta to an x pixel offset (10**3 scaling plus
        # the current zoom). Presumably val is in nanoseconds and the "us"
        # name refers to the microsecond working unit -- TODO confirm.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px().
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin, converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current horizontal scroll origin expressed in time units.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        # Draw one rectangle on row `nr` spanning [start, end] timestamps.
        # When top_color is given, a thin marker strip is drawn above the
        # main body of the rectangle.
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the remaining body so the marker strip is not overdrawn.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to (re)draw everything in the visible time window.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint handler: compute the visible time window from the scroll
        # position and delegate drawing to update_rectangles().
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel coordinate to a row index, or -1 when the click
        # falls outside all rows (above the offset, below the last row,
        # or in the gap between rows).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        # Replace the summary text widget below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward clicks inside a row to the tracer as (row, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Recompute the full (virtual) drawing width for the current zoom.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-apply scrollbars after a zoom change, keeping timestamp x
        # anchored at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # Keyboard handling: +/- zoom (raw key code, layout dependent),
        # arrow keys scroll by one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
leilihh/cinder | cinder/api/contrib/hosts.py | 11 | 10321 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import functools
from xml.parsers import expat

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import webob.exc

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
    """XML template builder for the host index (list) response."""

    def construct(self):
        root = xmlutil.TemplateElement('hosts')
        host_elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
        # Declare every attribute exposed on each <host> entry, in the
        # same order the original template registered them.
        for attr in ('service-status', 'service', 'zone',
                     'service-state', 'host_name', 'last-update'):
            host_elem.set(attr)
        return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
    """XML template builder for the host update response."""

    def construct(self):
        root = xmlutil.TemplateElement('host')
        for attr in ('host', 'status'):
            root.set(attr)
        return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
    """XML template builder for host action responses."""

    def construct(self):
        element = xmlutil.TemplateElement('host')
        element.set('host')
        return xmlutil.MasterTemplate(element, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
    """XML template builder for the host detail (show) response."""

    def construct(self):
        root = xmlutil.TemplateElement('host')
        resource = xmlutil.make_flat_dict('resource', selector='host',
                                          subselector='resource')
        root.append(resource)
        return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
    """Deserialize an XML host-update request body into a dict."""

    def default(self, string):
        try:
            node = utils.safe_minidom_parse_string(string)
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

        # One entry per child element; later duplicates overwrite earlier
        # ones, matching the original loop's behavior.
        updates = {child.tagName: self.extract_text(child)
                   for child in node.childNodes[0].childNodes}
        return dict(body=updates)
def _list_hosts(req, service=None):
    """Returns a summary list of hosts."""
    curr_time = timeutils.utcnow()
    context = req.environ['cinder.context']
    services = db.service_get_all(context, False)

    # Optional availability-zone filter from the query string.
    zone = req.GET.get('zone', '')
    if zone:
        services = [s for s in services if s['availability_zone'] == zone]

    hosts = []
    for svc in services:
        delta = curr_time - (svc['updated_at'] or svc['created_at'])
        alive = abs(delta.total_seconds()) <= CONF.service_down_time
        status = 'available' if alive else 'unavailable'
        active = 'disabled' if svc['disabled'] else 'enabled'
        LOG.debug('status, active and update: %s, %s, %s',
                  status, active, svc['updated_at'])
        hosts.append({'host_name': svc['host'],
                      'service': svc['topic'],
                      'zone': svc['availability_zone'],
                      'service-status': status,
                      'service-state': active,
                      'last-update': svc['updated_at']})

    if service:
        hosts = [entry for entry in hosts if entry["service"] == service]
    return hosts
def check_host(fn):
    """Decorator that verifies the target host exists before calling ``fn``.

    Raises webob.exc.HTTPNotFound when ``id`` does not name a known host.
    """
    # functools.wraps preserves fn's __name__/__doc__ on the wrapper so
    # introspection and API docs see the real controller method.
    @functools.wraps(fn)
    def wrapped(self, req, id, service=None, *args, **kwargs):
        listed_hosts = _list_hosts(req, service)
        hosts = [h["host_name"] for h in listed_hosts]
        if id in hosts:
            return fn(self, req, id, *args, **kwargs)
        message = _("Host '%s' could not be found.") % id
        raise webob.exc.HTTPNotFound(explanation=message)
    return wrapped
class HostController(wsgi.Controller):
    """The Hosts API controller for the OpenStack API."""

    def __init__(self):
        self.api = volume_api.HostAPI()
        super(HostController, self).__init__()

    @wsgi.serializers(xml=HostIndexTemplate)
    def index(self, req):
        """Return a summary list of all known hosts."""
        authorize(req.environ['cinder.context'])
        return {'hosts': _list_hosts(req)}

    @wsgi.serializers(xml=HostUpdateTemplate)
    @wsgi.deserializers(xml=HostDeserializer)
    @check_host
    def update(self, req, id, body):
        """Update host settings; only 'status' (enable/disable) is supported.

        Any other key, or any status value besides enable/disable,
        results in HTTPBadRequest.
        """
        authorize(req.environ['cinder.context'])
        update_values = {}
        for raw_key, raw_val in body.items():
            key = raw_key.lower().strip()
            val = raw_val.lower().strip()
            if key == "status":
                if val in ("enable", "disable"):
                    update_values['status'] = val.startswith("enable")
                else:
                    explanation = _("Invalid status: '%s'") % raw_val
                    raise webob.exc.HTTPBadRequest(explanation=explanation)
            else:
                explanation = _("Invalid update setting: '%s'") % raw_key
                raise webob.exc.HTTPBadRequest(explanation=explanation)
        # Dispatch each validated value to its setter; currently only
        # 'status' exists, but the table keeps this extensible.
        update_setters = {'status': self._set_enabled_status}
        result = {}
        for key, value in update_values.items():
            result.update(update_setters[key](req, id, value))
        return result

    def _set_enabled_status(self, req, host, enabled):
        """Sets the specified host's ability to accept new volumes."""
        context = req.environ['cinder.context']
        state = "enabled" if enabled else "disabled"
        LOG.info(_LI("Setting host %(host)s to %(state)s."),
                 {'host': host, 'state': state})
        result = self.api.set_host_enabled(context,
                                           host=host,
                                           enabled=enabled)
        if result not in ("enabled", "disabled"):
            # An error message was returned
            raise webob.exc.HTTPBadRequest(explanation=result)
        return {"host": host, "status": result}

    @wsgi.serializers(xml=HostShowTemplate)
    def show(self, req, id):
        """Shows the volume usage info given by hosts.

        :param req: the WSGI request (carries the security context)
        :param id: hostname
        :returns: expected to use HostShowTemplate.
            ex.::

                {'host': {'resource':D},..}
                D: {'host': 'hostname','project': 'admin',
                    'volume_count': 1, 'total_volume_gb': 2048}
        """
        host = id
        context = req.environ['cinder.context']
        if not context.is_admin:
            msg = _("Describe-resource is admin only functionality")
            raise webob.exc.HTTPForbidden(explanation=msg)

        try:
            host_ref = db.service_get_by_host_and_topic(context,
                                                        host,
                                                        CONF.volume_topic)
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound(explanation=_("Host not found"))

        # Getting total available/used resource
        # TODO(jdg): Add summary info for Snapshots
        volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
        # 'total' renamed from 'sum' to stop shadowing the builtin sum().
        (count, total) = db.volume_data_get_for_host(context,
                                                     host_ref['host'])

        snap_count_total = 0
        snap_sum_total = 0
        # First entry is the host-wide total; the snapshot fields are
        # back-filled after the per-project loop below.
        resources = [{'resource': {'host': host, 'project': '(total)',
                                   'volume_count': str(count),
                                   'total_volume_gb': str(total),
                                   'snapshot_count': str(snap_count_total),
                                   'total_snapshot_gb': str(snap_sum_total)}}]

        project_ids = [v['project_id'] for v in volume_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            (count, total) = db.volume_data_get_for_project(context,
                                                            project_id)
            (snap_count, snap_sum) = (
                objects.Snapshot.snapshot_data_get_for_project(context,
                                                               project_id))
            resources.append(
                {'resource':
                    {'host': host,
                     'project': project_id,
                     'volume_count': str(count),
                     'total_volume_gb': str(total),
                     'snapshot_count': str(snap_count),
                     'total_snapshot_gb': str(snap_sum)}})
            snap_count_total += int(snap_count)
            snap_sum_total += int(snap_sum)
        resources[0]['resource']['snapshot_count'] = str(snap_count_total)
        resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)

        return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
    """Admin-only host administration."""

    name = "Hosts"
    alias = "os-hosts"
    namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
    updated = "2011-06-29T00:00:00+00:00"

    def get_resources(self):
        """Register the os-hosts resource and its extra actions."""
        resource = extensions.ResourceExtension(
            'os-hosts',
            HostController(),
            collection_actions={'update': 'PUT'},
            member_actions={'startup': 'GET',
                            'shutdown': 'GET',
                            'reboot': 'GET'})
        return [resource]
| apache-2.0 |
JshWright/home-assistant | tests/util/test_yaml.py | 6 | 15572 | """Test Home Assistant yaml loader."""
import io
import os
import unittest
from unittest.mock import patch
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import yaml
from homeassistant.config import YAML_CONFIG_FILE, load_yaml_config_file
from tests.common import get_test_config_dir, patch_yaml_files
class TestYaml(unittest.TestCase):
    """Test util.yaml loader."""

    # pylint: disable=no-self-use, invalid-name

    def test_simple_list(self):
        """Test simple list."""
        conf = "config:\n - simple\n - list"
        with io.StringIO(conf) as file:
            doc = yaml.yaml.safe_load(file)
        assert doc['config'] == ["simple", "list"]

    def test_simple_dict(self):
        """Test simple dict."""
        conf = "key: value"
        with io.StringIO(conf) as file:
            doc = yaml.yaml.safe_load(file)
        assert doc['key'] == 'value'

    def test_duplicate_key(self):
        """Test that duplicate dict keys raise HomeAssistantError."""
        files = {YAML_CONFIG_FILE: 'key: thing1\nkey: thing2'}
        with self.assertRaises(HomeAssistantError):
            with patch_yaml_files(files):
                load_yaml_config_file(YAML_CONFIG_FILE)

    def test_unhashable_key(self):
        """Test that an unhashable key raises HomeAssistantError."""
        files = {YAML_CONFIG_FILE: 'message:\n {{ states.state }}'}
        with self.assertRaises(HomeAssistantError), \
                patch_yaml_files(files):
            load_yaml_config_file(YAML_CONFIG_FILE)

    def test_no_key(self):
        """Test an item without a key raises HomeAssistantError."""
        files = {YAML_CONFIG_FILE: 'a: a\nnokeyhere'}
        with self.assertRaises(HomeAssistantError), \
                patch_yaml_files(files):
            yaml.load_yaml(YAML_CONFIG_FILE)

    def test_enviroment_variable(self):
        """Test config file with an environment variable."""
        os.environ["PASSWORD"] = "secret_password"
        conf = "password: !env_var PASSWORD"
        with io.StringIO(conf) as file:
            doc = yaml.yaml.safe_load(file)
        assert doc['password'] == "secret_password"
        del os.environ["PASSWORD"]

    def test_invalid_enviroment_variable(self):
        """Test config file referencing an environment variable that is not set."""
        conf = "password: !env_var PASSWORD"
        with self.assertRaises(HomeAssistantError):
            with io.StringIO(conf) as file:
                yaml.yaml.safe_load(file)

    def test_include_yaml(self):
        """Test the !include tag, including an empty included file."""
        with patch_yaml_files({'test.yaml': 'value'}):
            conf = 'key: !include test.yaml'
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert doc["key"] == "value"

        # An empty included file yields an empty dict, not None.
        with patch_yaml_files({'test.yaml': None}):
            conf = 'key: !include test.yaml'
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert doc["key"] == {}

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_list(self, mock_walk):
        """Test include dir list yaml."""
        mock_walk.return_value = [
            ['/tmp', [], ['one.yaml', 'two.yaml']],
        ]

        with patch_yaml_files({
                '/tmp/one.yaml': 'one',
                '/tmp/two.yaml': 'two',
        }):
            conf = "key: !include_dir_list /tmp"
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert sorted(doc["key"]) == sorted(["one", "two"])

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_list_recursive(self, mock_walk):
        """Test include dir recursive list yaml."""
        mock_walk.return_value = [
            ['/tmp', ['tmp2', '.ignore', 'ignore'], ['zero.yaml']],
            ['/tmp/tmp2', [], ['one.yaml', 'two.yaml']],
            ['/tmp/ignore', [], ['.ignore.yaml']]
        ]

        with patch_yaml_files({
                '/tmp/zero.yaml': 'zero',
                '/tmp/tmp2/one.yaml': 'one',
                '/tmp/tmp2/two.yaml': 'two'
        }):
            conf = "key: !include_dir_list /tmp"
            with io.StringIO(conf) as file:
                assert '.ignore' in mock_walk.return_value[0][1], \
                    "Expecting .ignore in here"
                doc = yaml.yaml.safe_load(file)
                # The loader must prune dot-prefixed dirs from the walk
                # but keep normal subdirectories like 'tmp2'.
                assert 'tmp2' in mock_walk.return_value[0][1]
                assert '.ignore' not in mock_walk.return_value[0][1]
                assert sorted(doc["key"]) == sorted(["zero", "one", "two"])

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_named(self, mock_walk):
        """Test include dir named yaml."""
        mock_walk.return_value = [
            ['/tmp', [], ['first.yaml', 'second.yaml']]
        ]

        with patch_yaml_files({
                '/tmp/first.yaml': 'one',
                '/tmp/second.yaml': 'two'
        }):
            conf = "key: !include_dir_named /tmp"
            correct = {'first': 'one', 'second': 'two'}
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert doc["key"] == correct

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_named_recursive(self, mock_walk):
        """Test include dir named yaml, recursing into subdirectories."""
        mock_walk.return_value = [
            ['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
            ['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
            ['/tmp/ignore', [], ['.ignore.yaml']]
        ]

        with patch_yaml_files({
                '/tmp/first.yaml': 'one',
                '/tmp/tmp2/second.yaml': 'two',
                '/tmp/tmp2/third.yaml': 'three'
        }):
            conf = "key: !include_dir_named /tmp"
            correct = {'first': 'one', 'second': 'two', 'third': 'three'}
            with io.StringIO(conf) as file:
                assert '.ignore' in mock_walk.return_value[0][1], \
                    "Expecting .ignore in here"
                doc = yaml.yaml.safe_load(file)
                assert 'tmp2' in mock_walk.return_value[0][1]
                assert '.ignore' not in mock_walk.return_value[0][1]
                assert doc["key"] == correct

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_merge_list(self, mock_walk):
        """Test include dir merge list yaml."""
        mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]

        with patch_yaml_files({
                '/tmp/first.yaml': '- one',
                '/tmp/second.yaml': '- two\n- three'
        }):
            conf = "key: !include_dir_merge_list /tmp"
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert sorted(doc["key"]) == sorted(["one", "two", "three"])

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_merge_list_recursive(self, mock_walk):
        """Test include dir merge list yaml, recursing into subdirectories."""
        mock_walk.return_value = [
            ['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
            ['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
            ['/tmp/ignore', [], ['.ignore.yaml']]
        ]

        with patch_yaml_files({
                '/tmp/first.yaml': '- one',
                '/tmp/tmp2/second.yaml': '- two',
                '/tmp/tmp2/third.yaml': '- three\n- four'
        }):
            conf = "key: !include_dir_merge_list /tmp"
            with io.StringIO(conf) as file:
                assert '.ignore' in mock_walk.return_value[0][1], \
                    "Expecting .ignore in here"
                doc = yaml.yaml.safe_load(file)
                assert 'tmp2' in mock_walk.return_value[0][1]
                assert '.ignore' not in mock_walk.return_value[0][1]
                assert sorted(doc["key"]) == sorted(["one", "two",
                                                     "three", "four"])

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_merge_named(self, mock_walk):
        """Test include dir merge named yaml."""
        mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]

        files = {
            '/tmp/first.yaml': 'key1: one',
            '/tmp/second.yaml': 'key2: two\nkey3: three',
        }

        with patch_yaml_files(files):
            conf = "key: !include_dir_merge_named /tmp"
            with io.StringIO(conf) as file:
                doc = yaml.yaml.safe_load(file)
            assert doc["key"] == {
                "key1": "one",
                "key2": "two",
                "key3": "three"
            }

    @patch('homeassistant.util.yaml.os.walk')
    def test_include_dir_merge_named_recursive(self, mock_walk):
        """Test include dir merge named yaml, recursing into subdirectories."""
        mock_walk.return_value = [
            ['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
            ['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
            ['/tmp/ignore', [], ['.ignore.yaml']]
        ]

        with patch_yaml_files({
                '/tmp/first.yaml': 'key1: one',
                '/tmp/tmp2/second.yaml': 'key2: two',
                '/tmp/tmp2/third.yaml': 'key3: three\nkey4: four'
        }):
            conf = "key: !include_dir_merge_named /tmp"
            with io.StringIO(conf) as file:
                assert '.ignore' in mock_walk.return_value[0][1], \
                    "Expecting .ignore in here"
                doc = yaml.yaml.safe_load(file)
                assert 'tmp2' in mock_walk.return_value[0][1]
                assert '.ignore' not in mock_walk.return_value[0][1]
                assert doc["key"] == {
                    "key1": "one",
                    "key2": "two",
                    "key3": "three",
                    "key4": "four"
                }

    @patch('homeassistant.util.yaml.open', create=True)
    def test_load_yaml_encoding_error(self, mock_open):
        """Test raising a UnicodeDecodeError."""
        mock_open.side_effect = UnicodeDecodeError('', b'', 1, 0, '')
        self.assertRaises(HomeAssistantError, yaml.load_yaml, 'test')

    def test_dump(self):
        """Test that the dump method renders None values as empty."""
        assert yaml.dump({'a': None, 'b': 'b'}) == 'a:\nb: b\n'
FILES = {}
def load_yaml(fname, string):
    """Register *string* under *fname* and return it parsed as a YAML config."""
    FILES[fname] = string
    with patch_yaml_files(FILES):
        parsed = load_yaml_config_file(fname)
    return parsed
class FakeKeyring():
    """Minimal stand-in for the keyring backend used by the secrets loader."""

    def __init__(self, secrets_dict):
        """Store keyring dictionary."""
        self._secrets = secrets_dict

    # pylint: disable=protected-access
    def get_password(self, domain, name):
        """Retrieve the password stored under *name*."""
        # The loader must always look secrets up in its own namespace.
        assert domain == yaml._SECRET_NAMESPACE
        stored = self._secrets.get(name)
        return stored
class TestSecrets(unittest.TestCase):
    """Test the secrets parameter in the yaml utility."""

    # pylint: disable=protected-access,invalid-name

    def setUp(self):
        """Create & load secrets file."""
        config_dir = get_test_config_dir()
        yaml.clear_secret_cache()
        self._yaml_path = os.path.join(config_dir, YAML_CONFIG_FILE)
        self._secret_path = os.path.join(config_dir, yaml._SECRET_YAML)
        self._sub_folder_path = os.path.join(config_dir, 'subFolder')
        self._unrelated_path = os.path.join(config_dir, 'unrelated')

        # Register the secrets file first so the config below can resolve
        # its !secret references.
        load_yaml(self._secret_path,
                  'http_pw: pwhttp\n'
                  'comp1_un: un1\n'
                  'comp1_pw: pw1\n'
                  'stale_pw: not_used\n'
                  'logger: debug\n')
        self._yaml = load_yaml(self._yaml_path,
                               'http:\n'
                               ' api_password: !secret http_pw\n'
                               'component:\n'
                               ' username: !secret comp1_un\n'
                               ' password: !secret comp1_pw\n'
                               '')

    def tearDown(self):
        """Clean up secrets."""
        yaml.clear_secret_cache()
        FILES.clear()

    def test_secrets_from_yaml(self):
        """Did secrets load ok."""
        expected = {'api_password': 'pwhttp'}
        self.assertEqual(expected, self._yaml['http'])

        expected = {
            'username': 'un1',
            'password': 'pw1'}
        self.assertEqual(expected, self._yaml['component'])

    def test_secrets_from_parent_folder(self):
        """Test loading secrets from the parent folder."""
        expected = {'api_password': 'pwhttp'}
        # The config lives in a subfolder; its secrets come from the
        # secrets file registered in the parent (config) folder in setUp.
        self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
                               'http:\n'
                               ' api_password: !secret http_pw\n'
                               'component:\n'
                               ' username: !secret comp1_un\n'
                               ' password: !secret comp1_pw\n'
                               '')

        self.assertEqual(expected, self._yaml['http'])

    def test_secret_overrides_parent(self):
        """Test loading current directory secret overrides the parent."""
        expected = {'api_password': 'override'}
        # A secrets file in the subfolder shadows the parent's http_pw.
        load_yaml(os.path.join(self._sub_folder_path, yaml._SECRET_YAML),
                  'http_pw: override')
        self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
                               'http:\n'
                               ' api_password: !secret http_pw\n'
                               'component:\n'
                               ' username: !secret comp1_un\n'
                               ' password: !secret comp1_pw\n'
                               '')

        self.assertEqual(expected, self._yaml['http'])

    def test_secrets_from_unrelated_fails(self):
        """Test loading secrets from an unrelated folder fails."""
        load_yaml(os.path.join(self._unrelated_path, yaml._SECRET_YAML),
                  'test: failure')
        with self.assertRaises(HomeAssistantError):
            load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
                      'http:\n'
                      ' api_password: !secret test')

    def test_secrets_keyring(self):
        """Test keyring fallback & get_password."""
        yaml.keyring = None  # Ensure its not there
        yaml_str = 'http:\n api_password: !secret http_pw_keyring'
        # Without a keyring the unknown secret must raise.
        with self.assertRaises(yaml.HomeAssistantError):
            load_yaml(self._yaml_path, yaml_str)

        # With a (fake) keyring the secret resolves through get_password.
        yaml.keyring = FakeKeyring({'http_pw_keyring': 'yeah'})
        _yaml = load_yaml(self._yaml_path, yaml_str)
        self.assertEqual({'http': {'api_password': 'yeah'}}, _yaml)

    def test_secrets_logger_removed(self):
        """Ensure logger: debug was removed."""
        with self.assertRaises(yaml.HomeAssistantError):
            load_yaml(self._yaml_path, 'api_password: !secret logger')

    @patch('homeassistant.util.yaml._LOGGER.error')
    def test_bad_logger_value(self, mock_error):
        """Ensure an invalid logger: value is reported via _LOGGER.error."""
        yaml.clear_secret_cache()
        load_yaml(self._secret_path, 'logger: info\npw: abc')
        load_yaml(self._yaml_path, 'api_password: !secret pw')
        assert mock_error.call_count == 1, \
            "Expected an error about logger: value"
def test_representing_yaml_loaded_data():
    """Test we can represent YAML loaded data."""
    files = {YAML_CONFIG_FILE: 'key: [1, "2", 3]'}
    with patch_yaml_files(files):
        loaded = load_yaml_config_file(YAML_CONFIG_FILE)
    # Dumping needs no file access, so it happens outside the patch.
    assert yaml.dump(loaded) == "key:\n- 1\n- '2'\n- 3\n"
| apache-2.0 |
pblottiere/QGIS | tests/src/python/test_qgspallabeling_placement.py | 29 | 55022 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite of render check tests
Class is meant to be inherited by classes that test different labeling outputs
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2015-08-24'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
import sys
from qgis.PyQt.QtCore import QThreadPool, qDebug
from qgis.core import (QgsLabelingEngineSettings,
QgsPalLayerSettings,
QgsSingleSymbolRenderer,
QgsMarkerSymbol,
QgsProperty,
QgsVectorLayerSimpleLabeling,
QgsLabelObstacleSettings,
QgsLabeling)
from utilities import getTempfilePath, renderMapToImage, mapSettingsString
from test_qgspallabeling_base import TestQgsPalLabeling, runSuite
from qgis.testing import unittest
# noinspection PyPep8Naming
class TestPlacementBase(TestQgsPalLabeling):
    # Base class for label-placement render checks: each test renders the
    # current map settings and compares the output against a control image.

    @classmethod
    def setUpClass(cls):
        if not cls._BaseSetup:
            TestQgsPalLabeling.setUpClass()

    @classmethod
    def tearDownClass(cls):
        TestQgsPalLabeling.tearDownClass()

    def setUp(self):
        """Run before each test."""
        super(TestPlacementBase, self).setUp()
        self.removeAllLayers()
        self.configTest('pal_placement', 'sp')
        # Reset per-test comparison state.
        self._TestImage = ''
        self._Mismatch = 0
        self._ColorTol = 0
        self._Mismatches.clear()
        self._ColorTols.clear()

        # render only rectangles of the placed labels
        engine_settings = QgsLabelingEngineSettings()
        engine_settings.setPlacementVersion(QgsLabelingEngineSettings.PlacementEngineVersion2)
        engine_settings.setFlag(QgsLabelingEngineSettings.DrawLabelRectOnly)
        self._MapSettings.setLabelingEngineSettings(engine_settings)

    def checkTest(self, **kwargs):
        """Render the configured map and assert it matches the control image.

        kwargs:
            apply_simple_labeling: when True (default), apply self.lyr as
                simple labeling to self.layer before rendering.
        """
        if kwargs.get('apply_simple_labeling', True):
            self.layer.setLabeling(QgsVectorLayerSimpleLabeling(self.lyr))

        ms = self._MapSettings  # class settings
        settings_type = 'Class'
        if self._TestMapSettings is not None:
            ms = self._TestMapSettings  # per test settings
            settings_type = 'Test'
        if 'PAL_VERBOSE' in os.environ:
            qDebug('MapSettings type: {0}'.format(settings_type))
            qDebug(mapSettingsString(ms))

        img = renderMapToImage(ms, parallel=False)
        self._TestImage = getTempfilePath('png')
        if not img.save(self._TestImage, 'png'):
            os.unlink(self._TestImage)
            raise OSError('Failed to save output from map render job')
        self.saveControlImage(self._TestImage)

        # Comparison tolerances: per-test values unless overridden for the
        # current test group, and disabled entirely via env vars.
        mismatch = 0
        if 'PAL_NO_MISMATCH' not in os.environ:
            # some mismatch expected
            mismatch = self._Mismatch if self._Mismatch else 0
            if self._TestGroup in self._Mismatches:
                mismatch = self._Mismatches[self._TestGroup]
        colortol = 0
        if 'PAL_NO_COLORTOL' not in os.environ:
            colortol = self._ColorTol if self._ColorTol else 0
            if self._TestGroup in self._ColorTols:
                colortol = self._ColorTols[self._TestGroup]
        self.assertTrue(*self.renderCheck(mismatch=mismatch,
                                          colortol=colortol,
                                          imgpath=self._TestImage))
# noinspection PyPep8Naming
class TestPointPlacement(TestPlacementBase):
@classmethod
def setUpClass(cls):
TestPlacementBase.setUpClass()
cls.layer = None
    def test_point_placement_around(self):
        """Default (around-point) label placement for a point layer."""
        # Default point label placement
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.checkTest()
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_point_placement_around_obstacle(self):
        """Default point label placement with an obstacle present."""
        # Default point label placement with obstacle
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point2')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.checkTest()
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_point_placement_narrow_polygon_obstacle(self):
        """Default point label placement with a narrow polygon obstacle layer."""
        # Default point label placement with narrow polygon obstacle
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
        polyLayer = TestQgsPalLabeling.loadFeatureLayer('narrow_polygon')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.checkTest()
        # Remove both layers added for this test.
        self.removeMapLayer(self.layer)
        self.removeMapLayer(polyLayer)
        self.layer = None
    def test_point_placement_around_obstacle_large_symbol(self):
        """Default point label placement with obstacle and large symbols."""
        # Default point label placement with obstacle and large symbols
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point3')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.checkTest()
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_line_with_no_candidate_show_all(self):
        """Short line with no label candidates must still show its label when displayAll is set."""
        # A line too short to have any candidates, yet we need to show all labels for the layer
        self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.layer.setLabelsEnabled(True)
        self.lyr.displayAll = True
        # Render the label text at a large size (60).
        f = self.lyr.format()
        f.setSize(60)
        self.lyr.setFormat(f)
        self.checkTest()
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_polygon_placement_with_hole(self):
        """Horizontal label placement for a polygon with a hole."""
        # Horizontal label placement for polygon with hole
        # Note for this test, the mask is used to check only pixels outside of the polygon.
        # We don't care where in the polygon the label is, just that it
        # is INSIDE the polygon
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_with_hole')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.Horizontal
        self.checkTest()
        self.removeMapLayer(self.layer)
        self.layer = None
def test_polygon_placement_with_hole_and_point(self):
# Testing that hole from a feature is not treated as an obstacle for other feature's labels
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
polyLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_with_hole')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.checkTest()
self.removeMapLayer(self.layer)
self.removeMapLayer(polyLayer)
self.layer = None
def test_polygon_placement_with_obstacle(self):
# Horizontal label placement for polygon and a line obstacle
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_rect')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_with_hole_line_obstacle')
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = 7
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Horizontal
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
    def test_polygon_placement_bumps(self):
        """Render horizontal placement for a bumpy polygon; label should sit near the pole of inaccessibility."""
        # Horizontal label placement for polygon with bumps, checking that
        # labels are placed close to the pole of inaccessibility (max distance
        # to rings)
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_with_bump')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.Horizontal
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_polygon_placement_small_bump(self):
        """Render horizontal placement for a polygon with a small bump; prefer the centroid-near candidate."""
        # Horizontal label placement for polygon with a small bump, checking that
        # labels AREN'T placed right at the pole of inaccessibility
        # when that position is far from the polygon's centroid
        # i.e. when label candidates have close-ish max distance to rings
        # then we pick the one closest to the polygon's centroid
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small_bump')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.Horizontal
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_polygon_multiple_labels(self):
        """Render a rule-based labeled polygon (rule-based labeling is kept, not replaced by simple labeling)."""
        # Horizontal label placement for polygon with hole
        # Note for this test, the mask is used to check only pixels outside of the polygon.
        # We don't care where in the polygon the label is, just that it
        # is INSIDE the polygon
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_rule_based')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        # keep the layer's own (rule-based) labeling instead of applying simple labeling
        self.checkTest(apply_simple_labeling=False)
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
def test_multipolygon_obstacle(self):
# Test that all parts of multipolygon are used as an obstacle
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
polyLayer = TestQgsPalLabeling.loadFeatureLayer('multi_polygon')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.checkTest()
self.removeMapLayer(self.layer)
self.removeMapLayer(polyLayer)
self.layer = None
    def test_point_offset_center_placement(self):
        """Render point labels placed directly over the point (center quadrant)."""
        # Test point offset from point, center placement
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OverPoint
        self.lyr.quadOffset = QgsPalLayerSettings.QuadrantOver
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_point_offset_below_left_placement(self):
        """Render point labels placed in the below-left quadrant of the point."""
        # Test point offset from point, below left placement
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OverPoint
        self.lyr.quadOffset = QgsPalLayerSettings.QuadrantBelowLeft
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
def test_obstacle_collision_but_showing_all(self):
# Test the when a collision occurs and the Show All labels setting is active, Show All wins
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line')
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = 8
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantAboveLeft
self.lyr.priority = 4
self.lyr.displayAll = True
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_point_obstacle_obstacle_factor_greater_equal(self):
# Test point label but obstacle exists with a greater than obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle1')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self.assertEqual(self._MapSettings.labelingEngineSettings().placementVersion(), QgsLabelingEngineSettings.PlacementEngineVersion2)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.assertEqual(self._TestMapSettings.labelingEngineSettings().placementVersion(), QgsLabelingEngineSettings.PlacementEngineVersion2)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantAboveRight
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_point_obstacle_obstacle_factor_less(self):
# Test point label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle1')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantAboveRight
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_line_obstacle_obstacle_factor_greater_equal(self):
# Test point label but line obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantAboveLeft
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_line_obstacle_obstacle_factor_less(self):
# Test point label but line obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantAboveLeft
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_polygon_obstacle_obstacle_factor_greater_equal(self):
# Test point label but polygon obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('narrow_polygon')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantBelowRight
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_polygon_obstacle_obstacle_factor_less(self):
# Test point label but polygon obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('point')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('narrow_polygon')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.quadOffset = QgsPalLayerSettings.QuadrantBelowRight
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_point_obstacle_obstacle_factor_greater_equal(self):
# Test line label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_point_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_line_obstacle_obstacle_factor_greater_equal(self):
# Test line label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_line_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_polygon_obstacle_obstacle_factor_greater_equal(self):
# Test line label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_line_polygon_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('line_short')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_point_obstacle_obstacle_factor_greater_equal(self):
# Test polygon label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_point_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_line_obstacle_obstacle_factor_greater_equal(self):
# Test polygon label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line_placement_4')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_line_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('line_placement_4')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_polygon_obstacle_obstacle_factor_greater_equal(self):
# Test polygon label but obstacle exists with a greater obstacle factor vs label priority => NO LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
for label_priority in range(0, 11):
for obstacle_weight in range(label_priority + 1, 11):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacle_label_settings.obstacleSettings().setType(QgsLabelObstacleSettings.PolygonInterior)
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_polygon_polygon_obstacle_obstacle_factor_less(self):
# Test line label but obstacle exists with an equal or lower obstacle factor vs label priority => LABEL
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_center')
self.layer.setLabelsEnabled(True)
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
for label_priority in range(0, 11):
for obstacle_weight in range(0, label_priority + 1):
obstacle_label_settings = QgsPalLayerSettings()
obstacle_label_settings.obstacle = True
obstacle_label_settings.drawLabels = False
obstacle_label_settings.obstacleFactor = obstacle_weight * 0.2
obstacle_label_settings.obstacleSettings().setType(QgsLabelObstacleSettings.PolygonInterior)
obstacleLayer.setLabeling(QgsVectorLayerSimpleLabeling(obstacle_label_settings))
obstacleLayer.setLabelsEnabled(True)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.priority = label_priority
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
    def test_point_ordered_placement1(self):
        """Render ordered (cascading-position) placement for points with no obstacles."""
        # Test ordered placements for point
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
        self.lyr.dist = 2
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
def test_point_ordered_placement2(self):
# Test ordered placements for point (1 obstacle)
self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle1')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
self.lyr.dist = 2
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_ordered_placement3(self):
# Test ordered placements for point (2 obstacle)
self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle2')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
self.lyr.dist = 2
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
def test_point_ordered_placement4(self):
# Test ordered placements for point (3 obstacle)
self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle3')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
self.lyr.dist = 2
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.layer = None
    def test_point_dd_ordered_placement(self):
        """Render ordered point placement with a data-defined position order ('T,B')."""
        # Test ordered placements for point with data defined order
        self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
        self.lyr.dist = 2
        self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PredefinedPositionOrder, QgsProperty.fromExpression("'T,B'"))
        self.checkTest()
        self.removeMapLayer(self.layer)
        # reset the data defined order so later tests are unaffected
        self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PredefinedPositionOrder, QgsProperty())
        self.layer = None
def test_point_dd_ordered_placement1(self):
# Test ordered placements for point with data defined order and obstacle
self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
obstacleLayer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_obstacle_top')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
self.lyr.dist = 2
self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PredefinedPositionOrder, QgsProperty.fromExpression("'T,B'"))
self.checkTest()
self.removeMapLayer(obstacleLayer)
self.removeMapLayer(self.layer)
self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PredefinedPositionOrder, QgsProperty())
self.layer = None
def test_point_ordered_symbol_bound_offset(self):
# Test ordered placements for point using symbol bounds offset
self.layer = TestQgsPalLabeling.loadFeatureLayer('point_ordered_placement')
# Make a big symbol
symbol = QgsMarkerSymbol.createSimple({'color': '31,120,180,255',
'outline_color': '0,0,0,0',
'outline_style': 'solid',
'size': '10',
'name': 'rectangle',
'size_unit': 'MM'})
renderer = QgsSingleSymbolRenderer(symbol)
self.layer.setRenderer(renderer)
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OrderedPositionsAroundPoint
self.lyr.dist = 2
self.lyr.offsetType = QgsPalLayerSettings.FromSymbolBounds
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
    def test_polygon_placement_perimeter(self):
        """Render default perimeter (line-above) label placement for a polygon."""
        # Default polygon perimeter placement
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_perimeter')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.Line
        self.lyr.placementFlags = QgsPalLayerSettings.AboveLine
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_small_polygon_placement_perimeter(self):
        """Render default perimeter label placement for a small polygon."""
        # Default polygon perimeter placement for small polygon
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.Line
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
def test_small_polygon_large_label(self):
# Default polygon placement for small polygon with a large label
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.format().setSize(30)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
    def test_small_polygon_large_label_force_inside(self):
        """Render a small polygon with a large label while only inside placement is allowed."""
        # Default polygon placement for small polygon with a large label, with only placement of inside labels
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OverPoint
        self.lyr.fitInPolygonOnly = True
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
    def test_small_polygon_large_label_allow_outside(self):
        """Render a small polygon with a large label when both inside and outside placement are allowed."""
        # Default polygon placement for small polygon with a large label, allowing outside placement
        # we expect this to sit outside, because it CAN'T fit
        self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
        self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
        self.lyr.placement = QgsPalLayerSettings.OverPoint
        self.lyr.setPolygonPlacementFlags(QgsLabeling.AllowPlacementOutsideOfPolygon | QgsLabeling.AllowPlacementInsideOfPolygon)
        self.checkTest()
        # tear down: release the layer so the next test starts clean
        self.removeMapLayer(self.layer)
        self.layer = None
def test_small_polygon_small_label_inside_and_outside(self):
# Default polygon placement for small polygon with a small label, allowing outside placement
# we expect this to sit inside, because it CAN fit
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.setPolygonPlacementFlags(QgsLabeling.AllowPlacementOutsideOfPolygon | QgsLabeling.AllowPlacementInsideOfPolygon)
f = self.lyr.format()
f.setSize(8)
self.lyr.setFormat(f)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
def test_small_polygon_small_label_outside_only(self):
# Default polygon placement for small polygon with a small label, allowing outside placement only
# we expect this to sit outside, cos we are blocking inside placement
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.OverPoint
self.lyr.setPolygonPlacementFlags(QgsLabeling.AllowPlacementOutsideOfPolygon)
f = self.lyr.format()
f.setSize(8)
self.lyr.setFormat(f)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
def test_small_polygon_small_data_defined_allow_outside(self):
# Default data defined allow outside mode
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Horizontal
self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PolygonLabelOutside, QgsProperty.fromValue(1))
f = self.lyr.format()
f.setSize(8)
self.lyr.setFormat(f)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
def test_small_polygon_small_data_defined_force_outside(self):
# Default data defined allow outside mode
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Horizontal
self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PolygonLabelOutside, QgsProperty.fromValue('force'))
f = self.lyr.format()
f.setSize(8)
self.lyr.setFormat(f)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
def test_small_polygon_small_data_defined_allow_outside_large(self):
# Default data defined allow outside mode
self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
self.lyr.placement = QgsPalLayerSettings.Horizontal
self.lyr.dataDefinedProperties().setProperty(QgsPalLayerSettings.PolygonLabelOutside, QgsProperty.fromValue(1))
f = self.lyr.format()
f.setSize(20)
self.lyr.setFormat(f)
self.checkTest()
self.removeMapLayer(self.layer)
self.layer = None
def test_small_polygon_small_label_outside_mode(self):
    """OutsidePolygons placement mode forces the label outside the polygon."""
    # Forced outside placement for polygon
    self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.OutsidePolygons
    f = self.lyr.format()
    f.setSize(8)
    self.lyr.setFormat(f)
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_small_polygon_small_label_outside_mode_distance(self):
    """OutsidePolygons placement combined with a 10-unit label distance."""
    # Forced outside placement for polygon with distance
    self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.OutsidePolygons
    self.lyr.dist = 10
    f = self.lyr.format()
    f.setSize(8)
    self.lyr.setFormat(f)
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_small_polygon_perimeter_only_fit(self):
    """Perimeter (Line) placement with fitInPolygonOnly on a too-small polygon."""
    # Polygon perimeter placement for small polygon when set to only show labels which fit in polygon
    self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.lyr.fitInPolygonOnly = True
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_small_polygon_curvedperimeter_only_fit(self):
    """PerimeterCurved placement with fitInPolygonOnly on a too-small polygon."""
    # Polygon perimeter placement for small polygon when set to only show labels which fit in polygon
    self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.PerimeterCurved
    self.lyr.fitInPolygonOnly = True
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_small_polygon_over_point_only_fit(self):
    """OverPoint placement with fitInPolygonOnly on a too-small polygon."""
    # Polygon over point placement for small polygon when set to only show labels which fit in polygon
    self.layer = TestQgsPalLabeling.loadFeatureLayer('polygon_small')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.OverPoint
    self.lyr.fitInPolygonOnly = True
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_curved_above_instead_of_below(self):
    """Curved line labels: above-line placement should win over below-line."""
    # Test that labeling a line using curved labels when both above and below placement are allowed that above
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Curved
    self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_curved_above_instead_of_online(self):
    """Curved line labels: above-line placement should win over on-line."""
    # Test that labeling a line using curved labels when both above and online placement are allowed that above
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Curved
    self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.OnLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_curved_below_instead_of_online(self):
    """Curved line labels: below-line placement should win over on-line."""
    # Test that labeling a line using curved labels when both below and online placement are allowed that below
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Curved
    self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.OnLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_above_instead_of_below(self):
    """Parallel line labels: above-line placement should win over below-line."""
    # Test that labeling a line using parallel labels when both above and below placement are allowed that above
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_above_instead_of_online(self):
    """Parallel line labels: above-line placement should win over on-line."""
    # Test that labeling a line using parallel labels when both above and online placement are allowed that above
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.OnLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_line_below_instead_of_online(self):
    """Parallel line labels: below-line placement should win over on-line."""
    # Test that labeling a line using parallel labels when both below and online placement are allowed that below
    # is preferred
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.OnLine | QgsPalLayerSettings.MapOrientation
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_longer_lines_over_shorter(self):
    """Parallel labels should gravitate to the longer straight stretches."""
    # Test that labeling a line using parallel labels will tend to place the labels over the longer straight parts of
    # the line
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line_placement_1')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_prefer_more_horizontal_lines(self):
    """Parallel labels should prefer the more horizontal line sections."""
    # Test that labeling a line using parallel labels will tend to place the labels over more horizontal sections
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line_placement_2')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_label_line_over_small_angles(self):
    """Small direction changes should not stop a label sitting near line center."""
    # Test that labeling a line using parallel labels will place labels near center of straightish line
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line_placement_3')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_label_line_toward_center(self):
    """Parallel labels should be placed as close to the line's center as possible."""
    # Test that labeling a line using parallel labels will try to place labels as close to center of line as possible
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line_placement_4')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_label_line_avoid_jaggy(self):
    """Parallel labels should avoid the jagged portions of a line."""
    # Test that labeling a line using parallel labels won't place labels over jaggy bits of line
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line_placement_5')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Line
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
def test_label_curved_zero_width_char(self):
    """Curved labels must not break on text containing zero-width characters."""
    # Test that curved label work with zero-width characters
    self.layer = TestQgsPalLabeling.loadFeatureLayer('line')
    self._TestMapSettings = self.cloneMapSettings(self._MapSettings)
    self.lyr.placement = QgsPalLayerSettings.Curved
    self.lyr.placementFlags = QgsPalLayerSettings.OnLine
    # Label text comes from an expression, not a field.
    self.lyr.fieldName = "'invisiblespace'"
    self.lyr.isExpression = True
    self.checkTest()
    self.removeMapLayer(self.layer)
    self.layer = None
if __name__ == '__main__':
    # NOTE: unless PAL_SUITE env var is set all test class methods will be run
    # SEE: test_qgspallabeling_tests.suiteTests() to define suite
    # NB: the parentheses do not make this a tuple - 'suite' is a plain string
    # naming the single suite to run.
    suite = ('TestPointPlacement')
    res = runSuite(sys.modules[__name__], suite)
    # Exit status 0 on success, 1 on any failure/error.
    sys.exit(not res.wasSuccessful())
| gpl-2.0 |
jabez1314/youtube-dl | youtube_dl/extractor/nrk.py | 83 | 10884 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
parse_duration,
unified_strdate,
)
class NRKIE(InfoExtractor):
    """Extractor for single NRK videos (nrk.no/video/PS*<id> URLs and
    internal ``nrk:<id>`` references emitted by NRKPlaylistIE)."""

    _VALID_URL = r'(?:nrk:|https?://(?:www\.)?nrk\.no/video/PS\*)(?P<id>\d+)'

    _TESTS = [
        {
            'url': 'http://www.nrk.no/video/PS*150533',
            'md5': 'bccd850baebefe23b56d708a113229c2',
            'info_dict': {
                'id': '150533',
                'ext': 'flv',
                'title': 'Dompap og andre fugler i Piip-Show',
                'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
                'duration': 263,
            }
        },
        {
            'url': 'http://www.nrk.no/video/PS*154915',
            'md5': '0b1493ba1aae7d9579a5ad5531bc395a',
            'info_dict': {
                'id': '154915',
                'ext': 'flv',
                'title': 'Slik høres internett ut når du er blind',
                'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
                'duration': 20,
            }
        },
    ]

    def _real_extract(self, url):
        """Resolve the media element via NRK's JSON API and build the info dict."""
        video_id = self._match_id(url)

        data = self._download_json(
            'http://v8.psapi.nrk.no/mediaelement/%s' % video_id,
            video_id, 'Downloading media JSON')

        # Geo-restricted items are reported up-front by the API; the error
        # message (in Norwegian) says the programme cannot be shown outside
        # Norway.
        if data['usageRights']['isGeoBlocked']:
            raise ExtractorError(
                'NRK har ikke rettig-heter til å vise dette programmet utenfor Norge',
                expected=True)

        # The hdcore/plugin query parameters are required by the HDS server.
        video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81'

        duration = parse_duration(data.get('duration'))

        images = data.get('images')
        if images:
            # Pick the widest available web image as the thumbnail.
            thumbnails = images['webImages']
            thumbnails.sort(key=lambda image: image['pixelWidth'])
            thumbnail = thumbnails[-1]['imageUrl']
        else:
            thumbnail = None

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': data['title'],
            'description': data['description'],
            'duration': duration,
            'thumbnail': thumbnail,
        }
class NRKPlaylistIE(InfoExtractor):
    """Extractor for nrk.no article pages embedding one or more NRK videos."""

    _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video)(?:[^/]+/)+(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
        'info_dict': {
            'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
            'title': 'Gjenopplev den historiske solformørkelsen',
            'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
        'info_dict': {
            'id': 'rivertonprisen-til-karin-fossum-1.12266449',
            'title': 'Rivertonprisen til Karin Fossum',
            'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
        },
        'playlist_count': 5,
    }]

    def _real_extract(self, url):
        """Collect every embedded NRK video id on the page into a playlist."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # Embedded players are marked with a 'rich' CSS class and carry the
        # video id in a data attribute; each one is delegated to NRKIE.
        video_ids = re.findall(
            r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"',
            webpage)
        entries = [self.url_result('nrk:%s' % vid, 'NRK') for vid in video_ids]

        return self.playlist_result(
            entries, playlist_id,
            self._og_search_title(webpage),
            self._og_search_description(webpage))
class NRKTVIE(InfoExtractor):
    """Extractor for tv.nrk.no / radio.nrk.no programme pages.

    Handles single programmes as well as multi-part programmes (playlists);
    a ``#del=N`` URL fragment selects a single part of a multi-part
    programme.
    """

    IE_DESC = 'NRK TV and NRK Radio'
    _VALID_URL = r'(?P<baseurl>https?://(?:tv|radio)\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'

    _TESTS = [
        {
            'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
            'md5': 'adf2c5454fa2bf032f47a9f8fb351342',
            'info_dict': {
                'id': 'MUHH48000314',
                'ext': 'flv',
                'title': '20 spørsmål',
                'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
                'upload_date': '20140523',
                'duration': 1741.52,
            },
        },
        {
            'url': 'https://tv.nrk.no/program/mdfp15000514',
            'md5': '383650ece2b25ecec996ad7b5bb2a384',
            'info_dict': {
                'id': 'mdfp15000514',
                'ext': 'flv',
                'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
                'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
                'upload_date': '20140524',
                'duration': 4605.0,
            },
        },
        {
            # single playlist video
            'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
            'md5': 'adbd1dbd813edaf532b0a253780719c2',
            'info_dict': {
                'id': 'MSPO40010515-part2',
                'ext': 'flv',
                'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
                'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                'upload_date': '20150106',
            },
            'skip': 'Only works from Norway',
        },
        {
            'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
            'playlist': [
                {
                    'md5': '9480285eff92d64f06e02a5367970a7a',
                    'info_dict': {
                        'id': 'MSPO40010515-part1',
                        'ext': 'flv',
                        'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 1:2)',
                        'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                        'upload_date': '20150106',
                    },
                },
                {
                    'md5': 'adbd1dbd813edaf532b0a253780719c2',
                    'info_dict': {
                        'id': 'MSPO40010515-part2',
                        'ext': 'flv',
                        'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
                        'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                        'upload_date': '20150106',
                    },
                },
            ],
            'info_dict': {
                'id': 'MSPO40010515',
                'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn',
                'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                'upload_date': '20150106',
                'duration': 6947.5199999999995,
            },
            'skip': 'Only works from Norway',
        },
        {
            'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
            'only_matching': True,
        }
    ]

    def _debug_print(self, txt):
        # Only emit the message when youtube-dl runs with --verbose.
        if self._downloader.params.get('verbose', False):
            self.to_screen('[debug] %s' % txt)

    def _get_subtitles(self, subtitlesurl, video_id, baseurl):
        """Download the TTML subtitle document and map it by language code."""
        url = "%s%s" % (baseurl, subtitlesurl)
        self._debug_print('%s: Subtitle url: %s' % (video_id, url))
        captions = self._download_xml(
            url, video_id, 'Downloading subtitles')
        # Default to Norwegian when the document carries no language attribute.
        lang = captions.get('lang', 'no')
        return {lang: [
            {'ext': 'ttml', 'url': url},
        ]}

    def _extract_f4m(self, manifest_url, video_id):
        """Extract HDS formats; the hdcore/plugin params are required by NRK."""
        return self._extract_f4m_formats(
            manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id, f4m_id='hds')

    def _real_extract(self, url):
        """Extract a single programme, one part, or a whole multi-part playlist."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        part_id = mobj.group('part_id')
        baseurl = mobj.group('baseurl')

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_meta(
            'title', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description')

        thumbnail = self._html_search_regex(
            r'data-posterimage="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        upload_date = unified_strdate(self._html_search_meta(
            'rightsfrom', webpage, 'upload date', fatal=False))
        duration = float_or_none(self._html_search_regex(
            r'data-duration="([^"]+)"',
            webpage, 'duration', fatal=False))

        # playlist
        parts = re.findall(
            r'<a href="#del=(\d+)"[^>]+data-argument="([^"]+)">([^<]+)</a>', webpage)
        if parts:
            entries = []
            for current_part_id, stream_url, part_title in parts:
                # With a #del=N fragment only that part is extracted.
                if part_id and current_part_id != part_id:
                    continue
                video_part_id = '%s-part%s' % (video_id, current_part_id)
                formats = self._extract_f4m(stream_url, video_part_id)
                entries.append({
                    'id': video_part_id,
                    'title': part_title,
                    'description': description,
                    'thumbnail': thumbnail,
                    'upload_date': upload_date,
                    'formats': formats,
                })
            if part_id:
                # NOTE: if the requested part is not found, control falls
                # through to the single-video extraction below.
                if entries:
                    return entries[0]
            else:
                playlist = self.playlist_result(entries, video_id, title, description)
                playlist.update({
                    'thumbnail': thumbnail,
                    'upload_date': upload_date,
                    'duration': duration,
                })
                return playlist

        formats = []

        f4m_url = re.search(r'data-media="([^"]+)"', webpage)
        if f4m_url:
            formats.extend(self._extract_f4m(f4m_url.group(1), video_id))

        m3u8_url = re.search(r'data-hls-media="([^"]+)"', webpage)
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(m3u8_url.group(1), video_id, 'mp4', m3u8_id='hls'))
        self._sort_formats(formats)

        subtitles_url = self._html_search_regex(
            r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
            webpage, 'subtitle URL', default=None)
        subtitles = None
        if subtitles_url:
            subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
kevinge314gh/tornado | tornado/test/util.py | 61 | 2881 | from __future__ import absolute_import, division, print_function, with_statement
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Encapsulate the choice of unittest or unittest2 here.
# To be used as 'from tornado.test.util import unittest'.
if sys.version_info < (2, 7):
    # In py26, we must always use unittest2.
    import unittest2 as unittest
else:
    # Otherwise, use whichever version of unittest was imported in
    # tornado.testing.
    from tornado.testing import unittest

# Skip decorators shared across the tornado test suite.
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
                                "non-unix platform")

# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
                               'timing tests unreliable on travis')

# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
                                  'network access disabled')

skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')

# Python-version-gated syntax features.
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')

# For tests relying on CPython-specific behavior (e.g. reference counting).
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
                                 'Not CPython implementation')
def refusing_port():
    """Returns a local port number that will refuse all connections.

    Return value is (cleanup_func, port); the cleanup function
    must be called to free the port to be reused.
    """
    # On travis-ci, port numbers are reassigned frequently. To avoid
    # collisions with other tests, we use an open client-side socket's
    # ephemeral port number to ensure that nothing can listen on that
    # port.
    listener, listen_port = bind_unused_port()
    listener.setblocking(1)
    client = socket.socket()
    client.connect(("127.0.0.1", listen_port))
    accepted, client_addr = listener.accept()
    accepted.close()
    listener.close()
    # While ``client`` stays open, its ephemeral port cannot be listened on.
    return (client.close, client_addr[1])
def exec_test(caller_globals, caller_locals, s):
    """Execute ``s`` in a given context and return the result namespace.

    Used to define functions for tests in particular python
    versions that would be syntax errors in older versions.
    """
    # Code in ``s`` sees one flat global namespace: the caller's globals
    # overlaid with its locals. Names defined by ``s`` land in a fresh
    # local namespace, which is what we hand back.
    merged_globals = {}
    merged_globals.update(caller_globals)
    merged_globals.update(caller_locals)
    defined_names = {}
    exec(textwrap.dedent(s), merged_globals, defined_names)
    return defined_names
| apache-2.0 |
praekelt/jmbo-twitter | jmbo_twitter/models.py | 1 | 5204 | import datetime, twitter
from urllib2 import URLError
import logging
from django.db import models
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from jmbo.models import ModelBase
logger = logging.getLogger('django')
class Status(ModelBase):
    """Read-only wrapper around a python-twitter status object.

    Purely a wrapper that allows us to use jmbo-foundry's listings for
    tweets. Instances are never persisted; save() is explicitly disabled.
    """

    def __init__(self, status):
        # Intentionally does not call ModelBase.__init__ - this object only
        # mirrors the attributes of the wrapped twitter.Status.
        # Copy attributes over
        attrs = (
            'contributors', 'coordinates', 'created_at',
            'created_at_in_seconds', 'favorited', 'geo', 'hashtags', 'id',
            'in_reply_to_screen_name', 'in_reply_to_status_id',
            'in_reply_to_user_id', 'location', 'now', 'place',
            'relative_created_at', 'retweet_count', 'retweeted',
            'retweeted_status', 'source', 'text', 'truncated', 'urls', 'user',
            'user_mentions', 'created_at_datetime',
        )
        for attr in attrs:
            setattr(self, attr, getattr(status, attr))

    @property
    def as_leaf_class(self):
        # jmbo listings call as_leaf_class on items; a Status has no
        # subclasses, so it is its own leaf.
        return self

    def save(self):
        # Bug fix: the original ``raise NotImplemented`` raised a TypeError,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError('Status is a read-only wrapper')
class StatusMixin(object):
    """Shared Twitter-fetching behaviour for Feed and Search models.

    Subclasses implement get_statuses(); fetch() wraps it with credential
    checking, error handling and 24-hour caching.
    """

    def get_statuses(self, api):
        """Return a list of twitter.Status objects fetched through ``api``."""
        # Bug fix: the original ``raise NotImplemented`` raised a TypeError,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError('subclasses must implement get_statuses')

    def fetch(self, force=False):
        """Fetch statuses from Twitter and cache them for 24 hours.

        Returns the (possibly cached) list of statuses, or [] when settings
        are incomplete or the Twitter API/network fails.
        """
        klass_name = self.__class__.__name__
        cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
        cached = cache.get(cache_key, None)
        if (cached is not None) and not force:
            return cached

        # Get and check settings
        di = getattr(settings, 'JMBO_TWITTER', {})
        ck = di.get('consumer_key')
        cs = di.get('consumer_secret')
        atk = di.get('access_token_key')
        ats = di.get('access_token_secret')
        if not all([ck, cs, atk, ats]):
            logger.error(
                'jmbo_twitter.models.%s.fetch - incomplete settings',
                klass_name
            )
            return []

        # Query twitter taking care to handle network errors
        api = twitter.Api(
            consumer_key=ck, consumer_secret=cs, access_token_key=atk,
            access_token_secret=ats, requests_timeout=10
        )
        try:
            statuses = self.get_statuses(api)
        except (URLError, ValueError, twitter.TwitterError):
            # Deliberate best-effort: transient network/API errors yield [].
            statuses = []
        except Exception as e:
            # All manner of things can go wrong with integration. Note:
            # ``except ... as e`` replaces the Python 2-only comma syntax and
            # the removed-in-Python-3 ``e.message`` attribute.
            logger.error(
                'jmbo_twitter.models.%s.fetch - %s', klass_name, e
            )
            statuses = []

        # Attach a real datetime for template-friendly formatting.
        for status in statuses:
            status.created_at_datetime = datetime.datetime.fromtimestamp(
                status.created_at_in_seconds
            )

        if statuses:
            # Only set if there are statuses. Twitter may randomly throttle us
            # and destroy our cache without this check. Cache for a long time
            # incase Twitter goes down.
            cache.set(cache_key, statuses, 86400)

        # Legacy return
        return statuses

    @property
    def fetched(self):
        """The cached statuses for this object ([] when nothing is cached)."""
        klass_name = self.__class__.__name__
        cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
        return cache.get(cache_key, [])

    @property
    def tweets(self):
        """Cached statuses wrapped as Status objects in a QuerySet-ish list."""
        class MyList(list):
            """Slightly emulate QuerySet API so jmbo-foundry listings work"""
            @property
            def exists(self):
                return len(self) > 0

        return MyList(Status(status) for status in self.fetched)
class Feed(ModelBase, StatusMixin):
    """A feed represents a twitter user account"""

    # Twitter screen name, without the leading '@'.
    name = models.CharField(
        max_length=255,
        unique=True,
        help_text="A twitter account name, eg. johnsmith"
    )
    # Avatar URL, refreshed from the latest fetched status (see fetch()).
    profile_image_url = models.CharField(
        null=True, editable=False, max_length=255
    )
    twitter_id = models.CharField(max_length=255, default='', editable=False)

    def get_statuses(self, api):
        """Return the account's timeline, retweets included."""
        # Fall back to slug for historical reasons
        statuses = api.GetUserTimeline(
            screen_name=self.name or self.slug, include_rts=True
        )
        return statuses

    def fetch(self, force=False):
        """Fetch the timeline, then sync avatar/title from the newest status."""
        statuses = super(Feed, self).fetch(force=force)
        if statuses:
            # This is a convenient place to set the feed image url
            status = statuses[0]
            changed = False
            if status.user.profile_image_url != self.profile_image_url:
                self.profile_image_url = status.user.profile_image_url
                changed = True
            if status.user.name != self.title:
                self.title = status.user.name
                changed = True
            if changed:
                # Persist only when something actually changed.
                self.save()
        return statuses
class Search(ModelBase, StatusMixin):
    """A search represents a twitter keyword search"""

    # Search string or hashtag passed verbatim to the Twitter search API.
    criteria = models.CharField(
        max_length=255,
        unique=True,
        help_text="Search string or a hashtag"
    )

    class Meta:
        verbose_name_plural = _("Searches")

    def get_statuses(self, api):
        """Return statuses matching this object's search criteria."""
        return api.GetSearch(self.criteria)
| bsd-3-clause |
steventimberman/masterDebater | venv/lib/python2.7/site-packages/django/utils/numberformat.py | 63 | 2568 | from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.utils import six
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
           force_grouping=False):
    """
    Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator.
        For non-uniform digit grouping, it can be a sequence with the number
        of digit group sizes following the format used by the Python locale
        module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
    * thousand_sep: Thousand separator symbol (for example ",")
    * force_grouping: apply grouping even when the USE_L10N /
        USE_THOUSAND_SEPARATOR settings would not.
    """
    use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
    use_grouping = use_grouping or force_grouping
    use_grouping = use_grouping and grouping != 0
    # Make the common case fast
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(six.text_type(number))
    # sign
    sign = ''
    # '{:f}' always yields a plain fixed-point string, whereas str() on a
    # Decimal may produce exponent notation.
    if isinstance(number, Decimal):
        str_number = '{:f}'.format(number)
    else:
        str_number = six.text_type(number)
    if str_number[0] == '-':
        sign = '-'
        str_number = str_number[1:]
    # decimal part
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos is not None:
            # Truncate (not round) to the requested number of positions.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos is not None:
        # Zero-pad so the decimal part is exactly decimal_pos digits long.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part:
        dec_part = decimal_sep + dec_part
    # grouping
    if use_grouping:
        try:
            # if grouping is a sequence
            intervals = list(grouping)
        except TypeError:
            # grouping is a single value
            intervals = [grouping, 0]
        active_interval = intervals.pop(0)
        int_part_gd = ''
        cnt = 0
        # Walk the integer digits right-to-left, inserting the (reversed)
        # separator whenever the active group size fills up; a 0 interval
        # repeats the previous size, matching locale.localeconv() semantics.
        for digit in int_part[::-1]:
            if cnt and cnt == active_interval:
                if intervals:
                    active_interval = intervals.pop(0) or active_interval
                int_part_gd += thousand_sep[::-1]
                cnt = 0
            int_part_gd += digit
            cnt += 1
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
| mit |
floxard/folly | folly/test/gtest/googletest/test/gtest_test_utils.py | 344 | 10823 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
# Environment variable that makes gtest binaries write XML result files.
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

# Platform detection, used e.g. to decide whether to append '.exe'.
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'

# Mutable copy of the environment; tests adjust it via SetEnvVar() without
# touching the real os.environ.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
    """Sets/unsets an environment variable to a given value.

    Passing ``None`` removes the variable from the module-level ``environ``
    copy (a no-op if it is absent); any other value sets it.
    """
    if value is None:
        environ.pop(env_var, None)
    else:
        environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# True once _ParseAndStripGTestFlags() has run; keeps parsing idempotent.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
    """Parses and strips Google Test flags from argv. This is idempotent.

    Matching ``--<flag>=value`` arguments are removed from ``argv`` in place
    and their values stored in the module-level _flag_map.
    """
    # Suppresses the lint complaint about a global variable since we need it
    # here to maintain module-wide state.
    global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
    if _gtest_flags_are_parsed:
        return

    _gtest_flags_are_parsed = True
    for flag in _flag_map:
        # The environment variable overrides the default value.
        if flag.upper() in os.environ:
            _flag_map[flag] = os.environ[flag.upper()]

        # The command line flag overrides the environment variable.
        i = 1  # Skips the program name.
        while i < len(argv):
            prefix = '--' + flag + '='
            if argv[i].startswith(prefix):
                _flag_map[flag] = argv[i][len(prefix):]
                del argv[i]
                break
            else:
                # We don't increment i in case we just found a --gtest_* flag
                # and removed it from argv.
                i += 1
def GetFlag(flag):
    """Returns the value of the given flag.

    Raises KeyError if ``flag`` is not one of the known flags.
    """
    # In case GetFlag() is called before Main(), we always call
    # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
    # are parsed.
    _ParseAndStripGTestFlags(sys.argv)

    return _flag_map[flag]
def GetSourceDir():
    """Returns the absolute path of the directory where the .py files are."""
    # 'source_dir' comes from --source_dir / SOURCE_DIR and defaults to the
    # directory containing this script.
    return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
    """Returns the absolute path of the directory where the test binaries are."""
    # 'build_dir' comes from --build_dir / BUILD_DIR and defaults to the
    # directory containing this script.
    return os.path.abspath(GetFlag('build_dir'))
# Lazily-created shared temp directory, removed at interpreter exit.
_temp_dir = None


def _RemoveTempDir():
    # atexit hook: best-effort removal of the shared temp directory.
    if _temp_dir:
        shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
    """Returns a directory for temporary files."""
    global _temp_dir
    if not _temp_dir:
        _temp_dir = tempfile.mkdtemp()
    return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
    """Returns the absolute path of the test binary given its name.

    The function will print a message and abort the program if the resulting
    file doesn't exist.

    Args:
      executable_name: name of the test binary that the test script runs.
      build_dir: directory where to look for executables, by default
                 the result of GetBuildDir().

    Returns:
      The absolute path of the test binary.
    """
    path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                        executable_name))
    # Windows and Cygwin executables carry an '.exe' suffix.
    if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
        path += '.exe'

    if os.path.exists(path):
        return path

    sys.stdout.write(
        'Unable to find the test binary "%s". Please make sure to provide\n'
        'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.' % path)
    sys.exit(1)
def GetExitStatus(exit_code):
    """Returns the argument to exit(), or -1 if exit() wasn't called.

    Args:
      exit_code: the result value of os.system(command).
    """
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    if os.name == 'nt':
        return exit_code
    # On Unix, os.WEXITSTATUS() must be used to extract the exit status
    # from the result of os.system(); a process killed by a signal has no
    # exit status, which we report as -1.
    if os.WIFEXITED(exit_code):
        return os.WEXITSTATUS(exit_code)
    return -1
class Subprocess:
    """Runs a command in a child process and records its outcome.

    Uses the ``subprocess`` module when available and falls back to the
    legacy ``popen2`` module on very old Python versions.
    """

    def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
        """Changes into a specified directory, if provided, and executes a command.

        Restores the old directory afterwards.

        Args:
          command:        The command to run, in the form of sys.argv.
          working_dir:    The directory to change into.
          capture_stderr: Determines whether to capture stderr in the output member
                          or to discard it.
          env:            Dictionary with environment to pass to the subprocess.

        Returns:
          An object that represents outcome of the executed process. It has the
          following attributes:
            terminated_by_signal   True iff the child process has been terminated
                                   by a signal.
            signal                 Signal that terminated the child process.
            exited                 True iff the child process exited normally.
            exit_code              The code with which the child process exited.
            output                 Child process's stdout and stderr output
                                   combined in a string.
        """
        # The subprocess module is the preferrable way of running programs
        # since it is available and behaves consistently on all platforms,
        # including Windows. But it is only available starting in python 2.4.
        # In earlier python versions, we revert to the popen2 module, which is
        # available in python 2.0 and later but doesn't provide required
        # functionality (Popen4) under Windows. This allows us to support Mac
        # OS X 10.4 Tiger, which has python 2.3 installed.
        if _SUBPROCESS_MODULE_AVAILABLE:
            if capture_stderr:
                stderr = subprocess.STDOUT
            else:
                stderr = subprocess.PIPE

            p = subprocess.Popen(command,
                                 stdout=subprocess.PIPE, stderr=stderr,
                                 cwd=working_dir, universal_newlines=True, env=env)
            # communicate returns a tuple with the file object for the child's
            # output.
            self.output = p.communicate()[0]
            self._return_code = p.returncode
        else:
            old_dir = os.getcwd()

            def _ReplaceEnvDict(dest, src):
                # Changes made by os.environ.clear are not inheritable by child
                # processes until Python 2.6. To produce inheritable changes we have
                # to delete environment items with the del statement.
                for key in dest.keys():
                    del dest[key]
                dest.update(src)

            # When 'env' is not None, backup the environment variables and replace
            # them with the passed 'env'. When 'env' is None, we simply use the
            # current 'os.environ' for compatibility with the subprocess.Popen
            # semantics used above.
            if env is not None:
                old_environ = os.environ.copy()
                _ReplaceEnvDict(os.environ, env)

            try:
                if working_dir is not None:
                    os.chdir(working_dir)
                if capture_stderr:
                    p = popen2.Popen4(command)
                else:
                    p = popen2.Popen3(command)
                p.tochild.close()
                self.output = p.fromchild.read()
                ret_code = p.wait()
            finally:
                # Always restore the working directory (and environment) even
                # if the child failed.
                os.chdir(old_dir)

                # Restore the old environment variables
                # if they were replaced.
                if env is not None:
                    _ReplaceEnvDict(os.environ, old_environ)

            # Converts ret_code to match the semantics of
            # subprocess.Popen.returncode.
            if os.WIFSIGNALED(ret_code):
                self._return_code = -os.WTERMSIG(ret_code)
            else:  # os.WIFEXITED(ret_code) should return True here.
                self._return_code = os.WEXITSTATUS(ret_code)

        # A negative return code means death by signal (subprocess semantics).
        if self._return_code < 0:
            self.terminated_by_signal = True
            self.exited = False
            self.signal = -self._return_code
        else:
            self.terminated_by_signal = False
            self.exited = True
            self.exit_code = self._return_code
def Main():
  """Strips the gtest flags from sys.argv, then runs the unit test module.

  _ParseAndStripGTestFlags() must run before unittest.main(); otherwise the
  latter would be confused by the --gtest_* flags.
  """
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to, so drop any inherited setting.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| apache-2.0 |
ronas/PythonGNF | Fabulao/PedidosCapa.py | 1 | 3001 | # -*- coding: latin -*-
import sys
#from PyQt5 import QtGui, QtCore, QtWidgets #, QTableWidget, QTableWidgetItem
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QLineEdit, QLabel
from PyQt5.QtCore import QSize, Qt
import pymysql
# MySQL connection settings for the LojaDB database used by this screen.
# NOTE(review): credentials are hard-coded in version control; consider
# loading them from environment variables or a local config file instead.
config = {
    'host': 'localhost',
    'port': 3306,
    'database': 'LojaDB',
    'user': 'root',
    'password' : 'fbl1978'
}
class ClasseAPP(QWidget):
    """Order-header window: lists the orders stored in the database in a
    table and shows edit fields for the selected order's data."""

    def __init__(self):
        super(ClasseAPP, self).__init__()
        self.initUI()

    def initUI(self):
        """Builds the widgets (table + labels + fields) and loads the data."""
        self.setWindowTitle('Pedidos')
        self.resize(850, 400)
        self.move(300, 200)

        # Table showing one row per order; rows are reloaded by
        # dbBuscarPedidos(), so the initial row count is irrelevant.
        self.tabela = QTableWidget(3, 5, self)
        self.tabela.setGeometry(20, 20, 760, 300)
        self.tabela.setHorizontalHeaderLabels(
            ('Numero Pedido', 'Data', 'Codigo Cliente',
             'Telefone', 'Cond Pagamento'))
        self.dbBuscarPedidos()

        self.lblNumeroPedido = QLabel('Numero Pedido', self)
        self.lblNumeroPedido.setGeometry(20, 330, 130, 25)

        self.lblData = QLabel('Data', self)
        # BUG FIX: the original called setGeometry(100.360, 50, 25) -- a
        # float and only three arguments, which raises TypeError at runtime
        # (setGeometry needs four ints: x, y, width, height). Reconstructed
        # to align with the label above; confirm the intended position.
        self.lblData.setGeometry(20, 360, 100, 25)

        # TODO: labels/fields for Codigo Cliente, Telefone and Cond
        # Pagamento are not implemented yet (placeholders below).
        #self.lblCodigoCliente = QLabel('Codigo Cliente',self)
        #self.lblCodigoCliente.setGeometry()
        #self.lblTelefone = QLabel('Telefone',self)
        #self.lblTelefone.setGeometry()
        #self.lblCondPagamento = QLabel('Cond Pagamento',self)
        #self.lblCondPagamento.setGeometry()

        self.txtNumeroPedido = QLineEdit(self)
        self.txtNumeroPedido.setGeometry(130, 330, 130, 25)

        self.txtData = QLineEdit(self)
        self.txtData.setGeometry(130, 360, 50, 25)

        #self.txtCodigoCliente = QLineEdit(self)
        #self.txtCOdigoCliente.setGeometry()
        #self.txtTelefone = QLineEdit(self)
        #self.txtTelefone.setGeometry()
        #self.txtCondPagamento = QLineEdit(self)
        #self.txtCondPagamento.setGeometry()

        self.tabela.resizeColumnsToContents()
        self.show()

    def dbBuscarPedidos(self):
        """Fetches every order from the database and fills the table.

        The connection and cursor are always closed (try/finally), even if
        the query fails -- the original leaked the connection on error.
        """
        db = pymysql.connect(**config)
        try:
            cursor = db.cursor()
            try:
                cursor.execute('select * from LojaDB.Pedidos ')
                self.tabela.setRowCount(0)
                for registro in cursor.fetchall():
                    linha = self.tabela.rowCount()
                    self.tabela.insertRow(linha)
                    # str() on every column avoids TypeError when the driver
                    # returns non-string values (dates, numbers) -- the
                    # original wrapped only columns 0, 1 and 3.
                    for coluna in range(5):
                        self.tabela.setItem(
                            linha, coluna,
                            QTableWidgetItem(str(registro[coluna])))
            finally:
                cursor.close()
        finally:
            db.close()
def main():
    """Initializes Qt, shows the orders window and enters the event loop."""
    app = QApplication(sys.argv)
    janela = ClasseAPP()  # keep a reference so the window is not collected
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
wujuguang/sentry | src/sentry/migrations/0030_auto__add_view__chg_field_event_group.py | 36 | 12673 | # encoding: utf-8
import datetime
import sentry
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``View`` model and the
    Group<->View many-to-many join table (``sentry_groupedmessage_views``).

    The ``models`` dict below is an auto-generated frozen snapshot of the
    ORM at the time this migration was written; do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create ``sentry_view`` and the M2M table."""
        # Adding model 'View'
        db.create_table('sentry_view', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
        ))
        db.send_create_signal('sentry', ['View'])

        # Adding M2M table for field views on 'Group'
        db.create_table('sentry_groupedmessage_views', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm['sentry.group'], null=False)),
            ('view', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm['sentry.view'], null=False))
        ))
        db.create_unique('sentry_groupedmessage_views', ['group_id', 'view_id'])

    def backwards(self, orm):
        """Revert the migration: drop ``sentry_view`` and the M2M table."""
        # Deleting model 'View'
        db.delete_table('sentry_view')

        # Removing M2M table for field views on 'Group'
        db.delete_table('sentry_groupedmessage_views')

    # Frozen ORM snapshot used by South to build the ``orm`` object passed
    # to forwards()/backwards().
    models = {
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sentry.event': {
            'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True', 'db_column': "'group'", 'blank': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'sentry.messagecountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.messagefiltervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.messageindex': {
            'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
            'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.project': {
            'Meta': {'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.projectdomain': {
            'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
        },
        'sentry.projectmember': {
            'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'project_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.projectoptions': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'ProjectOptions'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.view': {
            'Meta': {'object_name': 'View'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    }

    complete_apps = ['sentry']
| bsd-3-clause |
mick-d/nipype | nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py | 1 | 1144 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import ExtractMainComponent
def test_ExtractMainComponent_inputs():
    # Auto-generated by tools/checkspecs.py (see file header: DO NOT EDIT):
    # verifies that every metadata entry declared below matches the trait
    # metadata on ExtractMainComponent's input spec.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=1,
    ),
    out_file=dict(argstr='%s',
    name_source='in_file',
    name_template='%s.maincmp',
    position=2,
    ),
    terminal_output=dict(deprecated='1.0.0',
    nohash=True,
    ),
    )
    inputs = ExtractMainComponent.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_ExtractMainComponent_outputs():
    # Auto-generated by tools/checkspecs.py (see file header: DO NOT EDIT):
    # verifies the declared output trait metadata on the output spec.
    output_map = dict(out_file=dict(),
    )
    outputs = ExtractMainComponent.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| bsd-3-clause |
annarev/tensorflow | tensorflow/python/kernel_tests/random/random_gamma_test.py | 23 | 9049 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomGammaTest(test.TestCase):
  """This is a medium test due to the moments computation taking some time."""

  def setUp(self):
    # Fix both numpy's and TF's graph-level seeds so runs are reproducible.
    np.random.seed(137)
    random_seed.set_random_seed(137)

  def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
    """Returns a callable that draws a 10 x `num` array of gamma samples."""

    def func():
      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.random_gamma(
            [num], alpha, beta=beta, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = self.evaluate(rng)
      return ret

    return func

  def testNpDtypes(self):
    # numpy dtypes/arrays must be accepted in place of TF dtypes/tensors.
    self.evaluate(random_ops.random_gamma(
        [5], alpha=np.ones([2, 1, 3]), beta=np.ones([3]), dtype=np.float32))

  def testEmptySamplingNoError(self):
    # A zero-sized alpha dimension must not crash the op.
    self.evaluate(random_ops.random_gamma(
        [5], alpha=np.ones([2, 0, 3]), beta=np.ones([3]), dtype=dtypes.float32))

  @test_util.run_deprecated_v1
  def testMomentsFloat32(self):
    self._testMoments(dtypes.float32)

  @test_util.run_deprecated_v1
  def testMomentsFloat64(self):
    self._testMoments(dtypes.float64)

  def _testMoments(self, dt):
    """Compares sample moments against scipy's gamma distribution (z-test)."""
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf_logging.warn("Cannot test moments: %s" % e)
      return

    # The moments test is a z-value test. This is the largest z-value
    # we want to tolerate. Since the z-test approximates a unit normal
    # distribution, it should almost definitely never exceed 6.
    z_limit = 6.0

    for stride in 0, 1, 4, 17:
      alphas = [0.2, 1.0, 3.0]
      if dt == dtypes.float64:
        alphas = [0.01] + alphas
      for alpha in alphas:
        for scale in 9, 17:
          # Gamma moments only defined for values less than the scale param.
          max_moment = min(6, scale // 2)
          sampler = self._Sampler(
              20000, alpha, 1 / scale, dt, use_gpu=False, seed=12345)
          z_scores = util.test_moment_matching(
              sampler(),
              max_moment,
              stats.gamma(alpha, scale=scale),
              stride=stride,
          )
          self.assertAllLess(z_scores, z_limit)

  def _testZeroDensity(self, alpha):
    """Zero isn't in the support of the gamma distribution.
    But quantized floating point math has its limits.
    TODO(bjp): Implement log-gamma sampler for small-shape distributions.
    Args:
      alpha: float shape value to test
    """
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf_logging.warn("Cannot test zero density proportions: %s" % e)
      return
    # Per-dtype budget for "acceptable" zeros: the CDF mass below the
    # smallest positive representable value of that dtype.
    allowable_zeros = {
        dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
        dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
        dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
    }
    failures = []
    for use_gpu in [False, True]:
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sampler = self._Sampler(
            10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
        x = sampler()
        allowable = allowable_zeros[dt] * x.size
        allowable = allowable * 2 if allowable < 10 else allowable * 1.05
        if np.sum(x <= 0) > allowable:
          failures += [(use_gpu, dt)]
    self.assertEqual([], failures)

  def testNonZeroSmallShape(self):
    self._testZeroDensity(0.01)

  def testNonZeroSmallishShape(self):
    self._testZeroDensity(0.35)

  # Asserts that different trials (1000 samples per trial) is unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which uses the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of different samples.
        count = (x == y).sum()
        count_limit = 20 if dt == dtypes.float16 else 10
        if count >= count_limit:
          print(use_gpu, dt)
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertLess(count, count_limit)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == dtypes.float16:
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Same seed must reproduce the same stream of samples.
    for use_gpu in [False, True]:
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())

  @test_util.run_deprecated_v1
  def testNoCSE(self):
    """CSE = constant subexpression eliminator.
    SetIsStateful() should prevent two identical random ops from getting
    merged.
    """
    for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
      for use_gpu in [False, True]:
        with self.cached_session(use_gpu=use_gpu):
          rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
          rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
          diff = rnd2 - rnd1
          self.assertGreater(np.linalg.norm(diff.eval()), 0.1)

  @test_util.run_deprecated_v1
  def testShape(self):
    """Checks static shape inference for all alpha/beta/shape combinations."""
    # Fully known shape.
    rnd = random_ops.random_gamma([150], 2.0)
    self.assertEqual([150], rnd.get_shape().as_list())
    rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
    self.assertEqual([150, 2], rnd.get_shape().as_list())
    rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
    self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
    rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
    self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
    rnd = random_ops.random_gamma(
        [123], array_ops.placeholder(
            dtypes.float32, shape=(2,)))
    self.assertEqual([123, 2], rnd.get_shape().as_list())
    # Partially known shape.
    rnd = random_ops.random_gamma(
        array_ops.placeholder(
            dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
    self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
    rnd = random_ops.random_gamma(
        array_ops.placeholder(
            dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
    self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
    # Unknown shape.
    rnd = random_ops.random_gamma(
        array_ops.placeholder(dtypes.int32),
        array_ops.placeholder(dtypes.float32))
    self.assertIs(None, rnd.get_shape().ndims)
    rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
    self.assertIs(None, rnd.get_shape().ndims)

  @test_util.run_deprecated_v1
  def testPositive(self):
    # Even for tiny alpha, samples must stay strictly positive.
    n = int(10e3)
    for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
        self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
            math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
if __name__ == "__main__":
  # Run the kernel tests when this file is executed directly.
  test.main()
| apache-2.0 |
LLNL/spack | var/spack/repos/builtin/packages/libdap4/package.py | 5 | 1379 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libdap4(AutotoolsPackage):
    """
    libdap4 is a C++ library to serve as a client for the OPeNDAP framework
    that simplifies all aspects of scientific data networking and provides
    software which makes local data accessible to remote locations regardless
    of local storage format.
    """

    homepage = "https://www.opendap.org/"
    url = "https://github.com/OPENDAP/libdap4/archive/version-3.20.4.tar.gz"

    maintainers = ['tjhei']

    version('3.20.4', sha256='c39fa310985cc8963029ad0d0aba784e7dbf1f70c566bd7ae58242f1bb06d24a')

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('bison', type='build')

    depends_on('flex')
    depends_on('curl')
    depends_on('libxml2')
    depends_on('libuuid')

    def configure_args(self):
        """Returns extra ./configure arguments (CPPFLAGS pointing at libxml2)."""
        # libxml2 exports ./include/libxml2/ instead of ./include/, which we
        # need, so grab this path manually:
        libxml2_include = self.spec['libxml2'].prefix.include
        args = ['CPPFLAGS=-I{0}'.format(libxml2_include)]
        return args
| lgpl-2.1 |
caronc/newsreap | newsreap/Logging.py | 1 | 6395 | # -*- coding: utf-8 -*-
#
# Common Logging Parameters and Defaults
#
# Copyright (C) 2015-2017 Chris Caron <lead2gold@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# The first part of the file defines all of the namespacing
# used by this application
import sys
import logging
import logging.handlers

# We intentionally import this module so it preconfigures it's logging
# From there we can choose to manipulate it later without worrying about
# it's configuration over-riding ours; This creates a lint warning
# that we're importing a module we're not using; but this is intended.
# do not comment out or remove this entry
import sqlalchemy
# The default logger identifier used for general logging
NEWSREAP_LOGGER = 'newsreap'

# The default logger which displays backend engine and
# NNTP Server Interaction
NEWSREAP_ENGINE = '%s.engine' % NEWSREAP_LOGGER

# Codec Manipulation such as yEnc, uuencoded, etc
NEWSREAP_CODEC = '%s.codec' % NEWSREAP_LOGGER

# Users should utilize this for their hook logging
NEWSREAP_HOOKS = '%s.hooks' % NEWSREAP_LOGGER

# Command Line Interface Logger
NEWSREAP_CLI = '%s.cli' % NEWSREAP_LOGGER

# For a common reference point, we include the static logging
# Resource at the time for this information was:
# - http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#dbengine-logging
#
# namespaces used by SQLAlchemy
SQLALCHEMY_LOGGER = 'sqlalchemy'

# Defines the logger for the SQLAlchemy Engine
SQLALCHEMY_ENGINE = '%s.engine' % SQLALCHEMY_LOGGER

# Controls SQLAlchemy's connection pool logging.
SQLALCHEMY_POOL = '%s.pool' % SQLALCHEMY_LOGGER

# Controls SQLAlchemy's various Object Relational Mapping (ORM) logging.
SQLALCHEMY_ORM = '%s.orm' % SQLALCHEMY_LOGGER

# The number of bytes reached before automatically rotating the log file
# if this option was specified (used by add_handler's rotation branch)
# 5000000 bytes == 5 Megabytes
LOG_ROTATE_FILESIZE_BYTES = 5000000
def add_handler(logger, sendto=True, backupCount=5):
    """
    Attach a log handler to the identified logger.

    sendto == None then logging is disabled
    sendto == True then logging is put to stdout
    sendto == False then logging is put to stderr
    sendto == <string> then logging is routed to the filename specified

    if sendto is a <string>, then backupCount defines the number of logs
    to keep around. Set this to 0 or None if you don't wish the python
    logger to backupCount the files ever. By default logs are rotated
    once they reach 5MB (LOG_ROTATE_FILESIZE_BYTES).

    Returns True if a handler was attached, False otherwise.
    """
    # Accept both str and unicode filenames on Python 2; plain str on 3.
    try:
        string_types = basestring
    except NameError:
        # Python 3
        string_types = str

    if sendto is True:
        # redirect to stdout
        handler = logging.StreamHandler(sys.stdout)

    elif sendto is False:
        # redirect to stderr
        handler = logging.StreamHandler(sys.stderr)

    elif sendto is None:
        # redirect to null (disables logging for this logger)
        try:
            handler = logging.NullHandler()

        except AttributeError:
            # Python <= v2.6 has no NullHandler; emulate one
            class NullHandler(logging.Handler):
                def emit(self, record):
                    pass
            handler = NullHandler()

        # Set level to NOTSET just to eliminate the
        # extra checks done internally
        if logger.level != logging.NOTSET:
            logger.setLevel(logging.NOTSET)

    elif isinstance(sendto, string_types):
        if backupCount is None:
            handler = logging.FileHandler(filename=sendto)

        elif isinstance(backupCount, int):
            # BUG FIX: RotatingFileHandler lives in logging.handlers, not in
            # the top-level logging module; the previous reference
            # (logging.RotatingFileHandler) raised AttributeError as soon as
            # rotation was requested.
            handler = logging.handlers.RotatingFileHandler(
                filename=sendto,
                maxBytes=LOG_ROTATE_FILESIZE_BYTES,
                backupCount=backupCount,
            )

        else:
            # Invalid backupCount type; we failed to add a handler
            return False

    else:
        # Unsupported 'sendto' type (e.g. an int); previously this fell
        # through and raised NameError on an unbound 'handler'.
        return False

    # Setup Log Format
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s %(name)s %(message)s'))

    # Add Handler
    logger.addHandler(handler)
    return True
def init(verbose=2, sendto=True, backupCount=5):
    """
    Attach default handlers to the newsreap and sqlalchemy root loggers.

    This is a convenience for those wrapping this library; callers wanting
    finer control can configure logging themselves instead.

    verbose: verbosity level handed to set_verbosity() (falsy disables).
    sendto: where to route log output; same semantics as add_handler()
        (True=stdout, False=stderr, None=disabled, string=filename).
    backupCount: number of rotated log files to keep when sendto is a
        filename.
    """
    # BUG FIX: 'sendto' was previously accepted but ignored -- handlers
    # were always attached with sendto=True (stdout). Pass it through so
    # the parameter actually takes effect.
    add_handler(
        logging.getLogger(SQLALCHEMY_LOGGER),
        sendto=sendto,
        backupCount=backupCount,
    )
    add_handler(
        logging.getLogger(NEWSREAP_LOGGER),
        sendto=sendto,
        backupCount=backupCount,
    )

    if verbose:
        set_verbosity(verbose=verbose)
def set_verbosity(verbose):
    """
    Adjust the level of every newsreap/sqlalchemy logger according to a
    single integer verbosity value (higher == chattier).
    """
    # Start from a quiet baseline: everything at ERROR.
    baseline = (
        SQLALCHEMY_LOGGER,
        SQLALCHEMY_ENGINE,
        NEWSREAP_LOGGER,
        NEWSREAP_CLI,
        NEWSREAP_CODEC,
        NEWSREAP_HOOKS,
        NEWSREAP_ENGINE,
    )
    for name in baseline:
        logging.getLogger(name).setLevel(logging.ERROR)

    # Each tier applies when verbose exceeds its threshold; later tiers
    # deliberately override levels set by earlier ones.
    tiers = (
        (0, logging.INFO, (NEWSREAP_CLI, NEWSREAP_HOOKS, NEWSREAP_ENGINE)),
        (1, logging.DEBUG, (NEWSREAP_CLI, NEWSREAP_HOOKS, NEWSREAP_ENGINE)),
        (2, logging.INFO, (SQLALCHEMY_ENGINE, NEWSREAP_CODEC)),
        (3, logging.DEBUG, (NEWSREAP_CODEC,)),
        (4, logging.DEBUG, (SQLALCHEMY_ENGINE,)),
    )
    for threshold, level, names in tiers:
        if verbose > threshold:
            for name in names:
                logging.getLogger(name).setLevel(level)
# Establish a quiet default at import time: if the newsreap root logger has
# not been configured yet (level still NOTSET), set_verbosity(-1) drops all
# newsreap/sqlalchemy loggers to ERROR (the original comment said WARN,
# which was inaccurate).
rootlogger = logging.getLogger(NEWSREAP_LOGGER)
if rootlogger.level == logging.NOTSET:
    set_verbosity(-1)
| gpl-3.0 |
benchisell/curate_flask_appengine | lib/wtforms/ext/appengine/fields.py | 51 | 7822 | from __future__ import unicode_literals
import decimal
import operator
import warnings
from wtforms import fields, widgets
from wtforms.compat import text_type, string_types
class ReferencePropertyField(fields.SelectFieldBase):
    """
    A field for ``db.ReferenceProperty``. The list items are rendered in a
    select.
    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()
    def __init__(self, label=None, validators=None, reference_class=None,
                 label_attr=None, get_label=None, allow_blank=False,
                 blank_text='', **kwargs):
        super(ReferencePropertyField, self).__init__(label, validators,
                                                     **kwargs)
        if label_attr is not None:
            warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
            self.get_label = operator.attrgetter(label_attr)
        elif get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.all()
    def _get_data(self):
        # Resolve lazily: a submitted key is only matched against the query
        # the first time the data is read.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data
    def _set_data(self, data):
        self._data = data
        self._formdata = None
    data = property(_get_data, _set_data)
    def iter_choices(self):
        """Yield (value, label, selected) triples for the select widget."""
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = str(obj.key())
            label = self.get_label(obj)
            yield (key, label, (self.data.key() == obj.key()) if self.data else False)
    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                self._data = None
                self._formdata = valuelist[0]
    def pre_validate(self, form):
        """Ensure the submitted value refers to an object from the query.

        Bug fix: the previous implementation dereferenced ``self.data.key()``
        even when data was None and ``allow_blank`` was False, raising
        AttributeError instead of a validation error. Now mirrors
        KeyPropertyField's None-safe logic.
        """
        if self.data is not None:
            for obj in self.query:
                if str(self.data.key()) == str(obj.key()):
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))
class KeyPropertyField(fields.SelectFieldBase):
    """
    A field for ``ndb.KeyProperty``. The list items are rendered in a select.
    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()
    def __init__(self, label=None, validators=None, reference_class=None,
                 get_label=None, allow_blank=False, blank_text='', **kwargs):
        super(KeyPropertyField, self).__init__(label, validators, **kwargs)
        if get_label is None:
            self.get_label = lambda x: x
        # Bug fix: use the Python 2/3 compatible ``string_types`` (already
        # imported from wtforms.compat in this module) instead of the
        # Python 2-only ``basestring``.
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.query()
    def _get_data(self):
        # Resolve lazily: a submitted key id is only matched against the
        # query the first time the data is read.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key.id()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data
    def _set_data(self, data):
        self._data = data
        self._formdata = None
    data = property(_get_data, _set_data)
    def iter_choices(self):
        """Yield (value, label, selected) triples for the select widget."""
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = str(obj.key.id())
            label = self.get_label(obj)
            yield (key, label, (self.data.key == obj.key) if self.data else False)
    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                self._data = None
                self._formdata = valuelist[0]
    def pre_validate(self, form):
        """Ensure the submitted value refers to an object from the query."""
        if self.data is not None:
            for obj in self.query:
                if self.data.key == obj.key:
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
    """
    A field for ``db.StringListProperty``. The list items are rendered in a
    textarea.
    """
    def _value(self):
        """Render the stored list as newline-separated text."""
        # Prefer the raw submitted payload when re-rendering after a POST.
        if self.raw_data:
            return self.raw_data[0]
        if not self.data:
            return ''
        return text_type("\n".join(self.data))
    def process_formdata(self, valuelist):
        """Split the submitted textarea content into a list of lines."""
        if not valuelist:
            return
        try:
            self.data = valuelist[0].splitlines()
        except ValueError:
            raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
    """
    A field for ``db.ListProperty(int)``. The list items are rendered in a
    textarea, one integer per line.
    """
    def _value(self):
        """Render the stored list of ints as newline-separated text."""
        if self.raw_data:
            return self.raw_data[0]
        else:
            # Bug fix: str.join() requires strings; joining the stored ints
            # directly raised TypeError. Convert each value first.
            return text_type('\n'.join(str(v) for v in self.data)) if self.data else ''
    def process_formdata(self, valuelist):
        """Parse the submitted textarea content into a list of ints."""
        if valuelist:
            try:
                self.data = [int(value) for value in valuelist[0].splitlines()]
            except ValueError:
                raise ValueError(self.gettext('Not a valid integer list'))
class GeoPtPropertyField(fields.TextField):
    """A field for ``db.GeoPtProperty``; accepts a 'lat,lon' text pair."""
    def process_formdata(self, valuelist):
        """Normalize 'lat,lon' input into a canonical decimal string."""
        if not valuelist:
            return
        try:
            raw_lat, raw_lon = valuelist[0].split(',')
            lat = decimal.Decimal(raw_lat.strip())
            lon = decimal.Decimal(raw_lon.strip())
            self.data = '%s,%s' % (lat, lon)
        except (decimal.InvalidOperation, ValueError):
            # Covers both a missing/extra comma (unpack ValueError) and a
            # non-numeric component (InvalidOperation).
            raise ValueError('Not a valid coordinate location')
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/load_balancer_load_balancing_rules_operations.py | 1 | 7995 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerLoadBalancingRulesOperations(object):
    """LoadBalancerLoadBalancingRulesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-08-01".
    """
    # NOTE: this class is generated by AutoRest; edits will be lost on
    # regeneration.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is pinned per generated client; not configurable.
        self.api_version = "2017-08-01"
        self.config = config
    def list(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancing rules in a load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of LoadBalancingRule
        :rtype:
         ~azure.mgmt.network.v2017_08_01.models.LoadBalancingRulePaged[~azure.mgmt.network.v2017_08_01.models.LoadBalancingRule]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages follow the
            # service-provided nextLink verbatim (no extra query params).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            # Only 200 is a documented success code for this operation.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.LoadBalancingRulePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.LoadBalancingRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def get(
            self, resource_group_name, load_balancer_name, load_balancing_rule_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified load balancer load balancing rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param load_balancing_rule_name: The name of the load balancing rule.
        :type load_balancing_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: LoadBalancingRule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_08_01.models.LoadBalancingRule or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancingRule', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
| mit |
mou4e/zirconium | third_party/cython/src/Cython/Distutils/build_ext.py | 94 | 12955 | """Cython.Distutils.build_ext
Implements a version of the Distutils 'build_ext' command, for
building Cython extension modules."""
# This module should be kept compatible with Python 2.3.
__revision__ = "$Id:$"
import sys
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsPlatformError
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer, newer_group
from distutils import log
from distutils.dir_util import mkpath
from distutils.command import build_ext as _build_ext
from distutils import sysconfig
extension_name_re = _build_ext.extension_name_re
show_compilers = _build_ext.show_compilers
class Optimization(object):
    """Snapshot, strip and restore the -O optimization options carried by
    distutils' sysconfig compiler flag variables."""
    def __init__(self):
        # Config variables that may carry compiler optimization flags.
        self.flags = (
            'OPT',
            'CFLAGS',
            'CPPFLAGS',
            'EXTRA_CFLAGS',
            'BASECFLAGS',
            'PY_CFLAGS',
        )
        self.state = sysconfig.get_config_vars(*self.flags)
        self.config_vars = sysconfig.get_config_vars()
    def disable_optimization(self):
        "disable optimization for the C or C++ compiler"
        badoptions = ('-O1', '-O2', '-O3')
        for flag, option in zip(self.flags, self.state):
            if option is None:
                continue
            kept = [opt for opt in option.split() if opt not in badoptions]
            self.config_vars[flag] = ' '.join(kept)
    def restore_state(self):
        "restore the original state"
        for flag, option in zip(self.flags, self.state):
            if option is None:
                continue
            self.config_vars[flag] = option
optimization = Optimization()
class build_ext(_build_ext.build_ext):
    """distutils ``build_ext`` command extended to translate Cython/Pyrex
    sources (.pyx / .py) to C or C++ before delegating to the stock
    compile/link step. Legacy ``pyrex_*`` option names are aliased onto
    their ``cython_*`` equivalents."""
    description = "build C/C++ and Cython extensions (compile/link to build directory)"
    sep_by = _build_ext.build_ext.sep_by
    user_options = _build_ext.build_ext.user_options
    boolean_options = _build_ext.build_ext.boolean_options
    help_options = _build_ext.build_ext.help_options
    # Add the pyrex specific data.
    user_options.extend([
        ('cython-cplus', None,
         "generate C++ source files"),
        ('cython-create-listing', None,
         "write errors to a listing file"),
        ('cython-line-directives', None,
         "emit source line directives"),
        ('cython-include-dirs=', None,
         "path to the Cython include files" + sep_by),
        ('cython-c-in-temp', None,
         "put generated C files in temp directory"),
        ('cython-gen-pxi', None,
         "generate .pxi file for public declarations"),
        ('cython-directives=', None,
         "compiler directive overrides"),
        ('cython-gdb', None,
         "generate debug information for cygdb"),
        ('cython-compile-time-env', None,
         "cython compile time environment"),
        # For backwards compatibility.
        ('pyrex-cplus', None,
         "generate C++ source files"),
        ('pyrex-create-listing', None,
         "write errors to a listing file"),
        ('pyrex-line-directives', None,
         "emit source line directives"),
        ('pyrex-include-dirs=', None,
         "path to the Cython include files" + sep_by),
        ('pyrex-c-in-temp', None,
         "put generated C files in temp directory"),
        ('pyrex-gen-pxi', None,
         "generate .pxi file for public declarations"),
        ('pyrex-directives=', None,
         "compiler directive overrides"),
        ('pyrex-gdb', None,
         "generate debug information for cygdb"),
        ])
    boolean_options.extend([
        'cython-cplus', 'cython-create-listing', 'cython-line-directives',
        'cython-c-in-temp', 'cython-gdb',
        # For backwards compatibility.
        'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
        'pyrex-c-in-temp', 'pyrex-gdb',
        ])
    def initialize_options(self):
        """Set every Cython-specific option to its 'unset' default."""
        _build_ext.build_ext.initialize_options(self)
        self.cython_cplus = 0
        self.cython_create_listing = 0
        self.cython_line_directives = 0
        self.cython_include_dirs = None
        self.cython_directives = None
        self.cython_c_in_temp = 0
        self.cython_gen_pxi = 0
        self.cython_gdb = False
        self.no_c_in_traceback = 0
        self.cython_compile_time_env = None
    def __getattr__(self, name):
        # Alias legacy pyrex_* attribute reads onto cython_*.
        if name[:6] == 'pyrex_':
            return getattr(self, 'cython_' + name[6:])
        else:
            return _build_ext.build_ext.__getattr__(self, name)
    def __setattr__(self, name, value):
        # Alias legacy pyrex_* attribute writes onto cython_*.
        if name[:6] == 'pyrex_':
            return setattr(self, 'cython_' + name[6:], value)
        else:
            # _build_ext.build_ext.__setattr__(self, name, value)
            self.__dict__[name] = value
    def finalize_options (self):
        """Normalize option values after command-line parsing."""
        _build_ext.build_ext.finalize_options(self)
        if self.cython_include_dirs is None:
            self.cython_include_dirs = []
        # NOTE(review): `basestring` is Python 2-only; this module declares
        # Python 2.3 compatibility above, but this line would raise
        # NameError under Python 3 — confirm before porting.
        elif isinstance(self.cython_include_dirs, basestring):
            self.cython_include_dirs = \
                self.cython_include_dirs.split(os.pathsep)
        if self.cython_directives is None:
            self.cython_directives = {}
    # finalize_options ()
    def run(self):
        # We have one shot at this before build_ext initializes the compiler.
        # If --pyrex-gdb is in effect as a command line option or as option
        # of any Extension module, disable optimization for the C or C++
        # compiler.
        if self.cython_gdb or [1 for ext in self.extensions
                                 if getattr(ext, 'cython_gdb', False)]:
            optimization.disable_optimization()
        _build_ext.build_ext.run(self)
    def build_extensions(self):
        """Cythonize each extension's sources, then build it normally."""
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        for ext in self.extensions:
            ext.sources = self.cython_sources(ext.sources, ext)
            self.build_extension(ext)
    def cython_sources(self, sources, extension):
        """
        Walk the list of source files in 'sources', looking for Cython
        source files (.pyx and .py). Run Cython on all that are
        found, and return a modified 'sources' list with Cython source
        files replaced by the generated C (or C++) files.
        """
        try:
            from Cython.Compiler.Main \
                import CompilationOptions, \
                       default_options as cython_default_options, \
                       compile as cython_compile
            from Cython.Compiler.Errors import PyrexError
        except ImportError:
            e = sys.exc_info()[1]
            print("failed to import Cython: %s" % e)
            raise DistutilsPlatformError("Cython does not appear to be installed")
        new_sources = []
        cython_sources = []
        cython_targets = {}
        # Setup create_list and cplus from the extension options if
        # Cython.Distutils.extension.Extension is used, otherwise just
        # use what was parsed from the command-line or the configuration file.
        # cplus will also be set to true is extension.language is equal to
        # 'C++' or 'c++'.
        #try:
        #    create_listing = self.cython_create_listing or \
        #        extension.cython_create_listing
        #    cplus = self.cython_cplus or \
        #        extension.cython_cplus or \
        #        (extension.language != None and \
        #            extension.language.lower() == 'c++')
        #except AttributeError:
        #    create_listing = self.cython_create_listing
        #    cplus = self.cython_cplus or \
        #        (extension.language != None and \
        #            extension.language.lower() == 'c++')
        # Per-option precedence: command-line/config value wins; otherwise
        # fall back to the per-extension attribute when present.
        create_listing = self.cython_create_listing or \
            getattr(extension, 'cython_create_listing', 0)
        line_directives = self.cython_line_directives or \
            getattr(extension, 'cython_line_directives', 0)
        no_c_in_traceback = self.no_c_in_traceback or \
            getattr(extension, 'no_c_in_traceback', 0)
        cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \
                (extension.language and extension.language.lower() == 'c++')
        cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0)
        cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False)
        cython_compile_time_env = self.cython_compile_time_env or \
            getattr(extension, 'cython_compile_time_env', None)
        # Set up the include_path for the Cython compiler:
        #    1.    Start with the command line option.
        #    2.    Add in any (unique) paths from the extension
        #        cython_include_dirs (if Cython.Distutils.extension is used).
        #    3.    Add in any (unique) paths from the extension include_dirs
        includes = self.cython_include_dirs
        try:
            for i in extension.cython_include_dirs:
                if not i in includes:
                    includes.append(i)
        except AttributeError:
            pass
        for i in extension.include_dirs:
            if not i in includes:
                includes.append(i)
        # Set up Cython compiler directives:
        #    1. Start with the command line option.
        #    2. Add in any (unique) entries from the extension
        #         cython_directives (if Cython.Distutils.extension is used).
        directives = self.cython_directives
        if hasattr(extension, "cython_directives"):
            directives.update(extension.cython_directives)
        # Set the target_ext to '.c'.  Cython will change this to '.cpp' if
        # needed.
        if cplus:
            target_ext = '.cpp'
        else:
            target_ext = '.c'
        # Decide whether to drop the generated C files into the temp dir
        # or the source tree.
        if not self.inplace and (self.cython_c_in_temp
                                 or getattr(extension, 'cython_c_in_temp', 0)):
            target_dir = os.path.join(self.build_temp, "pyrex")
            for package_name in extension.name.split('.')[:-1]:
                target_dir = os.path.join(target_dir, package_name)
        else:
            target_dir = None
        newest_dependency = None
        for source in sources:
            (base, ext) = os.path.splitext(os.path.basename(source))
            if ext == ".py":
                # FIXME: we might want to special case this some more
                ext = '.pyx'
            if ext == ".pyx":              # Cython source file
                output_dir = target_dir or os.path.dirname(source)
                new_sources.append(os.path.join(output_dir, base + target_ext))
                cython_sources.append(source)
                cython_targets[source] = new_sources[-1]
            elif ext == '.pxi' or ext == '.pxd':
                # Headers/includes are not compiled themselves, but they
                # participate in the rebuild check below.
                if newest_dependency is None \
                        or newer(source, newest_dependency):
                    newest_dependency = source
            else:
                new_sources.append(source)
        if not cython_sources:
            return new_sources
        module_name = extension.name
        for source in cython_sources:
            target = cython_targets[source]
            depends = [source] + list(extension.depends or ())
            if(source[-4:].lower()==".pyx" and os.path.isfile(source[:-3]+"pxd")):
                depends += [source[:-3]+"pxd"]
            rebuild = self.force or newer_group(depends, target, 'newer')
            if not rebuild and newest_dependency is not None:
                rebuild = newer(newest_dependency, target)
            if rebuild:
                log.info("cythoning %s to %s", source, target)
                self.mkpath(os.path.dirname(target))
                if self.inplace:
                    output_dir = os.curdir
                else:
                    output_dir = self.build_lib
                options = CompilationOptions(cython_default_options,
                    use_listing_file = create_listing,
                    include_path = includes,
                    compiler_directives = directives,
                    output_file = target,
                    cplus = cplus,
                    emit_linenums = line_directives,
                    c_line_in_traceback = not no_c_in_traceback,
                    generate_pxi = cython_gen_pxi,
                    output_dir = output_dir,
                    gdb_debug = cython_gdb,
                    compile_time_env = cython_compile_time_env)
                result = cython_compile(source, options=options,
                                        full_module_name=module_name)
            else:
                log.info("skipping '%s' Cython extension (up-to-date)", target)
        return new_sources
    # cython_sources ()
# class build_ext
| bsd-3-clause |
RedhawkSDR/integration-gnuhawk | components/fll_band_edge_cc_4o/tests/test_fll_band_edge_cc_4o.py | 1 | 4545 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
    """Test for all component implementations in fll_band_edge_cc_4o"""
    def testScaBasicBehavior(self):
        """Smoke test: launch the component and exercise the basic SCA
        Resource contract (identity, initialize/configure, query, ports,
        start/stop, releaseObject)."""
        #######################################################################
        # Launch the component with the default execparams
        execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
        execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
        self.launch(execparams)
        #######################################################################
        # Verify the basic state of the component
        self.assertNotEqual(self.comp, None)
        self.assertEqual(self.comp.ref._non_existent(), False)
        self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
        self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
        #######################################################################
        # Simulate regular component startup
        # Verify that initialize nor configure throw errors
        self.comp.initialize()
        configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
        self.comp.configure(configureProps)
        #######################################################################
        # Validate that query returns all expected parameters
        # Query of '[]' should return the following set of properties
        expectedProps = []
        expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
        expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
        props = self.comp.query([])
        props = dict((x.id, any.from_any(x.value)) for x in props)
        # Query may return more than expected, but not less
        # NOTE(review): dict.has_key is Python 2-only; this generated test
        # scaffold predates Python 3 support.
        for expectedProp in expectedProps:
            self.assertEquals(props.has_key(expectedProp.id), True)
        #######################################################################
        # Verify that all expected ports are available
        for port in self.scd.get_componentfeatures().get_ports().get_uses():
            port_obj = self.comp.getPort(str(port.get_usesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"),  True)
        for port in self.scd.get_componentfeatures().get_ports().get_provides():
            port_obj = self.comp.getPort(str(port.get_providesname()))
            self.assertNotEqual(port_obj, None)
            self.assertEqual(port_obj._non_existent(), False)
            self.assertEqual(port_obj._is_a(port.get_repid()),  True)
        #######################################################################
        # Make sure start and stop can be called without throwing exceptions
        self.comp.start()
        self.comp.stop()
        #######################################################################
        # Simulate regular component shutdown
        self.comp.releaseObject()
    # TODO Add additional tests here
    #
    # See:
    #   ossie.utils.bulkio.bulkio_helpers,
    #   ossie.utils.bluefile.bluefile_helpers
    # for modules that will assist with testing components with BULKIO ports
# Entry point: run the SCA test harness against this component's SPD file.
if __name__ == "__main__":
    ossie.utils.testing.main("../fll_band_edge_cc_4o.spd.xml") # By default tests all implementations
| gpl-3.0 |
drakuna/odoo | addons/website_sale/models/product.py | 3 | 9392 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp import tools
from openerp.osv import osv, fields
class product_style(osv.Model):
    """A visual style (set of HTML/CSS classes) selectable for a product
    card on the website shop."""
    _name = "product.style"
    _columns = {
        'name' : fields.char('Style Name', required=True),
        'html_class': fields.char('HTML Classes'),
    }
class product_pricelist(osv.Model):
    """Extend pricelists with a promotional code redeemable on the
    e-commerce checkout."""
    _inherit = "product.pricelist"
    _columns = {
        'code': fields.char('E-commerce Promotional Code'),
    }
class product_public_category(osv.osv):
    _name = "product.public.category"
    _inherit = ["website.seo.metadata"]
    _description = "Website Product Category"
    _order = "sequence, name"
    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]
    def name_get(self, cr, uid, ids, context=None):
        """Return (id, name) pairs where the name is the full path from the
        root category, e.g. 'Parent / Child'."""
        res = []
        for cat in self.browse(cr, uid, ids, context=context):
            names = [cat.name]
            pcat = cat.parent_id
            while pcat:
                names.append(pcat.name)
                pcat = pcat.parent_id
            # Names were collected leaf-first; reverse for root-first display.
            res.append((cat.id, ' / '.join(reversed(names))))
        return res
    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        """Function-field getter wrapping name_get() for 'complete_name'."""
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('product.public.category','Parent Category', select=True),
        'child_id': fields.one2many('product.public.category', 'parent_id', string='Children Categories'),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
    }
    # NOTE: there is no 'default image', because by default we don't show
    # thumbnails for categories. However if we have a thumbnail for at least one
    # category, then we display a default image on the other, so that the
    # buttons have consistent styling.
    # In this case, the default image is set by the js code.
    image = openerp.fields.Binary("Image", attachment=True,
        help="This field holds the image used as image for the category, limited to 1024x1024px.")
    image_medium = openerp.fields.Binary("Medium-sized image",
        compute='_compute_images', inverse='_inverse_image_medium', store=True, attachment=True,
        help="Medium-sized image of the category. It is automatically "\
             "resized as a 128x128px image, with aspect ratio preserved. "\
             "Use this field in form views or some kanban views.")
    image_small = openerp.fields.Binary("Small-sized image",
        compute='_compute_images', inverse='_inverse_image_small', store=True, attachment=True,
        help="Small-sized image of the category. It is automatically "\
             "resized as a 64x64px image, with aspect ratio preserved. "\
             "Use this field anywhere a small image is required.")
    @openerp.api.depends('image')
    def _compute_images(self):
        """Derive the medium/small thumbnails from the master image."""
        for rec in self:
            rec.image_medium = tools.image_resize_image_medium(rec.image)
            rec.image_small = tools.image_resize_image_small(rec.image)
    def _inverse_image_medium(self):
        """Writing the medium thumbnail back-propagates to the master image."""
        for rec in self:
            rec.image = tools.image_resize_image_big(rec.image_medium)
    def _inverse_image_small(self):
        """Writing the small thumbnail back-propagates to the master image."""
        for rec in self:
            rec.image = tools.image_resize_image_big(rec.image_small)
# Extend product templates with website/e-commerce data (SEO metadata,
# publication state, shop layout and suggested/accessory products).
class product_template(osv.Model):
    _inherit = ["product.template", "website.seo.metadata", 'website.published.mixin', 'rating.mixin']
    _order = 'website_published desc, website_sequence desc, name'
    _name = 'product.template'
    _mail_post_access = 'read'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = super(product_template, self)._website_url(cr, uid, ids, field_name, arg, context=context)
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.id,)
return res
    _columns = {
        # TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', ('model', '=', self._name), ('message_type', '=', 'comment')
            ],
            string='Website Comments',
        ),
        'website_description': fields.html('Description for the website', sanitize=False, translate=True),
        'alternative_product_ids': fields.many2many('product.template','product_alternative_rel','src_id','dest_id', string='Suggested Products', help='Appear on the product page'),
        'accessory_product_ids': fields.many2many('product.product','product_accessory_rel','src_id','dest_id', string='Accessory Products', help='Appear on the shopping cart'),
        'website_size_x': fields.integer('Size X'),
        'website_size_y': fields.integer('Size Y'),
        'website_style_ids': fields.many2many('product.style', string='Styles'),
        'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
        'public_categ_ids': fields.many2many('product.public.category', string='Website Product Category', help="Those categories are used to group similar products for e-commerce."),
    }
    def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
        """Default 'website_sequence': one below the current minimum so a new
        product lands at the end of the website ordering (10 on an empty
        table, since MIN(...) is NULL there)."""
        cr.execute('SELECT MIN(website_sequence)-1 FROM product_template')
        next_sequence = cr.fetchone()[0] or 10
        return next_sequence
    _defaults = {
        'website_size_x': 1,
        'website_size_y': 1,
        'website_sequence': _defaults_website_sequence,
    }
def set_sequence_top(self, cr, uid, ids, context=None):
cr.execute('SELECT MAX(website_sequence) FROM product_template')
max_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': max_sequence + 1}, context=context)
def set_sequence_bottom(self, cr, uid, ids, context=None):
cr.execute('SELECT MIN(website_sequence) FROM product_template')
min_sequence = cr.fetchone()[0] or 0
return self.write(cr, uid, ids, {'website_sequence': min_sequence -1}, context=context)
def set_sequence_up(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence > %s AND website_published = %s ORDER BY website_sequence ASC LIMIT 1""" % (product.website_sequence, product.website_published))
prev = cr.fetchone()
if prev:
self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
else:
return self.set_sequence_top(cr, uid, ids, context=context)
def set_sequence_down(self, cr, uid, ids, context=None):
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence < %s AND website_published = %s ORDER BY website_sequence DESC LIMIT 1""" % (product.website_sequence, product.website_published))
next = cr.fetchone()
if next:
self.write(cr, uid, [next[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': next[1]}, context=context)
else:
return self.set_sequence_bottom(cr, uid, ids, context=context)
class product_product(osv.Model):
    """Delegate website actions from the product variant to its template."""
    _inherit = "product.product"

    # Wrappers for call_kw with inherits
    def open_website_url(self, cr, uid, ids, context=None):
        """Open the website page of the variant's template."""
        template_id = self.browse(cr, uid, ids, context=context).product_tmpl_id.id
        return self.pool['product.template'].open_website_url(cr, uid, [template_id], context=context)

    def website_publish_button(self, cr, uid, ids, context=None):
        """Toggle website publication on the variant's template.

        NOTE: this method used to be defined twice with identical bodies;
        the duplicate definition (which silently shadowed the first) has
        been removed.
        """
        template_id = self.browse(cr, uid, ids, context=context).product_tmpl_id.id
        return self.pool['product.template'].website_publish_button(cr, uid, [template_id], context=context)
class product_attribute(osv.Model):
    """Adds a website rendering ``type`` to product attributes."""
    _inherit = "product.attribute"

    _columns = {
        # Widget used to render the attribute on the shop page.
        'type': fields.selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], string="Type"),
    }

    _defaults = {
        # Radio buttons by default.
        'type': lambda *a: 'radio',
    }
class product_attribute_value(osv.Model):
    """Adds an HTML color code to attribute values, used by the website
    'Color' attribute widget."""
    _inherit = "product.attribute.value"

    _columns = {
        # Help text typo fixed: "attibute" -> "attribute".
        'color': fields.char("HTML Color Index", help="Here you can set a specific HTML color index (e.g. #ff0000) to display the color on the website if the attribute type is 'Color'."),
    }
| gpl-3.0 |
uranusjr/django | tests/template_tests/syntax_tests/test_width_ratio.py | 56 | 5956 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class WidthRatioTagTests(SimpleTestCase):
    """Tests for the ``{% widthratio %}`` template tag: rounding, zero and
    None handling, variable max_width, filter arguments, ``as`` assignment
    and syntax-error cases."""

    libraries = {'custom': 'template_tests.templatetags.custom'}

    @setup({'widthratio01': '{% widthratio a b 0 %}'})
    def test_widthratio01(self):
        # A max_width of 0 always yields 0.
        output = self.engine.render_to_string('widthratio01', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')

    @setup({'widthratio02': '{% widthratio a b 100 %}'})
    def test_widthratio02(self):
        # 0/0 does not raise; it renders as 0.
        output = self.engine.render_to_string('widthratio02', {'a': 0, 'b': 0})
        self.assertEqual(output, '0')

    @setup({'widthratio03': '{% widthratio a b 100 %}'})
    def test_widthratio03(self):
        output = self.engine.render_to_string('widthratio03', {'a': 0, 'b': 100})
        self.assertEqual(output, '0')

    @setup({'widthratio04': '{% widthratio a b 100 %}'})
    def test_widthratio04(self):
        output = self.engine.render_to_string('widthratio04', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')

    @setup({'widthratio05': '{% widthratio a b 100 %}'})
    def test_widthratio05(self):
        output = self.engine.render_to_string('widthratio05', {'a': 100, 'b': 100})
        self.assertEqual(output, '100')

    @setup({'widthratio06': '{% widthratio a b 100 %}'})
    def test_widthratio06(self):
        """
        62.5 should round to 62
        """
        output = self.engine.render_to_string('widthratio06', {'a': 50, 'b': 80})
        self.assertEqual(output, '62')

    @setup({'widthratio07': '{% widthratio a b 100 %}'})
    def test_widthratio07(self):
        """
        71.4 should round to 71
        """
        output = self.engine.render_to_string('widthratio07', {'a': 50, 'b': 70})
        self.assertEqual(output, '71')

    # Raise exception if we don't have 3 args, last one an integer
    @setup({'widthratio08': '{% widthratio %}'})
    def test_widthratio08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio08')

    @setup({'widthratio09': '{% widthratio a b %}'})
    def test_widthratio09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio09', {'a': 50, 'b': 100})

    @setup({'widthratio10': '{% widthratio a b 100.0 %}'})
    def test_widthratio10(self):
        # A float max_width is accepted.
        output = self.engine.render_to_string('widthratio10', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')

    @setup({'widthratio11': '{% widthratio a b c %}'})
    def test_widthratio11(self):
        """
        #10043: widthratio should allow max_width to be a variable
        """
        output = self.engine.render_to_string('widthratio11', {'a': 50, 'c': 100, 'b': 100})
        self.assertEqual(output, '50')

    # #18739: widthratio should handle None args consistently with
    # non-numerics
    @setup({'widthratio12a': '{% widthratio a b c %}'})
    def test_widthratio12a(self):
        output = self.engine.render_to_string('widthratio12a', {'a': 'a', 'c': 100, 'b': 100})
        self.assertEqual(output, '')

    @setup({'widthratio12b': '{% widthratio a b c %}'})
    def test_widthratio12b(self):
        output = self.engine.render_to_string('widthratio12b', {'a': None, 'c': 100, 'b': 100})
        self.assertEqual(output, '')

    @setup({'widthratio13a': '{% widthratio a b c %}'})
    def test_widthratio13a(self):
        output = self.engine.render_to_string('widthratio13a', {'a': 0, 'c': 100, 'b': 'b'})
        self.assertEqual(output, '')

    @setup({'widthratio13b': '{% widthratio a b c %}'})
    def test_widthratio13b(self):
        output = self.engine.render_to_string('widthratio13b', {'a': 0, 'c': 100, 'b': None})
        self.assertEqual(output, '')

    # A non-numeric max_width, unlike the other arguments, is an error.
    @setup({'widthratio14a': '{% widthratio a b c %}'})
    def test_widthratio14a(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14a', {'a': 0, 'c': 'c', 'b': 100})

    @setup({'widthratio14b': '{% widthratio a b c %}'})
    def test_widthratio14b(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14b', {'a': 0, 'c': None, 'b': 100})

    @setup({'widthratio15': '{% load custom %}{% widthratio a|noop:"x y" b 0 %}'})
    def test_widthratio15(self):
        """
        Test whitespace in filter argument
        """
        output = self.engine.render_to_string('widthratio15', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')

    # Widthratio with variable assignment
    @setup({'widthratio16': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio16(self):
        output = self.engine.render_to_string('widthratio16', {'a': 50, 'b': 100})
        self.assertEqual(output, '-50-')

    @setup({'widthratio17': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio17(self):
        output = self.engine.render_to_string('widthratio17', {'a': 100, 'b': 100})
        self.assertEqual(output, '-100-')

    @setup({'widthratio18': '{% widthratio a b 100 as %}'})
    def test_widthratio18(self):
        # "as" with no target name is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio18')

    @setup({'widthratio19': '{% widthratio a b 100 not_as variable %}'})
    def test_widthratio19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio19')

    # Infinite operands render as the empty string rather than raising.
    @setup({'widthratio20': '{% widthratio a b 100 %}'})
    def test_widthratio20(self):
        output = self.engine.render_to_string('widthratio20', {'a': float('inf'), 'b': float('inf')})
        self.assertEqual(output, '')

    @setup({'widthratio21': '{% widthratio a b 100 %}'})
    def test_widthratio21(self):
        output = self.engine.render_to_string('widthratio21', {'a': float('inf'), 'b': 2})
        self.assertEqual(output, '')
| bsd-3-clause |
Mirantis/octane | octane/commands/sync_images.py | 1 | 2791 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers.sync_glance_images import sync_glance_images
from octane.util import db
from octane.util import env as env_util
from octane.util import ssh
def prepare(orig_id, seed_id):
    """Copy the keystone database from the original environment to the seed.

    Dumps keystone from the source environment into a temporary file,
    restores it into the target, runs ``keystone-manage db_sync`` on one
    controller and finally restarts memcached on every target controller.
    """
    source_env = environment_obj.Environment(orig_id)
    target_env = environment_obj.Environment(seed_id)
    primary = env_util.get_one_controller(target_env)
    with tempfile.NamedTemporaryFile() as dump_file:
        db.mysqldump_from_env(source_env, ['keystone'], dump_file.name)
        db.mysqldump_restore_to_env(target_env, dump_file.name)
    ssh.call(['keystone-manage', 'db_sync'],
             node=primary, parse_levels=True)
    for node in env_util.get_controllers(target_env):
        ssh.call(['service', 'memcached', 'restart'], node=node)
class SyncImagesCommand(cmd.Command):
    """Sync glance images between ORIG and SEED environments"""

    def get_parser(self, prog_name):
        # Extend the base cliff parser with our positionals, in the exact
        # order take_action() consumes them.
        parser = super(SyncImagesCommand, self).get_parser(prog_name)
        env_args = (
            ('orig_id', 'ORIG_ID', "ID of original environment"),
            ('seed_id', 'SEED_ID', "ID of seed environment"),
        )
        for arg_name, arg_metavar, arg_help in env_args:
            parser.add_argument(arg_name, type=int, metavar=arg_metavar,
                                help=arg_help)
        parser.add_argument(
            'swift_ep', type=str,
            help="Endpoint's name where swift-proxy service is listening on")
        return parser

    def take_action(self, parsed_args):
        # Delegate the whole transfer to the glance sync helper.
        sync_glance_images(parsed_args.orig_id, parsed_args.seed_id,
                           parsed_args.swift_ep)
class SyncImagesPrepareCommand(cmd.Command):
    """Sync glance images between ORIG and SEED environments"""

    def get_parser(self, prog_name):
        base_parser = super(SyncImagesPrepareCommand, self).get_parser(prog_name)
        base_parser.add_argument('orig_id', type=int, metavar='ORIG_ID',
                                 help="ID of original environment")
        base_parser.add_argument('seed_id', type=int, metavar='SEED_ID',
                                 help="ID of seed environment")
        return base_parser

    def take_action(self, parsed_args):
        # Only the keystone DB preparation step -- no image transfer here.
        prepare(parsed_args.orig_id, parsed_args.seed_id)
| apache-2.0 |
Wolnosciowiec/file-repository | client/bahub/bahubapp/handler/__init__.py | 1 | 4733 | from ..entity.definition import BackupDefinition
from ..service.client import FileRepositoryClient
from ..service.pipefactory import PipeFactory
from ..exceptions import ReadWriteException
from ..result import CommandExecutionResult
from logging import Logger
import string
import random
import subprocess
from shutil import copyfileobj
class BackupHandler:
    """ Manages the process of backup and restore, interacts with different sources of backup data using adapters """

    _client = None        # type: FileRepositoryClient
    _pipe_factory = None  # type: PipeFactory
    _logger = None        # type: Logger
    _definition = None    # backup definition this handler operates on

    def __init__(self,
                 _client: FileRepositoryClient,
                 _pipe_factory: PipeFactory,
                 _logger: Logger,
                 _definition: BackupDefinition):
        self._client = _client
        self._pipe_factory = _pipe_factory
        self._logger = _logger
        self._definition = _definition

    def perform_backup(self):
        """Validate the definition, stream the backup source and upload it.

        NOTE(review): ``response.return_code`` is captured right after the
        process is spawned (see ``_execute_command``), so it is normally
        still ``None`` here and the error branch rarely fires -- confirm
        whether polling the live process was intended instead.
        """
        self._validate()
        self._validate_running_command()
        response = self._read()
        if response.return_code != 0 and response.return_code is not None:
            raise ReadWriteException('Backup source read error, use --debug and retry to investigate')
        upload_response = self._client.send(response.stdout, self._get_definition())
        # Give the producer process up to 15s to exit before closing its pipe.
        response.process.wait(15)
        response.stdout.close()
        return upload_response

    def perform_restore(self, version: str):
        """Fetch the given backup version from storage and write it to disk.

        :raises ReadWriteException: when the writer process exits non-zero.
        """
        response = self._write(
            self._read_from_storage(version)
        )
        response.process.wait()
        self._logger.info('Waiting for process to finish')
        if response.return_code is not None and response.return_code > 0:
            raise ReadWriteException('Cannot write files to disk while restoring from backup. Errors: '
                                     + str(response.stderr.read().decode('utf-8')))
        self._logger.info('No errors found, sending success information')
        # Callers expect a JSON string, not a dict.
        return '{"status": "OK"}'

    def close(self):
        """Finish the handler; delegates adapter-specific cleanup to _close()."""
        self._logger.info('Finishing the process')
        self._close()

    def _get_definition(self) -> BackupDefinition:
        return self._definition

    def _execute_command(self, command: str, stdin=None) -> CommandExecutionResult:
        """
        Executes a command on local machine, returning stdout as a stream, and streaming in the stdin (optionally)
        """
        self._logger.debug('shell(' + command + ')')
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   stdin=subprocess.PIPE if stdin else None,
                                   executable='/bin/bash',
                                   shell=True)
        if stdin:
            self._logger.info('Copying stdin to process')
            try:
                copyfileobj(stdin, process.stdin)
            except BrokenPipeError:
                # A broken pipe usually means the consumer (e.g. tar) died;
                # surface its output to make the failure diagnosable.
                raise ReadWriteException(
                    'Cannot write to process, broken pipe occurred, probably a tar process died. '
                    + str(process.stdin.read()) + str(process.stderr.read())
                )
            process.stdin.close()
        # NOTE(review): process.returncode is read before wait(), so it is
        # typically None at this point -- see perform_backup().
        return CommandExecutionResult(process.stdout, process.stderr, process.returncode, process)

    def _validate_running_command(self):
        """ Validate if the command really exports the data, does not end up with an error """
        response = self._read()
        # Pull a small sample to force the command to actually start
        # producing output, then terminate it.
        response.stdout.read(1024)
        response.process.kill()
        response.process.wait(15)
        if response.process.returncode > 0:
            raise ReadWriteException(
                'The process exited with incorrect code, try to verify the command in with --debug switch'
            )

    def _validate(self):
        # Adapter hook: subclasses must verify their configuration.
        raise Exception('_validate() not implemented for handler')

    def _read(self) -> CommandExecutionResult:
        """ TAR output or file stream buffered from ANY source for example """
        raise Exception('_read() not implemented for handler')

    def _write(self, stream) -> CommandExecutionResult:
        """ A file stream or tar output be written into the storage. May be OpenSSL encoded, depends on definition """
        raise Exception('_write() not implemented for handler')

    def _read_from_storage(self, version: str):
        # Fetch the raw backup stream for the given version from the server.
        return self._client.fetch(version, self._get_definition())

    def _close(self):
        # Optional adapter hook; default is a no-op.
        pass

    @staticmethod
    def generate_id(size=6, chars=string.ascii_uppercase + string.digits):
        # Random short identifier; uses `random`, so not suitable for
        # security-sensitive tokens.
        return ''.join(random.choice(chars) for _ in range(size))
| lgpl-3.0 |
arafsheikh/coala | coalib/misc/Shell.py | 10 | 4385 | from contextlib import contextmanager
from subprocess import PIPE, Popen
from coalib.parsing.StringProcessing import escape
@contextmanager
def run_interactive_shell_command(command, **kwargs):
    """
    Runs a command in shell and provides stdout, stderr and stdin streams.

    This function creates a context manager that sets up the process, returns
    to caller, closes streams and waits for process to exit on leaving.

    The process is opened in `universal_newlines` mode.

    :param command: The command to run on shell.
    :param kwargs:  Additional keyword arguments to pass to `subprocess.Popen`
                    that is used to spawn the process (except `shell`,
                    `stdout`, `stderr`, `stdin` and `universal_newlines`, a
                    `TypeError` is raised then).
    :return:        A context manager yielding the process started from the
                    command.
    """
    proc = Popen(command,
                 shell=True,
                 stdout=PIPE,
                 stderr=PIPE,
                 stdin=PIPE,
                 universal_newlines=True,
                 **kwargs)
    try:
        yield proc
    finally:
        # Close all pipes before reaping the child, then wait so no
        # zombie process is left behind.
        for stream in (proc.stdout, proc.stderr, proc.stdin):
            stream.close()
        proc.wait()
def run_shell_command(command, stdin=None, **kwargs):
    """
    Runs a command in shell and returns the read stdout and stderr data.

    This function waits for the process to exit.

    :param command: The command to run on shell.
    :param stdin:   Initial input to send to the process.
    :param kwargs:  Additional keyword arguments to pass to `subprocess.Popen`
                    that is used to spawn the process (except `shell`,
                    `stdout`, `stderr`, `stdin` and `universal_newlines`, a
                    `TypeError` is raised then).
    :return:        A tuple with `(stdoutstring, stderrstring)`.
    """
    with run_interactive_shell_command(command, **kwargs) as proc:
        stdout_data, stderr_data = proc.communicate(stdin)
    return (stdout_data, stderr_data)
def get_shell_type():  # pragma: no cover
    """
    Finds the current shell type based on the outputs of common pre-defined
    variables in them. This is useful to identify which sort of escaping
    is required for strings.

    :return: The shell type. This can be either "powershell" if Windows
             Powershell is detected, "cmd" if command prompt is been
             detected or "sh" if it's neither of these.
    """
    out_hostname, _ = run_shell_command(["echo", "$host.name"])
    if out_hostname.strip() == "ConsoleHost":
        return "powershell"
    out_0, _ = run_shell_command(["echo", "$0"])
    # Bug fix: the original condition compared ``out_0.strip() == ""`` twice
    # (``A and A``), clearly a copy/paste slip; the single check below is
    # logically equivalent while removing the redundancy.
    if out_0.strip() == "":
        return "cmd"
    return "sh"
def prepare_string_argument(string, shell=get_shell_type()):
    """
    Prepares a string argument for being passed as a parameter on shell.

    On `sh` this function effectively encloses the given string
    with quotes (either '' or "", depending on content).

    :param string: The string to prepare for shell.
    :param shell:  The shell platform to prepare string argument for.
                   If it is not "sh" it will be ignored and return the
                   given string without modification.
    :return:       The shell-prepared string.
    """
    if shell != "sh":
        # Nothing to do for non-POSIX shells.
        return string
    return '"' + escape(string, '"') + '"'
def escape_path_argument(path, shell=get_shell_type()):
    """
    Makes a raw path ready for using as parameter in a shell command (escapes
    illegal characters, surrounds with quotes etc.).

    :param path:  The path to make ready for shell.
    :param shell: The shell platform to escape the path argument for. Possible
                  values are "sh", "powershell", and "cmd" (others will be
                  ignored and return the given path without modification).
    :return:      The escaped path argument.
    """
    if shell == "cmd":
        # Quotes (") are illegal in NTFS file names but may occur on other
        # file systems; escape each one by preceding it with a caret (^)
        # and wrap the whole path in quotes.
        return '"' + escape(path, '"', '^') + '"'
    if shell == "sh":
        return escape(path, " ")
    # Any other non-supported system doesn't get a path escape.
    return path
| agpl-3.0 |
phaseout/ircbot-collection | botcommon.py | 6 | 1153 | """\
Common bits and pieces used by the various bots.
"""
import sys
import os
import time
from threading import Thread, Event
class OutputManager(Thread):
    """Daemon thread that rate-limits IRC privmsg output.

    Messages queued via :meth:`send` are flushed by the worker loop with a
    fixed delay between each, so the bot does not flood the server.
    """

    def __init__(self, connection, delay=.5):
        Thread.__init__(self)
        self.setDaemon(1)
        self.connection = connection
        self.delay = delay       # seconds to sleep between consecutive sends
        self.event = Event()     # signals that the queue has pending messages
        self.queue = []          # list of (message, target) tuples

    def run(self):
        # Worker loop: wait until send() signals, drain the queue with a
        # delay after each message, then clear the event and wait again.
        while 1:
            self.event.wait()
            while self.queue:
                msg, target = self.queue.pop(0)
                self.connection.privmsg(target, msg)
                time.sleep(self.delay)
            self.event.clear()

    def send(self, msg, target):
        # Queue a stripped message for `target` and wake the worker.
        self.queue.append((msg.strip(), target))
        self.event.set()
def trivial_bot_main(klass):
    """Parse ``<server[:port]> <channel> <nickname>`` from argv and start the
    given bot class. (Python 2 module: uses print statements.)"""
    if len(sys.argv) != 4:
        botname = os.path.basename(sys.argv[0])
        print "Usage: %s <server[:port]> <channel> <nickname>" % botname
        sys.exit(1)
    # Split an optional ":port" suffix off the server argument.
    s = sys.argv[1].split(":", 1)
    server = s[0]
    if len(s) == 2:
        try:
            port = int(s[1])
        except ValueError:
            print "Error: Erroneous port."
            sys.exit(1)
    else:
        # Default IRC port.
        port = 6667
    channel = sys.argv[2]
    nickname = sys.argv[3]
    klass(channel, nickname, server, port).start()
| gpl-2.0 |
alexm92/sentry | src/sentry/south_migrations/0008_auto__chg_field_message_view__add_field_groupedmessage_data__chg_field.py | 36 | 5375 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen the ``view`` columns to 200 chars,
    shorten ``FilterValue.value`` and add ``GroupedMessage.data``."""

    def forwards(self, orm):
        """Apply the schema changes."""
        # Changing field 'Message.view'
        db.alter_column('sentry_message', 'view', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))

        # Adding field 'GroupedMessage.data'
        db.add_column('sentry_groupedmessage', 'data', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)

        # Changing field 'GroupedMessage.view'
        db.alter_column('sentry_groupedmessage', 'view', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))

        # Changing field 'FilterValue.value'
        db.alter_column('sentry_filtervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=200))

    def backwards(self, orm):
        """Revert the schema changes applied by :meth:`forwards`."""
        # Changing field 'Message.view'
        db.alter_column('sentry_message', 'view', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))

        # Deleting field 'GroupedMessage.data'
        db.delete_column('sentry_groupedmessage', 'data')

        # Changing field 'GroupedMessage.view'
        db.alter_column('sentry_groupedmessage', 'view', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))

        # Changing field 'FilterValue.value'
        db.alter_column('sentry_filtervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM snapshot generated by South at migration time; must not be
    # edited by hand.
    models = {
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.groupedmessage': {
            'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'sentry.message': {
            'Meta': {'object_name': 'Message'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['sentry']
| bsd-3-clause |
bluedazzle/flask | tests/test_config.py | 139 | 5543 | # -*- coding: utf-8 -*-
"""
tests.test_config
~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Flask Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import flask
# config keys used for the TestConfig
TEST_KEY = 'foo'
SECRET_KEY = 'devkey'
def common_object_test(app):
    """Shared assertions: the app picked up SECRET_KEY and TEST_KEY from the
    settings object, and class names themselves were not imported."""
    assert app.secret_key == 'devkey'
    assert app.config['TEST_KEY'] == 'foo'
    assert 'TestConfig' not in app.config
def test_config_from_file():
    """Settings load from this module's .py file via Config.from_pyfile()."""
    app = flask.Flask(__name__)
    app.config.from_pyfile(__file__.rsplit('.', 1)[0] + '.py')
    common_object_test(app)
def test_config_from_object():
    """Settings load from a module (by import name) via Config.from_object()."""
    app = flask.Flask(__name__)
    app.config.from_object(__name__)
    common_object_test(app)
def test_config_from_json():
    """Settings load from a JSON file via Config.from_json()."""
    app = flask.Flask(__name__)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    app.config.from_json(os.path.join(current_dir, 'static', 'config.json'))
    common_object_test(app)
def test_config_from_mapping():
    """Config.from_mapping() accepts a dict, a list of pairs, or keyword
    arguments -- but not multiple positional mappings."""
    app = flask.Flask(__name__)
    app.config.from_mapping({
        'SECRET_KEY': 'devkey',
        'TEST_KEY': 'foo'
    })
    common_object_test(app)

    app = flask.Flask(__name__)
    app.config.from_mapping([
        ('SECRET_KEY', 'devkey'),
        ('TEST_KEY', 'foo')
    ])
    common_object_test(app)

    app = flask.Flask(__name__)
    app.config.from_mapping(
        SECRET_KEY='devkey',
        TEST_KEY='foo'
    )
    common_object_test(app)

    # More than one positional mapping is rejected.
    app = flask.Flask(__name__)
    with pytest.raises(TypeError):
        app.config.from_mapping(
            {}, {}
        )
def test_config_from_class():
    """from_object() with a class also picks up inherited attributes."""
    class Base(object):
        TEST_KEY = 'foo'

    class Test(Base):
        SECRET_KEY = 'devkey'

    app = flask.Flask(__name__)
    app.config.from_object(Test)
    common_object_test(app)
def test_config_from_envvar():
    """from_envvar() raises when the variable is unset (unless silent=True)
    and loads the referenced settings file when it is set.

    os.environ is swapped out and restored around the test.
    """
    env = os.environ
    try:
        os.environ = {}
        app = flask.Flask(__name__)
        try:
            app.config.from_envvar('FOO_SETTINGS')
        except RuntimeError as e:
            assert "'FOO_SETTINGS' is not set" in str(e)
        else:
            assert 0, 'expected exception'
        assert not app.config.from_envvar('FOO_SETTINGS', silent=True)

        os.environ = {'FOO_SETTINGS': __file__.rsplit('.', 1)[0] + '.py'}
        assert app.config.from_envvar('FOO_SETTINGS')
        common_object_test(app)
    finally:
        # Always restore the real environment.
        os.environ = env
def test_config_from_envvar_missing():
    """from_envvar() raises IOError when the variable points at a missing
    file, and returns falsy with silent=True."""
    env = os.environ
    try:
        os.environ = {'FOO_SETTINGS': 'missing.cfg'}
        try:
            app = flask.Flask(__name__)
            app.config.from_envvar('FOO_SETTINGS')
        except IOError as e:
            msg = str(e)
            assert msg.startswith('[Errno 2] Unable to load configuration '
                                  'file (No such file or directory):')
            assert msg.endswith("missing.cfg'")
        else:
            assert False, 'expected IOError'
        assert not app.config.from_envvar('FOO_SETTINGS', silent=True)
    finally:
        # Always restore the real environment.
        os.environ = env
def test_config_missing():
    """from_pyfile() on a missing file raises a descriptive IOError, or
    returns falsy with silent=True."""
    app = flask.Flask(__name__)
    try:
        app.config.from_pyfile('missing.cfg')
    except IOError as e:
        msg = str(e)
        assert msg.startswith('[Errno 2] Unable to load configuration '
                              'file (No such file or directory):')
        assert msg.endswith("missing.cfg'")
    else:
        assert 0, 'expected config'
    assert not app.config.from_pyfile('missing.cfg', silent=True)
def test_config_missing_json():
    """from_json() on a missing file raises a descriptive IOError, or
    returns falsy with silent=True."""
    app = flask.Flask(__name__)
    try:
        app.config.from_json('missing.json')
    except IOError as e:
        msg = str(e)
        assert msg.startswith('[Errno 2] Unable to load configuration '
                              'file (No such file or directory):')
        assert msg.endswith("missing.json'")
    else:
        assert 0, 'expected config'
    assert not app.config.from_json('missing.json', silent=True)
def test_custom_config_class():
    """A Flask subclass can swap in a custom Config via config_class."""
    class Config(flask.Config):
        pass

    class Flask(flask.Flask):
        config_class = Config

    app = Flask(__name__)
    assert isinstance(app.config, Config)
    app.config.from_object(__name__)
    common_object_test(app)
def test_session_lifetime():
    """An int PERMANENT_SESSION_LIFETIME is exposed as a timedelta."""
    app = flask.Flask(__name__)
    app.config['PERMANENT_SESSION_LIFETIME'] = 42
    assert app.permanent_session_lifetime.seconds == 42
def test_get_namespace():
    """get_namespace() filters keys by prefix, with options to keep the
    original case and/or the prefix itself."""
    app = flask.Flask(__name__)
    app.config['FOO_OPTION_1'] = 'foo option 1'
    app.config['FOO_OPTION_2'] = 'foo option 2'
    app.config['BAR_STUFF_1'] = 'bar stuff 1'
    app.config['BAR_STUFF_2'] = 'bar stuff 2'

    # Default: prefix stripped, keys lowercased.
    foo_options = app.config.get_namespace('FOO_')
    assert 2 == len(foo_options)
    assert 'foo option 1' == foo_options['option_1']
    assert 'foo option 2' == foo_options['option_2']

    # lowercase=False keeps the original casing.
    bar_options = app.config.get_namespace('BAR_', lowercase=False)
    assert 2 == len(bar_options)
    assert 'bar stuff 1' == bar_options['STUFF_1']
    assert 'bar stuff 2' == bar_options['STUFF_2']

    # trim_namespace=False keeps the prefix in the keys.
    foo_options = app.config.get_namespace('FOO_', trim_namespace=False)
    assert 2 == len(foo_options)
    assert 'foo option 1' == foo_options['foo_option_1']
    assert 'foo option 2' == foo_options['foo_option_2']

    bar_options = app.config.get_namespace('BAR_', lowercase=False, trim_namespace=False)
    assert 2 == len(bar_options)
    assert 'bar stuff 1' == bar_options['BAR_STUFF_1']
    assert 'bar stuff 2' == bar_options['BAR_STUFF_2']
Carmezim/tensorflow | tensorflow/contrib/keras/python/keras/preprocessing/image.py | 25 | 39384 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import threading
import warnings
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.keras.python.keras import backend as K
# pylint: disable=g-import-not-at-top
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
try:
from scipy import linalg
import scipy.ndimage as ndi
except ImportError:
linalg = None
ndi = None
# pylint: enable=g-import-not-at-top
def random_rotation(x,
                    rg,
                    row_axis=1,
                    col_axis=2,
                    channel_axis=0,
                    fill_mode='nearest',
                    cval=0.):
  """Performs a random rotation of a Numpy image tensor.

  Arguments:
      x: Input tensor. Must be 3D.
      rg: Rotation range, in degrees.
      row_axis: Index of axis for rows in the input tensor.
      col_axis: Index of axis for columns in the input tensor.
      channel_axis: Index of axis for channels in the input tensor.
      fill_mode: Points outside the boundaries of the input
          are filled according to the given mode
          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
      cval: Value used for points outside the boundaries
          of the input if `mode='constant'`.

  Returns:
      Rotated Numpy image tensor.
  """
  # Uniform random angle in [-rg, rg] degrees, converted to radians.
  theta = np.pi / 180 * np.random.uniform(-rg, rg)
  # 2D rotation in homogeneous coordinates.
  rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                              [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])

  h, w = x.shape[row_axis], x.shape[col_axis]
  # Rotate about the image centre rather than the origin.
  transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
  x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
  return x
def random_shift(x,
                 wrg,
                 hrg,
                 row_axis=1,
                 col_axis=2,
                 channel_axis=0,
                 fill_mode='nearest',
                 cval=0.):
  """Randomly translate a 3D Numpy image tensor.

  Arguments:
      x: Input tensor. Must be 3D.
      wrg: Width shift range, as a float fraction of the width.
      hrg: Height shift range, as a float fraction of the height.
      row_axis: Index of axis for rows in the input tensor.
      col_axis: Index of axis for columns in the input tensor.
      channel_axis: Index of axis for channels in the input tensor.
      fill_mode: Fill mode for points sampled outside the input
          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
      cval: Fill value used when `fill_mode='constant'`.
  Returns:
      Shifted Numpy image tensor.
  """
  height, width = x.shape[row_axis], x.shape[col_axis]
  # Sample pixel offsets as fractions of each spatial dimension.
  shift_rows = np.random.uniform(-hrg, hrg) * height
  shift_cols = np.random.uniform(-wrg, wrg) * width
  translation = np.array([[1, 0, shift_rows],
                          [0, 1, shift_cols],
                          [0, 0, 1]])
  # A pure translation needs no recentering about the image midpoint.
  return apply_transform(x, translation, channel_axis, fill_mode, cval)
def random_shear(x,
                 intensity,
                 row_axis=1,
                 col_axis=2,
                 channel_axis=0,
                 fill_mode='nearest',
                 cval=0.):
  """Apply a random shear to a 3D Numpy image tensor.

  Arguments:
      x: Input tensor. Must be 3D.
      intensity: Transformation intensity (shear angle range, radians).
      row_axis: Index of axis for rows in the input tensor.
      col_axis: Index of axis for columns in the input tensor.
      channel_axis: Index of axis for channels in the input tensor.
      fill_mode: Fill mode for points sampled outside the input
          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
      cval: Fill value used when `fill_mode='constant'`.
  Returns:
      Sheared Numpy image tensor.
  """
  level = np.random.uniform(-intensity, intensity)
  shear = np.array([[1, -np.sin(level), 0],
                    [0, np.cos(level), 0],
                    [0, 0, 1]])
  height, width = x.shape[row_axis], x.shape[col_axis]
  # Shear about the image center so content stays roughly in frame.
  matrix = transform_matrix_offset_center(shear, height, width)
  return apply_transform(x, matrix, channel_axis, fill_mode, cval)
def random_zoom(x,
                zoom_range,
                row_axis=1,
                col_axis=2,
                channel_axis=0,
                fill_mode='nearest',
                cval=0.):
  """Apply a random spatial zoom to a 3D Numpy image tensor.

  Arguments:
      x: Input tensor. Must be 3D.
      zoom_range: Tuple of floats; zoom range for width and height.
      row_axis: Index of axis for rows in the input tensor.
      col_axis: Index of axis for columns in the input tensor.
      channel_axis: Index of axis for channels in the input tensor.
      fill_mode: Fill mode for points sampled outside the input
          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
      cval: Fill value used when `fill_mode='constant'`.
  Returns:
      Zoomed Numpy image tensor.
  Raises:
      ValueError: if `zoom_range` isn't a tuple.
  """
  if len(zoom_range) != 2:
    raise ValueError('zoom_range should be a tuple or list of two floats. '
                     'Received arg: ', zoom_range)
  lower, upper = zoom_range[0], zoom_range[1]
  if lower == 1 and upper == 1:
    # Zoom factors of exactly 1 mean no scaling; skip sampling entirely.
    zx = zy = 1
  else:
    zx, zy = np.random.uniform(lower, upper, 2)
  scale = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
  height, width = x.shape[row_axis], x.shape[col_axis]
  # Zoom about the image center rather than the origin.
  matrix = transform_matrix_offset_center(scale, height, width)
  return apply_transform(x, matrix, channel_axis, fill_mode, cval)
def random_channel_shift(x, intensity, channel_axis=0):
  """Add an independent random offset to each channel of an image tensor.

  Each channel is shifted by a value drawn uniformly from
  `[-intensity, intensity]`, then clipped back to the tensor's original
  global value range.

  Arguments:
      x: Input tensor. Must be 3D.
      intensity: Maximum absolute shift per channel.
      channel_axis: Index of axis for channels in the input tensor.
  Returns:
      Shifted Numpy image tensor (same shape as `x`).
  """
  # Bring channels to the front so we can iterate over them directly.
  rolled = np.rollaxis(x, channel_axis, 0)
  low, high = np.min(rolled), np.max(rolled)
  shifted = []
  for channel in rolled:
    offset = np.random.uniform(-intensity, intensity)
    shifted.append(np.clip(channel + offset, low, high))
  stacked = np.stack(shifted, axis=0)
  # Restore the original channel position.
  return np.rollaxis(stacked, 0, channel_axis + 1)
def transform_matrix_offset_center(matrix, x, y):
  """Recenter a homogeneous 3x3 transform about the image midpoint.

  Conjugates `matrix` with translations so the transform is applied
  relative to the image center instead of the (0, 0) corner.

  Arguments:
      matrix: 3x3 homogeneous transformation matrix.
      x: Size of the first spatial dimension.
      y: Size of the second spatial dimension.
  Returns:
      The recentered 3x3 transformation matrix.
  """
  center_x = float(x) / 2 + 0.5
  center_y = float(y) / 2 + 0.5
  shift_to_center = np.array([[1, 0, center_x], [0, 1, center_y], [0, 0, 1]])
  shift_back = np.array([[1, 0, -center_x], [0, 1, -center_y], [0, 0, 1]])
  # shift_to_center @ matrix @ shift_back
  return np.dot(np.dot(shift_to_center, matrix), shift_back)
def apply_transform(x,
                    transform_matrix,
                    channel_axis=0,
                    fill_mode='nearest',
                    cval=0.):
  """Apply the image transformation specified by a matrix.

  Arguments:
      x: 3D numpy array, single image.
      transform_matrix: 3x3 homogeneous Numpy array specifying the
          geometric transformation.
      channel_axis: Index of axis for channels in the input tensor.
      fill_mode: Points outside the boundaries of the input
          are filled according to the given mode
          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
      cval: Value used for points outside the boundaries
          of the input if `mode='constant'`.
  Returns:
      The transformed version of the input.
  """
  # Move channels to the front so each channel is a standalone 2D image.
  x = np.rollaxis(x, channel_axis, 0)
  # Split the homogeneous matrix into its linear part and translation.
  final_affine_matrix = transform_matrix[:2, :2]
  final_offset = transform_matrix[:2, 2]
  channel_images = [
      # Use the public `scipy.ndimage.affine_transform`; the old
      # `scipy.ndimage.interpolation` submodule is deprecated/removed
      # in modern SciPy. `order=0` keeps nearest-neighbor sampling.
      ndi.affine_transform(
          x_channel,
          final_affine_matrix,
          final_offset,
          order=0,
          mode=fill_mode,
          cval=cval) for x_channel in x
  ]
  x = np.stack(channel_images, axis=0)
  # Restore the original channel position.
  x = np.rollaxis(x, 0, channel_axis + 1)
  return x
def flip_axis(x, axis):
  """Reverse a Numpy array along the given axis.

  Arguments:
      x: Array-like input.
      axis: Axis along which to reverse.
  Returns:
      The reversed array.
  """
  # Swap the target axis to the front, reverse it, then swap back.
  arr = np.asarray(x).swapaxes(axis, 0)
  reversed_arr = arr[::-1, ...]
  return reversed_arr.swapaxes(0, axis)
def array_to_img(x, data_format=None, scale=True):
  """Converts a 3D Numpy array to a PIL Image instance.

  Arguments:
      x: Input Numpy array.
      data_format: Image data format
          (`'channels_first'` or `'channels_last'`).
      scale: Whether to rescale image values to be within [0, 255].
  Returns:
      A PIL Image instance.
  Raises:
      ImportError: if PIL is not available.
      ValueError: if invalid `x` or `data_format` is passed.
  """
  if pil_image is None:
    raise ImportError('Could not import PIL.Image. '
                      'The use of `array_to_img` requires PIL.')
  x = np.asarray(x, dtype=K.floatx())
  if x.ndim != 3:
    raise ValueError('Expected image array to have rank 3 (single image). '
                     'Got array with shape:', x.shape)
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Invalid data_format:', data_format)
  # PIL wants channels last; move them there if the array is channels-first.
  if data_format == 'channels_first':
    x = x.transpose(1, 2, 0)
  if scale:
    # Shift up so the minimum is >= 0, then normalize into [0, 255].
    x = x + max(-np.min(x), 0)
    x_max = np.max(x)
    if x_max != 0:
      x /= x_max
    x *= 255
  n_channels = x.shape[2]
  if n_channels == 3:
    # RGB image.
    return pil_image.fromarray(x.astype('uint8'), 'RGB')
  if n_channels == 1:
    # Grayscale image.
    return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
  raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
  """Converts a PIL Image instance to a Numpy array.

  Arguments:
      img: PIL Image instance.
      data_format: Image data format
          (`'channels_first'` or `'channels_last'`).
  Returns:
      A 3D Numpy array.
  Raises:
      ValueError: if invalid `img` or `data_format` is passed.
  """
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ', data_format)
  # PIL images are (width, height, channel); the resulting Numpy array is
  # (height, width, channel) or (channel, height, width).
  x = np.asarray(img, dtype=K.floatx())
  rank = len(x.shape)
  if rank == 3:
    # Color image: optionally move channels to the front.
    if data_format == 'channels_first':
      x = x.transpose(2, 0, 1)
    return x
  if rank == 2:
    # Grayscale image: add an explicit singleton channel axis.
    if data_format == 'channels_first':
      return x.reshape((1, x.shape[0], x.shape[1]))
    return x.reshape((x.shape[0], x.shape[1], 1))
  raise ValueError('Unsupported image shape: ', x.shape)
def load_img(path, grayscale=False, target_size=None):
  """Loads an image into PIL format.

  Arguments:
      path: Path to image file.
      grayscale: Boolean, whether to load the image as grayscale.
      target_size: Either `None` (default to original size)
          or tuple of ints `(img_height, img_width)`.
  Returns:
      A PIL Image instance.
  Raises:
      ImportError: if PIL is not available.
  """
  if pil_image is None:
    # Bug fix: the message previously referenced `array_to_img` (copy-paste).
    raise ImportError('Could not import PIL.Image. '
                      'The use of `load_img` requires PIL.')
  img = pil_image.open(path)
  if grayscale:
    if img.mode != 'L':
      img = img.convert('L')
  else:
    # Normalize palette/RGBA/etc. images to 3-channel RGB.
    if img.mode != 'RGB':
      img = img.convert('RGB')
  if target_size:
    # PIL sizes are (width, height) while target_size is (height, width).
    wh_tuple = (target_size[1], target_size[0])
    if img.size != wh_tuple:
      img = img.resize(wh_tuple)
  return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
  """Recursively list image files under `directory`.

  Arguments:
      directory: Root directory to walk.
      ext: Regex alternation of accepted file extensions.
  Returns:
      List of paths of files whose (lowercased) name ends with one of
      the extensions.
  """
  return [
      os.path.join(root, f)
      for root, _, files in os.walk(directory) for f in files
      # Bug fix: anchor the pattern with `$` so the extension must be at the
      # end of the name; previously `photo.jpg.tmp` matched as a prefix.
      if re.match(r'([\w]+\.(?:' + ext + '))$', f)
  ]
class ImageDataGenerator(object):
  """Generate minibatches of image data with real-time data augmentation.
  Arguments:
      featurewise_center: set input mean to 0 over the dataset.
      samplewise_center: set each sample mean to 0.
      featurewise_std_normalization: divide inputs by std of the dataset.
      samplewise_std_normalization: divide each input by its std.
      zca_whitening: apply ZCA whitening.
      rotation_range: degrees (0 to 180).
      width_shift_range: fraction of total width.
      height_shift_range: fraction of total height.
      shear_range: shear intensity (shear angle in radians).
      zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
          in the range [1-z, 1+z]. A sequence of two can be passed instead
          to select this range.
      channel_shift_range: shift range for each channels.
      fill_mode: points outside the boundaries are filled according to the
          given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
          is 'nearest'.
      cval: value used for points outside the boundaries when fill_mode is
          'constant'. Default is 0.
      horizontal_flip: whether to randomly flip images horizontally.
      vertical_flip: whether to randomly flip images vertically.
      rescale: rescaling factor. If None or 0, no rescaling is applied,
          otherwise we multiply the data by the value provided
          (before applying any other transformation).
      preprocessing_function: function that will be applied on each input.
          The function will run before any other modification on it.
          The function should take one argument:
          one image (Numpy tensor with rank 3),
          and should output a Numpy tensor with the same shape.
      data_format: 'channels_first' or 'channels_last'. In 'channels_first'
          mode, the channels dimension
          (the depth) is at index 1, in 'channels_last' mode it is at index 3.
          It defaults to the `image_data_format` value found in your
          Keras config file at `~/.keras/keras.json`.
          If you never set it, then it will be "channels_last".
  """
  def __init__(self,
               featurewise_center=False,
               samplewise_center=False,
               featurewise_std_normalization=False,
               samplewise_std_normalization=False,
               zca_whitening=False,
               rotation_range=0.,
               width_shift_range=0.,
               height_shift_range=0.,
               shear_range=0.,
               zoom_range=0.,
               channel_shift_range=0.,
               fill_mode='nearest',
               cval=0.,
               horizontal_flip=False,
               vertical_flip=False,
               rescale=None,
               preprocessing_function=None,
               data_format=None):
    # Default to the globally configured Keras image data format.
    if data_format is None:
      data_format = K.image_data_format()
    self.featurewise_center = featurewise_center
    self.samplewise_center = samplewise_center
    self.featurewise_std_normalization = featurewise_std_normalization
    self.samplewise_std_normalization = samplewise_std_normalization
    self.zca_whitening = zca_whitening
    self.rotation_range = rotation_range
    self.width_shift_range = width_shift_range
    self.height_shift_range = height_shift_range
    self.shear_range = shear_range
    self.zoom_range = zoom_range
    self.channel_shift_range = channel_shift_range
    self.fill_mode = fill_mode
    self.cval = cval
    self.horizontal_flip = horizontal_flip
    self.vertical_flip = vertical_flip
    self.rescale = rescale
    self.preprocessing_function = preprocessing_function
    if data_format not in {'channels_last', 'channels_first'}:
      raise ValueError(
          'data_format should be "channels_last" (channel after row and '
          'column) or "channels_first" (channel before row and column). '
          'Received arg: ', data_format)
    self.data_format = data_format
    # Axis indices below include the batch axis at index 0.
    if data_format == 'channels_first':
      self.channel_axis = 1
      self.row_axis = 2
      self.col_axis = 3
    if data_format == 'channels_last':
      self.channel_axis = 3
      self.row_axis = 1
      self.col_axis = 2
    # Statistics populated by `fit()`; required for the featurewise options.
    self.mean = None
    self.std = None
    self.principal_components = None
    # Normalize zoom_range to a [lower, upper] pair.
    if np.isscalar(zoom_range):
      self.zoom_range = [1 - zoom_range, 1 + zoom_range]
    elif len(zoom_range) == 2:
      self.zoom_range = [zoom_range[0], zoom_range[1]]
    else:
      raise ValueError('zoom_range should be a float or '
                       'a tuple or list of two floats. '
                       'Received arg: ', zoom_range)
  def flow(self,
           x,
           y=None,
           batch_size=32,
           shuffle=True,
           seed=None,
           save_to_dir=None,
           save_prefix='',
           save_format='jpeg'):
    """Returns a `NumpyArrayIterator` yielding augmented batches of `x`, `y`."""
    return NumpyArrayIterator(
        x,
        y,
        self,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        data_format=self.data_format,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format)
  def flow_from_directory(self,
                          directory,
                          target_size=(256, 256),
                          color_mode='rgb',
                          classes=None,
                          class_mode='categorical',
                          batch_size=32,
                          shuffle=True,
                          seed=None,
                          save_to_dir=None,
                          save_prefix='',
                          save_format='jpeg',
                          follow_links=False):
    """Returns a `DirectoryIterator` yielding augmented batches of images
    read from class subdirectories of `directory`."""
    return DirectoryIterator(
        directory,
        self,
        target_size=target_size,
        color_mode=color_mode,
        classes=classes,
        class_mode=class_mode,
        data_format=self.data_format,
        batch_size=batch_size,
        shuffle=shuffle,
        seed=seed,
        save_to_dir=save_to_dir,
        save_prefix=save_prefix,
        save_format=save_format,
        follow_links=follow_links)
  def standardize(self, x):
    """Apply the normalization configuration to a batch of inputs.
    Arguments:
        x: batch of inputs to be normalized.
    Returns:
        The inputs, normalized.
    """
    if self.preprocessing_function:
      x = self.preprocessing_function(x)
    if self.rescale:
      x *= self.rescale
    # x is a single image, so it doesn't have image number at index 0
    img_channel_axis = self.channel_axis - 1
    if self.samplewise_center:
      x -= np.mean(x, axis=img_channel_axis, keepdims=True)
    if self.samplewise_std_normalization:
      # Small epsilon guards against division by zero for flat images.
      x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
    if self.featurewise_center:
      if self.mean is not None:
        x -= self.mean
      else:
        warnings.warn('This ImageDataGenerator specifies '
                      '`featurewise_center`, but it hasn\'t'
                      'been fit on any training data. Fit it '
                      'first by calling `.fit(numpy_data)`.')
    if self.featurewise_std_normalization:
      if self.std is not None:
        x /= (self.std + 1e-7)
      else:
        warnings.warn('This ImageDataGenerator specifies '
                      '`featurewise_std_normalization`, but it hasn\'t'
                      'been fit on any training data. Fit it '
                      'first by calling `.fit(numpy_data)`.')
    if self.zca_whitening:
      if self.principal_components is not None:
        # Flatten, project onto the whitening matrix, restore shape.
        flatx = np.reshape(x, (x.size))
        whitex = np.dot(flatx, self.principal_components)
        x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
      else:
        warnings.warn('This ImageDataGenerator specifies '
                      '`zca_whitening`, but it hasn\'t'
                      'been fit on any training data. Fit it '
                      'first by calling `.fit(numpy_data)`.')
    return x
  def random_transform(self, x):
    """Randomly augment a single image tensor.
    Arguments:
        x: 3D tensor, single image.
    Returns:
        A randomly transformed version of the input (same shape).
    Raises:
        ImportError: if Scipy is not available.
    """
    if ndi is None:
      raise ImportError('Scipy is required for image transformations.')
    # x is a single image, so it doesn't have image number at index 0
    img_row_axis = self.row_axis - 1
    img_col_axis = self.col_axis - 1
    img_channel_axis = self.channel_axis - 1
    # use composition of homographies
    # to generate final transform that needs to be applied
    if self.rotation_range:
      theta = np.pi / 180 * np.random.uniform(-self.rotation_range,
                                              self.rotation_range)
    else:
      theta = 0
    if self.height_shift_range:
      tx = np.random.uniform(-self.height_shift_range,
                             self.height_shift_range) * x.shape[img_row_axis]
    else:
      tx = 0
    if self.width_shift_range:
      ty = np.random.uniform(-self.width_shift_range,
                             self.width_shift_range) * x.shape[img_col_axis]
    else:
      ty = 0
    if self.shear_range:
      shear = np.random.uniform(-self.shear_range, self.shear_range)
    else:
      shear = 0
    if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
      zx, zy = 1, 1
    else:
      zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
    # Compose only the transforms that are actually active, in the fixed
    # order: rotation, shift, shear, zoom.
    transform_matrix = None
    if theta != 0:
      rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                  [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
      transform_matrix = rotation_matrix
    if tx != 0 or ty != 0:
      shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
      transform_matrix = shift_matrix if transform_matrix is None else np.dot(
          transform_matrix, shift_matrix)
    if shear != 0:
      shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0],
                               [0, 0, 1]])
      transform_matrix = shear_matrix if transform_matrix is None else np.dot(
          transform_matrix, shear_matrix)
    if zx != 1 or zy != 1:
      zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
      transform_matrix = zoom_matrix if transform_matrix is None else np.dot(
          transform_matrix, zoom_matrix)
    if transform_matrix is not None:
      # Apply the composed transform once, centered on the image.
      h, w = x.shape[img_row_axis], x.shape[img_col_axis]
      transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
      x = apply_transform(
          x,
          transform_matrix,
          img_channel_axis,
          fill_mode=self.fill_mode,
          cval=self.cval)
    if self.channel_shift_range != 0:
      x = random_channel_shift(x, self.channel_shift_range, img_channel_axis)
    # Flips are applied independently, each with probability 0.5.
    if self.horizontal_flip:
      if np.random.random() < 0.5:
        x = flip_axis(x, img_col_axis)
    if self.vertical_flip:
      if np.random.random() < 0.5:
        x = flip_axis(x, img_row_axis)
    return x
  def fit(self, x, augment=False, rounds=1, seed=None):
    """Fits internal statistics to some sample data.
    Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.
    Arguments:
        x: Numpy array, the data to fit on. Should have rank 4.
            In case of grayscale data,
            the channels axis should have value 1, and in case
            of RGB data, it should have value 3.
        augment: Whether to fit on randomly augmented samples
        rounds: If `augment`,
            how many augmentation passes to do over the data
        seed: random seed.
    Raises:
        ValueError: in case of invalid input `x`.
        ImportError: if Scipy is not available.
    """
    x = np.asarray(x, dtype=K.floatx())
    if x.ndim != 4:
      raise ValueError('Input to `.fit()` should have rank 4. '
                       'Got array with shape: ' + str(x.shape))
    if x.shape[self.channel_axis] not in {1, 3, 4}:
      raise ValueError(
          'Expected input to be images (as Numpy array) '
          'following the data format convention "' + self.data_format + '" '
          '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
          'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
          'However, it was passed an array with shape ' + str(x.shape) + ' (' +
          str(x.shape[self.channel_axis]) + ' channels).')
    if seed is not None:
      np.random.seed(seed)
    x = np.copy(x)
    if augment:
      # Fit on `rounds` randomly augmented copies of the data.
      ax = np.zeros(
          tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
      for r in range(rounds):
        for i in range(x.shape[0]):
          ax[i + r * x.shape[0]] = self.random_transform(x[i])
      x = ax
    if self.featurewise_center:
      # Per-channel mean, reshaped so it broadcasts over single images.
      self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
      broadcast_shape = [1, 1, 1]
      broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
      self.mean = np.reshape(self.mean, broadcast_shape)
      x -= self.mean
    if self.featurewise_std_normalization:
      # Per-channel std, reshaped so it broadcasts over single images.
      self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
      broadcast_shape = [1, 1, 1]
      broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
      self.std = np.reshape(self.std, broadcast_shape)
      x /= (self.std + K.epsilon())
    if self.zca_whitening:
      if linalg is None:
        raise ImportError('Scipy is required for zca_whitening.')
      # ZCA: whiten via the eigendecomposition of the data covariance.
      flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
      sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
      u, s, _ = linalg.svd(sigma)
      self.principal_components = np.dot(
          np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
class Iterator(object):
  """Abstract base class for image data iterators.

  Subclasses draw `(index_array, current_index, current_batch_size)`
  tuples from `self.index_generator`, holding `self.lock` while doing so.

  Arguments:
      n: Integer, total number of samples in the dataset to loop over.
      batch_size: Integer, size of a batch.
      shuffle: Boolean, whether to shuffle the data between epochs.
      seed: Random seeding for data shuffling.
  """

  def __init__(self, n, batch_size, shuffle, seed):
    self.n = n
    self.batch_size = batch_size
    self.shuffle = shuffle
    self.batch_index = 0
    self.total_batches_seen = 0
    # Guards the shared index generator so workers can advance it safely.
    self.lock = threading.Lock()
    self.index_generator = self._flow_index(n, batch_size, shuffle, seed)

  def reset(self):
    """Restart iteration from the beginning of the dataset."""
    self.batch_index = 0

  def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
    """Endlessly yield `(index_array, current_index, current_batch_size)`."""
    self.reset()  # make sure batch_index starts at 0
    order = None
    while True:
      if seed is not None:
        # Reseed per batch so shuffling is reproducible yet still advances.
        np.random.seed(seed + self.total_batches_seen)
      if self.batch_index == 0:
        # Start of an epoch: build (and optionally shuffle) the index order.
        order = np.random.permutation(n) if shuffle else np.arange(n)
      start = (self.batch_index * batch_size) % n
      if n > start + batch_size:
        size = batch_size
        self.batch_index += 1
      else:
        # Final (possibly short) batch of the epoch.
        size = n - start
        self.batch_index = 0
      self.total_batches_seen += 1
      yield (order[start:start + size], start, size)

  def __iter__(self):  # pylint: disable=non-iterator-returned
    # Enables `for x, y in data_gen.flow(...)` style usage.
    return self

  def __next__(self, *args, **kwargs):
    # Python 3 iteration protocol: delegate to the subclass `next()`.
    return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
  """Iterator yielding data from a Numpy array.
  Arguments:
      x: Numpy array of input data.
      y: Numpy array of targets data.
      image_data_generator: Instance of `ImageDataGenerator`
          to use for random transformations and normalization.
      batch_size: Integer, size of a batch.
      shuffle: Boolean, whether to shuffle the data between epochs.
      seed: Random seed for data shuffling.
      data_format: String, one of `channels_first`, `channels_last`.
      save_to_dir: Optional directory where to save the pictures
          being yielded, in a viewable format. This is useful
          for visualizing the random transformations being
          applied, for debugging purposes.
      save_prefix: String prefix to use for saving sample
          images (if `save_to_dir` is set).
      save_format: Format to use for saving sample images
          (if `save_to_dir` is set).
  """
  def __init__(self,
               x,
               y,
               image_data_generator,
               batch_size=32,
               shuffle=False,
               seed=None,
               data_format=None,
               save_to_dir=None,
               save_prefix='',
               save_format='jpeg'):
    # Inputs and targets must pair up one-to-one.
    if y is not None and len(x) != len(y):
      raise ValueError('X (images tensor) and y (labels) '
                       'should have the same length. '
                       'Found: X.shape = %s, y.shape = %s' %
                       (np.asarray(x).shape, np.asarray(y).shape))
    if data_format is None:
      data_format = K.image_data_format()
    self.x = np.asarray(x, dtype=K.floatx())
    # Expect a batch of images: (samples, rows, cols, channels) or
    # (samples, channels, rows, cols), depending on `data_format`.
    if self.x.ndim != 4:
      raise ValueError('Input data in `NumpyArrayIterator` '
                       'should have rank 4. You passed an array '
                       'with shape', self.x.shape)
    channels_axis = 3 if data_format == 'channels_last' else 1
    if self.x.shape[channels_axis] not in {1, 3, 4}:
      raise ValueError(
          'NumpyArrayIterator is set to use the '
          'data format convention "' + data_format + '" '
          '(channels on axis ' + str(channels_axis) + '), i.e. expected '
          'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
          'However, it was passed an array with shape ' + str(self.x.shape) +
          ' (' + str(self.x.shape[channels_axis]) + ' channels).')
    if y is not None:
      self.y = np.asarray(y)
    else:
      self.y = None
    self.image_data_generator = image_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle,
                                             seed)
  def next(self):
    """For python 2.x.
    Returns:
        The next batch.
    """
    # Keeps under lock only the mechanism which advances
    # the indexing of each batch.
    with self.lock:
      index_array, current_index, current_batch_size = next(
          self.index_generator)
    # The transformation of images is not under thread lock
    # so it can be done in parallel
    batch_x = np.zeros(
        tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
    for i, j in enumerate(index_array):
      x = self.x[j]
      # Augment first, then normalize, each sample independently.
      x = self.image_data_generator.random_transform(x.astype(K.floatx()))
      x = self.image_data_generator.standardize(x)
      batch_x[i] = x
    if self.save_to_dir:
      # Optionally dump the augmented images to disk for debugging.
      for i in range(current_batch_size):
        img = array_to_img(batch_x[i], self.data_format, scale=True)
        fname = '{prefix}_{index}_{hash}.{format}'.format(
            prefix=self.save_prefix,
            index=current_index + i,
            hash=np.random.randint(1e4),
            format=self.save_format)
        img.save(os.path.join(self.save_to_dir, fname))
    if self.y is None:
      return batch_x
    batch_y = self.y[index_array]
    return batch_x, batch_y
class DirectoryIterator(Iterator):
  """Iterator capable of reading images from a directory on disk.
  Arguments:
      directory: Path to the directory to read images from.
          Each subdirectory in this directory will be
          considered to contain images from one class,
          or alternatively you could specify class subdirectories
          via the `classes` argument.
      image_data_generator: Instance of `ImageDataGenerator`
          to use for random transformations and normalization.
      target_size: tuple of integers, dimensions to resize input images to.
      color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
      classes: Optional list of strings, names of sudirectories
          containing images from each class (e.g. `["dogs", "cats"]`).
          It will be computed automatically if not set.
      class_mode: Mode for yielding the targets:
          `"binary"`: binary targets (if there are only two classes),
          `"categorical"`: categorical targets,
          `"sparse"`: integer targets,
          `None`: no targets get yielded (only input images are yielded).
      batch_size: Integer, size of a batch.
      shuffle: Boolean, whether to shuffle the data between epochs.
      seed: Random seed for data shuffling.
      data_format: String, one of `channels_first`, `channels_last`.
      save_to_dir: Optional directory where to save the pictures
          being yielded, in a viewable format. This is useful
          for visualizing the random transformations being
          applied, for debugging purposes.
      save_prefix: String prefix to use for saving sample
          images (if `save_to_dir` is set).
      save_format: Format to use for saving sample images
          (if `save_to_dir` is set).
  """
  def __init__(self,
               directory,
               image_data_generator,
               target_size=(256, 256),
               color_mode='rgb',
               classes=None,
               class_mode='categorical',
               batch_size=32,
               shuffle=True,
               seed=None,
               data_format=None,
               save_to_dir=None,
               save_prefix='',
               save_format='jpeg',
               follow_links=False):
    if data_format is None:
      data_format = K.image_data_format()
    self.directory = directory
    self.image_data_generator = image_data_generator
    self.target_size = tuple(target_size)
    if color_mode not in {'rgb', 'grayscale'}:
      raise ValueError('Invalid color mode:', color_mode,
                       '; expected "rgb" or "grayscale".')
    self.color_mode = color_mode
    self.data_format = data_format
    # Derive the per-image shape from color mode and data format.
    if self.color_mode == 'rgb':
      if self.data_format == 'channels_last':
        self.image_shape = self.target_size + (3,)
      else:
        self.image_shape = (3,) + self.target_size
    else:
      if self.data_format == 'channels_last':
        self.image_shape = self.target_size + (1,)
      else:
        self.image_shape = (1,) + self.target_size
    self.classes = classes
    if class_mode not in {'categorical', 'binary', 'sparse', None}:
      raise ValueError('Invalid class_mode:', class_mode,
                       '; expected one of "categorical", '
                       '"binary", "sparse", or None.')
    self.class_mode = class_mode
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
    # first, count the number of samples and classes
    self.samples = 0
    if not classes:
      # Each immediate subdirectory (sorted) becomes one class.
      classes = []
      for subdir in sorted(os.listdir(directory)):
        if os.path.isdir(os.path.join(directory, subdir)):
          classes.append(subdir)
    self.num_class = len(classes)
    # Map class name -> integer label, in sorted-directory order.
    self.class_indices = dict(zip(classes, range(len(classes))))
    def _recursive_list(subpath):
      # Deterministic walk order (sorted by directory path).
      return sorted(
          os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
    for subdir in classes:
      subpath = os.path.join(directory, subdir)
      for root, _, files in _recursive_list(subpath):
        for fname in files:
          is_valid = False
          for extension in white_list_formats:
            if fname.lower().endswith('.' + extension):
              is_valid = True
              break
          if is_valid:
            self.samples += 1
    print('Found %d images belonging to %d classes.' % (self.samples,
                                                        self.num_class))
    # second, build an index of the images in the different class subfolders
    self.filenames = []
    self.classes = np.zeros((self.samples,), dtype='int32')
    i = 0
    for subdir in classes:
      subpath = os.path.join(directory, subdir)
      for root, _, files in _recursive_list(subpath):
        for fname in files:
          is_valid = False
          for extension in white_list_formats:
            if fname.lower().endswith('.' + extension):
              is_valid = True
              break
          if is_valid:
            self.classes[i] = self.class_indices[subdir]
            i += 1
            # add filename relative to directory
            absolute_path = os.path.join(root, fname)
            self.filenames.append(os.path.relpath(absolute_path, directory))
    super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle,
                                            seed)
  def next(self):
    """For python 2.x.
    Returns:
        The next batch.
    """
    with self.lock:
      index_array, current_index, current_batch_size = next(
          self.index_generator)
    # The transformation of images is not under thread lock
    # so it can be done in parallel
    batch_x = np.zeros(
        (current_batch_size,) + self.image_shape, dtype=K.floatx())
    grayscale = self.color_mode == 'grayscale'
    # build batch of image data
    for i, j in enumerate(index_array):
      fname = self.filenames[j]
      img = load_img(
          os.path.join(self.directory, fname),
          grayscale=grayscale,
          target_size=self.target_size)
      x = img_to_array(img, data_format=self.data_format)
      x = self.image_data_generator.random_transform(x)
      x = self.image_data_generator.standardize(x)
      batch_x[i] = x
    # optionally save augmented images to disk for debugging purposes
    if self.save_to_dir:
      for i in range(current_batch_size):
        img = array_to_img(batch_x[i], self.data_format, scale=True)
        fname = '{prefix}_{index}_{hash}.{format}'.format(
            prefix=self.save_prefix,
            index=current_index + i,
            hash=np.random.randint(1e4),
            format=self.save_format)
        img.save(os.path.join(self.save_to_dir, fname))
    # build batch of labels
    if self.class_mode == 'sparse':
      batch_y = self.classes[index_array]
    elif self.class_mode == 'binary':
      batch_y = self.classes[index_array].astype(K.floatx())
    elif self.class_mode == 'categorical':
      # One-hot encode the integer labels.
      batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
      for i, label in enumerate(self.classes[index_array]):
        batch_y[i, label] = 1.
    else:
      return batch_x
    return batch_x, batch_y
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import base64
import hashlib
import hmac
import sys
import types
import warnings
import inspect
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
_strtype = basestring
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
_strtype = str
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '0.8.4'
# Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
MANAGEMENT_HOST = 'management.core.windows.net'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = \
'Table should be the same in a batch operations'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
'Partition Key should be the same in a batch operations'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
'Row Keys should not be the same in a batch operations'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \
'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \
'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_SERVICEBUS_MISSING_INFO = \
'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_WARNING_VALUE_SHOULD_BE_BYTES = \
'Warning: {0} must be bytes data type. It will be converted ' + \
'automatically, with utf-8 text encoding.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
'Cannot serialize the specified value ({0}) to an entity. Please use ' + \
'an EntityProperty (which can specify custom types), int, str, bool, ' + \
'or datetime.'
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
'Invalid page blob size: {0}. ' + \
'The size must be aligned to a 512-byte boundary.'
_USER_AGENT_STRING = 'pyazure/' + __version__
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
    '''Marker base class for serializable data objects.

    Carries no behavior of its own; the (de)serialization helpers in this
    module use ``isinstance(obj, WindowsAzureData)`` checks to decide
    whether a value should be converted to/from XML.
    '''
    pass
class WindowsAzureError(Exception):
    '''Root of the Windows Azure exception hierarchy.'''

    def __init__(self, message):
        # Delegate straight to Exception so str(err) yields the message.
        Exception.__init__(self, message)
class WindowsAzureConflictError(WindowsAzureError):
    '''Raised when a resource cannot be created because a resource with
    the same name already exists.'''

    def __init__(self, message):
        WindowsAzureError.__init__(self, message)
class WindowsAzureMissingResourceError(WindowsAzureError):
    '''Raised when a request for a resource (queue, table, container,
    etc...) fails because the specified resource does not exist.'''

    def __init__(self, message):
        WindowsAzureError.__init__(self, message)
class WindowsAzureBatchOperationError(WindowsAzureError):
    '''Raised when an operation inside a batch request fails.

    The failing operation's error code is kept on ``self.code``.
    '''

    def __init__(self, message, code):
        WindowsAzureError.__init__(self, message)
        self.code = code
class Feed(object):
    '''Marker element type for the lists built from ATOM feed responses
    (see _convert_response_to_feeds); carries no behavior of its own.'''
    pass
class _Base64String(str):
    '''str subclass used as a field-type marker: attributes declared with
    this type are base64-decoded during deserialization (see the
    _Base64String branch in _fill_data_to_return_object).'''
    pass
class HeaderDict(dict):
    '''dict with case-insensitive lookups, for HTTP header collections.

    Keys are stored exactly as inserted; __getitem__ lower-cases the
    requested key before looking it up.
    '''

    def __getitem__(self, key):
        return dict.__getitem__(self, key.lower())
def _encode_base64(data):
    '''Base64-encode *data* (text or bytes) and return the result as text.'''
    if isinstance(data, _unicode_type):
        data = data.encode('utf-8')
    return base64.b64encode(data).decode('utf-8')
def _decode_base64_to_bytes(data):
    '''Decode a base64 payload (text or bytes) into raw bytes.'''
    raw = data.encode('utf-8') if isinstance(data, _unicode_type) else data
    return base64.b64decode(raw)
def _decode_base64_to_text(data):
    '''Decode a base64 payload and interpret the decoded bytes as UTF-8.'''
    return _decode_base64_to_bytes(data).decode('utf-8')
def _get_readable_id(id_name, id_prefix_to_skip):
    """Strip the URL scaffolding from an ATOM id, leaving a friendly name.

    *id_name* looks like ``https://namespace.host.suffix/name`` (the name
    part may itself contain forward slashes). When no scheme separator or
    no path slash can be located, the id is returned untouched.
    """
    scheme_end = id_name.find('//')
    if scheme_end == -1:
        return id_name
    search_from = scheme_end + 2
    if id_prefix_to_skip:
        prefix_at = id_name.find(id_prefix_to_skip, search_from)
        if prefix_at != -1:
            search_from = prefix_at + len(id_prefix_to_skip)
        else:
            # Prefix absent: carry the failed index forward, exactly as
            # the original chained str.find calls did.
            search_from = prefix_at
    slash_at = id_name.find('/', search_from)
    return id_name[slash_at + 1:] if slash_at != -1 else id_name
def _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):
    '''Extract common ATOM <entry> properties into a dict.

    Pulls the m:etag attribute, the <updated> timestamp and the author
    <name>. When include_id is set, the entry's name is taken either from
    its <title> (use_title_as_id=True) or from its <id> URL, shortened
    via _get_readable_id with id_prefix_to_skip.
    '''
    properties = {}
    etag = entry.getAttributeNS(METADATA_NS, 'etag')
    if etag:
        properties['etag'] = etag
    for updated in _get_child_nodes(entry, 'updated'):
        properties['updated'] = updated.firstChild.nodeValue
    for name in _get_children_from_path(entry, 'author', 'name'):
        if name.firstChild is not None:
            properties['author'] = name.firstChild.nodeValue
    if include_id:
        if use_title_as_id:
            for title in _get_child_nodes(entry, 'title'):
                # NOTE(review): assumes <title> always has a text child --
                # unlike the author branch above there is no firstChild
                # guard here; confirm feeds never send an empty title.
                properties['name'] = title.firstChild.nodeValue
        else:
            for id in _get_child_nodes(entry, 'id'):
                properties['name'] = _get_readable_id(
                    id.firstChild.nodeValue, id_prefix_to_skip)
    return properties
def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):
    '''Parse an ATOM document and merge the properties of every top-level
    <entry> into a single dict.'''
    doc = minidom.parseString(xmlstr)
    merged = {}
    for entry_node in _get_child_nodes(doc, 'entry'):
        props = _get_entry_properties_from_node(
            entry_node, include_id, id_prefix_to_skip)
        merged.update(props)
    return merged
def _get_first_child_node_value(parent_node, node_name):
    '''Return the text of the first <node_name> child, or None when the
    element is absent or has no text.'''
    matches = _get_child_nodes(parent_node, node_name)
    if not matches:
        return None
    first = matches[0]
    if first.firstChild:
        return first.firstChild.nodeValue
    return None
def _get_child_nodes(node, tagName):
    '''Return only the *direct* children of node with the given tag name.

    getElementsByTagName searches the whole subtree, so deeper
    descendants are filtered out via the parentNode check.
    '''
    direct = []
    for candidate in node.getElementsByTagName(tagName):
        if candidate.parentNode == node:
            direct.append(candidate)
    return direct
def _get_children_from_path(node, *path):
    '''Walk a path of tag names and return the children at the innermost
    level.

    Each step descends into the first matching child, so only children
    sharing that common parent are returned (no "cousins"). A path step
    is either a plain tag-name string or a (namespace, tag) tuple.

    Returns [] when any intermediate step has no match.
    '''
    cur = node
    for index, step in enumerate(path):
        # Fix: the original bound this list to the name 'next', shadowing
        # the builtin; renamed to 'matches'.
        if isinstance(step, _strtype):
            matches = _get_child_nodes(cur, step)
        else:
            matches = _get_child_nodesNS(cur, *step)
        if index == len(path) - 1:
            return matches
        elif not matches:
            break
        cur = matches[0]
    return []
def _get_child_nodesNS(node, ns, tagName):
    '''Namespace-aware variant of _get_child_nodes: direct children only.'''
    direct = []
    for candidate in node.getElementsByTagNameNS(ns, tagName):
        if candidate.parentNode == node:
            direct.append(candidate)
    return direct
def _create_entry(entry_body):
    '''Wrap *entry_body* in the common ATOM <entry> envelope and return
    the complete XML document as a string.
    '''
    # Fix: take a single timestamp. The original called datetime.utcnow()
    # twice, so the utcoffset() check inspected a different datetime
    # object than the one being formatted.
    now = datetime.utcnow()
    updated_str = now.isoformat()
    if now.utcoffset() is None:
        # utcnow() is naive, so tag the timestamp explicitly as UTC.
        updated_str += '+00:00'
    entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
    return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
    '''Parse a ``YYYY-MM-DDTHH:MM:SS.ffffff`` timestamp into a datetime.'''
    iso_with_micros = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.strptime(strtime, iso_with_micros)
# Explicit python-name -> wire-name overrides for names the generic rules
# in _get_serialization_name cannot produce: acronyms (FQDN, OS),
# hyphenated HTTP-style headers (Content-MD5) and mixed casing such as
# 'CopyId' that plain .capitalize() joining would mangle.
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
    """Convert a snake_case Python attribute name into the PascalCase /
    hyphenated form used in the wire XML.

    Resolution order matters: explicit table lookup first, then the
    x_ms_ header form, then the *_id suffix, then a handful of
    hyphenated prefixes, and finally plain CamelCasing of the parts.
    """
    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
    if known is not None:
        return known
    if element_name.startswith('x_ms_'):
        # HTTP-header style names keep every word, hyphen separated.
        return element_name.replace('_', '-')
    if element_name.endswith('_id'):
        # NOTE(review): replaces every '_id' occurrence, not only the
        # suffix, and the 'ID' casing is later folded by .capitalize()
        # below ('CopyId'-style names come from the explicit table).
        # Verify before relying on this branch for new names.
        element_name = element_name.replace('_id', 'ID')
    for name in ['content_', 'last_modified', 'if_', 'cache_control']:
        if element_name.startswith(name):
            # '-_' survives the capitalize/join below as a literal '-',
            # producing e.g. 'Content-Type' from 'content_type'.
            element_name = element_name.replace('_', '-_')
    return ''.join(name.capitalize() for name in element_name.split('_'))
# Python 2/3 string compatibility shims: _str renders any value as the
# native str type, _unicode_type names the text type for isinstance checks.
if sys.version_info < (3,):
    _unicode_type = unicode
    def _str(value):
        # Encode unicode explicitly: on Python 2, str() would raise for
        # non-ASCII unicode input.
        if isinstance(value, unicode):
            return value.encode('utf-8')
        return str(value)
else:
    # On Python 3, str already is the text type, so both names alias it.
    _str = str
    _unicode_type = str
def _str_or_none(value):
    '''Stringify *value* via _str, passing None through untouched.'''
    return None if value is None else _str(value)
def _int_or_none(value):
    '''Render *value* as the decimal string of int(value); None passes
    through unchanged.'''
    return None if value is None else str(int(value))
def _bool_or_none(value):
    '''Serialize a boolean as 'true'/'false'; None passes through and
    any other type is rendered with str().'''
    if value is None:
        return None
    if isinstance(value, bool):
        return 'true' if value else 'false'
    return str(value)
def _convert_class_to_xml(source, xml_prefix=True):
    '''Recursively serialize a WindowsAzureData instance (or a list of
    them) into XML text.

    Element names come from the class name and, for scalar attributes,
    from _get_serialization_name; None-valued attributes are skipped.
    With xml_prefix (the default) the XML declaration is prepended.
    '''
    if source is None:
        return ''
    parts = []
    if xml_prefix:
        parts.append('<?xml version="1.0" encoding="utf-8"?>')
    if isinstance(source, list):
        for item in source:
            parts.append(_convert_class_to_xml(item, False))
    elif isinstance(source, WindowsAzureData):
        class_name = source.__class__.__name__
        parts.append('<' + class_name + '>')
        for attr, attr_value in vars(source).items():
            if attr_value is None:
                continue
            if isinstance(attr_value, (list, WindowsAzureData)):
                # Nested object/collection: recurse without the prefix.
                parts.append(_convert_class_to_xml(attr_value, False))
            else:
                tag = _get_serialization_name(attr)
                parts.append('<' + tag + '>' + xml_escape(str(attr_value)) +
                             '</' + tag + '>')
        parts.append('</' + class_name + '>')
    return ''.join(parts)
def _find_namespaces_from_child(parent, child, namespaces):
    """Recursively searches from the parent to the child,
    gathering all the applicable namespaces along the way.

    Mutates *namespaces* in place with xmlns declarations found on the
    way; returns True only when *parent* directly contains *child*.
    """
    for cur_child in parent.childNodes:
        if cur_child is child:
            # Direct hit: tell the caller it holds the child.
            return True
        if _find_namespaces_from_child(cur_child, child, namespaces):
            # cur_child is the node whose children include *child*;
            # harvest its xmlns declarations.
            # NOTE(review): returning False below stops the upward
            # propagation, so only the immediate parent's declarations
            # (plus the document element's, added by _find_namespaces)
            # are collected -- intermediate ancestors are skipped.
            # Verify this is the intended behavior before changing.
            for key in cur_child.attributes.keys():
                if key.startswith('xmlns:') or key == 'xmlns':
                    namespaces[key] = cur_child.attributes[key]
            break
    return False
def _find_namespaces(parent, child):
    '''Collect xmlns declarations that apply to *child* within document
    *parent*: the document element's own declarations plus those found
    on the path down to the child.'''
    namespaces = {}
    doc_attrs = parent.documentElement.attributes
    for key in doc_attrs.keys():
        if key == 'xmlns' or key.startswith('xmlns:'):
            namespaces[key] = doc_attrs[key]
    _find_namespaces_from_child(parent, child, namespaces)
    return namespaces
def _clone_node_with_namespaces(node_to_clone, original_doc):
    '''Deep-copy a node and re-attach the xmlns declarations that were
    in scope in its original document, so the clone stays serializable
    on its own.'''
    clone = node_to_clone.cloneNode(True)
    inherited = _find_namespaces(original_doc, node_to_clone)
    for attr_name, attr_value in inherited.items():
        clone.attributes[attr_name] = attr_value
    return clone
def _convert_response_to_feeds(response, convert_callback):
    '''Turn an ATOM feed (or single-entry) response into a _list_of(Feed).

    When convert_callback is a WindowsAzureData subclass, each entry's
    <content> child is deserialized into an instance of it; otherwise
    convert_callback is invoked with each entry's XML text (with
    namespaces re-attached) and its return value is collected.
    Continuation headers are exposed on the returned list as
    ``x_ms_continuation``.
    '''
    if response is None:
        return None
    feeds = _list_of(Feed)
    x_ms_continuation = HeaderDict()
    for name, value in response.headers:
        if 'x-ms-continuation' in name:
            # Keep only the part after 'x-ms-continuation-' as the key.
            x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
    if x_ms_continuation:
        setattr(feeds, 'x_ms_continuation', x_ms_continuation)
    xmldoc = minidom.parseString(response.body)
    xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
    if not xml_entries:
        # in some cases, response contains only entry but no feed
        xml_entries = _get_children_from_path(xmldoc, 'entry')
    if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):
        for xml_entry in xml_entries:
            return_obj = convert_callback()
            for node in _get_children_from_path(xml_entry,
                                                'content',
                                                convert_callback.__name__):
                _fill_data_to_return_object(node, return_obj)
            for name, value in _get_entry_properties_from_node(xml_entry,
                                                include_id=True,
                                                use_title_as_id=True).items():
                setattr(return_obj, name, value)
            feeds.append(return_obj)
    else:
        for xml_entry in xml_entries:
            # Re-attach inherited namespaces so the fragment parses alone.
            new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
            feeds.append(convert_callback(new_node.toxml('utf-8')))
    return feeds
def _validate_type_bytes(param_name, param):
    '''Raise TypeError unless *param* is a bytes object.'''
    if isinstance(param, bytes):
        return
    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_not_none(param_name, param):
    '''Raise TypeError when *param* is None.'''
    if param is not None:
        return
    raise TypeError(_ERROR_VALUE_NONE.format(param_name))
def _fill_list_of(xmldoc, element_type, xml_element_name):
    '''Deserialize every direct <xml_element_name> child of *xmldoc* into
    an element_type instance and return them as a list.'''
    return [
        _parse_response_body_from_xml_node(child, element_type)
        for child in _get_child_nodes(xmldoc, xml_element_name)
    ]
def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
                         xml_element_name):
    '''Convert a flat list of scalar XML elements into a Python list.

    The parent element holds a sequence of children each carrying one
    scalar value, e.g.::

        <Endpoints>
          <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
          ...
        </Endpoints>

    with element_type=str, parent_xml_element_name='Endpoints' and
    xml_element_name='Endpoint'. Returns None (the original's implicit
    return) when the parent element is absent.
    '''
    parents = _get_child_nodes(xmldoc, parent_xml_element_name)
    if not parents:
        return None
    scalars = _get_child_nodes(parents[0], xml_element_name)
    return [_get_node_value(scalar, element_type) for scalar in scalars]
def _fill_dict(xmldoc, element_name):
    '''Build a {tag: text} dict from the children of <element_name>.

    Children without a text node are skipped; returns None (the
    original's implicit return) when the element itself is absent.
    '''
    containers = _get_child_nodes(xmldoc, element_name)
    if not containers:
        return None
    result = {}
    for child in containers[0].childNodes:
        if child.firstChild:
            result[child.nodeName] = child.firstChild.nodeValue
    return result
def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
                  key_xml_element_name, value_xml_element_name):
    '''Convert name/value pair elements into a dictionary.

    The parent element holds a list of pair elements, each with one
    child for the key and one for the value, e.g.::

        <ExtendedProperties>
          <ExtendedProperty><Name>Ext1</Name><Value>Val1</Value></ExtendedProperty>
          <ExtendedProperty><Name>Ext2</Name><Value>Val2</Value></ExtendedProperty>
        </ExtendedProperties>

    with parent_xml_element_name='ExtendedProperties',
    pair_xml_element_name='ExtendedProperty', key_xml_element_name='Name'
    and value_xml_element_name='Value'. Pairs missing either child are
    skipped; an absent parent yields an empty dict.
    '''
    result = {}
    parents = _get_child_nodes(xmldoc, parent_xml_element_name)
    if parents:
        for pair in _get_child_nodes(parents[0], pair_xml_element_name):
            key_nodes = _get_child_nodes(pair, key_xml_element_name)
            value_nodes = _get_child_nodes(pair, value_xml_element_name)
            if key_nodes and value_nodes:
                result[key_nodes[0].firstChild.nodeValue] = \
                    value_nodes[0].firstChild.nodeValue
    return result
def _fill_instance_child(xmldoc, element_name, return_type):
    '''Deserialize the child element matching *element_name* into a new
    return_type instance, or None when that child is absent.'''
    matches = _get_child_nodes(
        xmldoc, _get_serialization_name(element_name))
    if not matches:
        return None
    instance = return_type()
    _fill_data_to_return_object(matches[0], instance)
    return instance
def _fill_instance_element(element, return_type):
    """Converts a DOM element into the specified object.

    Thin wrapper around _parse_response_body_from_xml_node.
    """
    return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
    '''Read the text of the child element for *element_name* and coerce
    it to the type of *data_member* (the attribute's current value).

    Returns None when the element is missing or empty. A None
    data_member means "no coercion": the raw string is returned.
    '''
    nodes = _get_child_nodes(
        xmldoc, _get_serialization_name(element_name))
    if not nodes or not nodes[0].childNodes:
        return None
    text = nodes[0].firstChild.nodeValue
    if data_member is None:
        return text
    if isinstance(data_member, datetime):
        return _to_datetime(text)
    if type(data_member) is bool:
        # Anything but the literal 'false' (any case) counts as True.
        return text.lower() != 'false'
    return type(data_member)(text)
def _get_node_value(xmlelement, data_type):
    '''Return the element's text coerced to *data_type*; datetime and
    bool receive special handling.'''
    text = xmlelement.firstChild.nodeValue
    if data_type is datetime:
        return _to_datetime(text)
    if data_type is bool:
        # Only the literal 'false' (any case) maps to False.
        return text.lower() != 'false'
    return data_type(text)
def _get_request_body_bytes_only(param_name, param_value):
    '''Validate *param_value* as a request body under the bytes-only
    policy.

    None becomes an empty body and bytes pass straight through. For
    backwards compatibility Python 2 still auto-converts other types
    (with a warning); Python 3 rejects them with TypeError.
    '''
    if param_value is None:
        return b''
    if isinstance(param_value, bytes):
        return param_value
    if sys.version_info >= (3,):
        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
    # Python 2 legacy path: warn, then fall back to the permissive
    # conversion in _get_request_body.
    warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))
    return _get_request_body(param_value)
def _get_request_body(request_body):
    '''Coerce an arbitrary object into a bytes request body.

    None maps to an empty body, WindowsAzureData instances are
    serialized to XML first, text is UTF-8 encoded, and anything else
    is stringified and then encoded.
    '''
    if request_body is None:
        return b''
    if isinstance(request_body, WindowsAzureData):
        request_body = _convert_class_to_xml(request_body)
    if isinstance(request_body, bytes):
        return request_body
    if isinstance(request_body, _unicode_type):
        return request_body.encode('utf-8')
    request_body = str(request_body)
    if isinstance(request_body, _unicode_type):
        # On Python 3 str() always yields text, so this second check
        # performs the final encode; on Python 2 str() is already bytes.
        return request_body.encode('utf-8')
    return request_body
def _parse_enum_results_list(response, return_type, resp_type, item_type):
    """Parse an <EnumerationResults> response into a *return_type*.

    resp_type is a string, such as 'Containers'; return_type is the type
    being constructed, such as ContainerEnumResults; item_type is the
    type object of each item, such as Container. The parsed items land
    on the return object's attribute named resp_type.lower(), and the
    remaining scalar attributes are filled from the enumeration element.
    """
    # parsing something like:
    # <EnumerationResults ... >
    #   <Queues>
    #       <Queue>
    #           <Something />
    #           <SomethingElse />
    #       </Queue>
    #   </Queues>
    # </EnumerationResults>
    respbody = response.body
    return_obj = return_type()
    doc = minidom.parseString(respbody)
    items = []
    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
        # path is something like Queues, Queue -- resp_type[:-1] is a
        # naive singularization (drops the trailing 's').
        for child in _get_children_from_path(enum_results,
                                             resp_type,
                                             resp_type[:-1]):
            items.append(_fill_instance_element(child, item_type))
        for name, value in vars(return_obj).items():
            # queues, Queues, this is the list its self which we populated
            # above
            if name == resp_type.lower():
                # the list its self.
                continue
            value = _fill_data_minidom(enum_results, name, value)
            if value is not None:
                setattr(return_obj, name, value)
    setattr(return_obj, resp_type.lower(), items)
    return return_obj
def _parse_simple_list(response, type, item_type, list_name):
    '''Parse a response shaped <TypeName><ItemName/>...</TypeName> into a
    *type* instance whose *list_name* attribute holds the parsed items.

    ('type' shadows the builtin, but the parameter name is part of the
    public signature and is kept for compatibility.)
    '''
    container = type()
    doc = minidom.parseString(response.body)
    parsed = [
        _fill_instance_element(node, item_type)
        for node in _get_children_from_path(
            doc, type.__name__, item_type.__name__)
    ]
    setattr(container, list_name, parsed)
    return container
def _parse_response(response, return_type):
    '''
    Parse the HTTPResponse's body and fill all the data into a class of
    return_type.

    Thin wrapper around _parse_response_body_from_xml_text.
    '''
    return _parse_response_body_from_xml_text(response.body, return_type)
def _parse_service_resources_response(response, return_type):
    '''
    Parse the HTTPResponse's body and fill all the data into a class of
    return_type.

    Thin wrapper around
    _parse_response_body_from_service_resources_xml_text, which returns
    a _list_of(return_type) rather than a single instance.
    '''
    return _parse_response_body_from_service_resources_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
    '''Populate return_obj's attributes from the DOM *node*.

    Each attribute's current value acts as a type marker selecting the
    deserialization strategy: _list_of / _scalar_list_of / _dict_of
    describe collections, _xml_attribute reads an XML attribute,
    WindowsAzureData recurses into a child element, dict reads a flat
    child mapping, _Base64String base64-decodes the text, and anything
    else is coerced via _fill_data_minidom.
    '''
    # Snapshot the attribute dict: setattr below mutates the object
    # while we iterate.
    members = dict(vars(return_obj))
    for name, value in members.items():
        if isinstance(value, _list_of):
            setattr(return_obj,
                    name,
                    _fill_list_of(node,
                                  value.list_type,
                                  value.xml_element_name))
        elif isinstance(value, _scalar_list_of):
            setattr(return_obj,
                    name,
                    _fill_scalar_list_of(node,
                                         value.list_type,
                                         _get_serialization_name(name),
                                         value.xml_element_name))
        elif isinstance(value, _dict_of):
            setattr(return_obj,
                    name,
                    _fill_dict_of(node,
                                  _get_serialization_name(name),
                                  value.pair_xml_element_name,
                                  value.key_xml_element_name,
                                  value.value_xml_element_name))
        elif isinstance(value, _xml_attribute):
            real_value = None
            if node.hasAttribute(value.xml_element_name):
                real_value = node.getAttribute(value.xml_element_name)
            if real_value is not None:
                setattr(return_obj, name, real_value)
        elif isinstance(value, WindowsAzureData):
            setattr(return_obj,
                    name,
                    _fill_instance_child(node, name, value.__class__))
        elif isinstance(value, dict):
            setattr(return_obj,
                    name,
                    _fill_dict(node, _get_serialization_name(name)))
        elif isinstance(value, _Base64String):
            value = _fill_data_minidom(node, name, '')
            if value is not None:
                value = _decode_base64_to_text(value)
            # always set the attribute, so we don't end up returning an object
            # with type _Base64String
            setattr(return_obj, name, value)
        else:
            value = _fill_data_minidom(node, name, value)
            if value is not None:
                setattr(return_obj, name, value)
def _parse_response_body_from_xml_node(node, return_type):
    '''Instantiate return_type and populate it from the given DOM node.'''
    instance = return_type()
    _fill_data_to_return_object(node, instance)
    return instance
def _parse_response_body_from_xml_text(respbody, return_type):
    '''Parse an XML document and populate a return_type instance from
    the element matching the type's XML name.

    A class may override the expected root element name via an
    _xml_name attribute; otherwise its own class name is used.
    '''
    doc = minidom.parseString(respbody)
    xml_name = getattr(return_type, '_xml_name', return_type.__name__)
    instance = return_type()
    for node in _get_child_nodes(doc, xml_name):
        _fill_data_to_return_object(node, instance)
    return instance
def _parse_response_body_from_service_resources_xml_text(respbody, return_type):
    '''Parse a <ServiceResources> document into a _list_of(return_type),
    one deserialized instance per <ServiceResource> child.'''
    doc = minidom.parseString(respbody)
    results = _list_of(return_type)
    for node in _get_children_from_path(doc, "ServiceResources", "ServiceResource"):
        item = return_type()
        _fill_data_to_return_object(node, item)
        results.append(item)
    return results
class _dict_of(dict):
    """A dict that remembers the XML element names used for its
    key/value pairs. Used for deserialization and construction."""

    def __init__(self, pair_xml_element_name, key_xml_element_name,
                 value_xml_element_name):
        super(_dict_of, self).__init__()
        self.pair_xml_element_name = pair_xml_element_name
        self.key_xml_element_name = key_xml_element_name
        self.value_xml_element_name = value_xml_element_name
class _list_of(list):
    """A list that carries the element type expected to go in it, plus
    the XML element name those items use (defaults to the type's own
    name). Used for deserialization and construction."""

    def __init__(self, list_type, xml_element_name=None):
        super(_list_of, self).__init__()
        self.list_type = list_type
        self.xml_element_name = (
            list_type.__name__ if xml_element_name is None else xml_element_name)
class _scalar_list_of(list):
    """A list of scalars that carries both the scalar type expected in
    it and the XML element name of each item. Used for deserialization
    and construction."""

    def __init__(self, list_type, xml_element_name):
        super(_scalar_list_of, self).__init__()
        self.list_type = list_type
        self.xml_element_name = xml_element_name
class _xml_attribute:
    """An accessor marker for XML attributes: fields declared with this
    type are read from the named attribute of the element rather than
    from a child element (see the _xml_attribute branch in
    _fill_data_to_return_object). Used for deserialization and
    construction."""
    def __init__(self, xml_element_name):
        self.xml_element_name = xml_element_name
def _update_request_uri_query_local_storage(request, use_local_storage):
    '''Normalize the request URI/query; when targeting the local storage
    emulator, prefix the path with the well-known dev account name.'''
    uri, query = _update_request_uri_query(request)
    if not use_local_storage:
        return uri, query
    return '/' + DEV_ACCOUNT_NAME + uri, query
def _update_request_uri_query(request):
    '''Move any query string embedded in request.path into request.query,
    then rebuild request.path with all query parameters URL-encoded.

    Parameters already on the request keep their position; those parsed
    out of the URI are appended after them. Mutates *request* in place
    and returns the updated (path, query) pair.
    '''
    if '?' in request.path:
        request.path, _, raw_query = request.path.partition('?')
        if raw_query:
            for piece in raw_query.split('&'):
                if '=' in piece:
                    param, _, param_value = piece.partition('=')
                    request.query.append((param, param_value))
    request.path = url_quote(request.path, '/()$=\',')
    # Re-attach the accumulated parameters, URL-encoding each value.
    if request.query:
        request.path += '?'
        for param, param_value in request.query:
            if param_value is not None:
                request.path += param + '=' + \
                    url_quote(param_value, '/()$=\',') + '&'
        # Drop the trailing '&' (or the bare '?' if nothing was added).
        request.path = request.path[:-1]
    return request.path, request.query
def _dont_fail_on_exist(error):
    '''Error handler for create_* APIs called with fail_on_exist=False:
    swallow the already-exists conflict, re-raise anything else.'''
    if isinstance(error, WindowsAzureConflictError):
        return False
    raise error
def _dont_fail_not_exist(error):
    '''Error handler used with fail-flag=False API variants: swallow the
    missing-resource error, re-raise anything else.'''
    if isinstance(error, WindowsAzureMissingResourceError):
        return False
    raise error
def _general_error_handler(http_error):
    '''Map an HTTP error onto the module's exception hierarchy.

    409 -> WindowsAzureConflictError, 404 ->
    WindowsAzureMissingResourceError; anything else raises a generic
    WindowsAzureError, with the decoded response body appended when one
    is available.
    '''
    if http_error.status == 409:
        raise WindowsAzureConflictError(
            _ERROR_CONFLICT.format(str(http_error)))
    if http_error.status == 404:
        raise WindowsAzureMissingResourceError(
            _ERROR_NOT_FOUND.format(str(http_error)))
    message = _ERROR_UNKNOWN.format(str(http_error))
    if http_error.respbody is not None:
        message += '\n' + http_error.respbody.decode('utf-8')
    raise WindowsAzureError(message)
def _parse_response_for_dict(response):
    '''Collect the response headers into a case-insensitive HeaderDict,
    dropping standard transport headers; a None response yields None.'''
    if response is None:
        return None
    standard = ('server', 'date', 'location', 'host',
                'via', 'proxy-connection', 'connection')
    headers = HeaderDict()
    if response.headers:
        for name, value in response.headers:
            if name.lower() not in standard:
                headers[name] = value
    return headers
def _parse_response_for_dict_prefix(response, prefixes):
    '''Like _parse_response_for_dict, but keep only headers whose name
    starts with one of *prefixes* (case-insensitive). Returns None when
    the response is None or carries no usable headers.'''
    if response is None:
        return None
    all_headers = _parse_response_for_dict(response)
    if not all_headers:
        return None
    selected = {}
    for name, value in all_headers.items():
        lowered = name.lower()
        for prefix in prefixes:
            if lowered.startswith(prefix.lower()):
                selected[name] = value
                break
    return selected
def _parse_response_for_dict_filter(response, filter):
    '''Like _parse_response_for_dict, but keep only headers whose
    lower-cased name appears in *filter*. Returns None when the response
    is None or carries no usable headers.'''
    if response is None:
        return None
    all_headers = _parse_response_for_dict(response)
    if not all_headers:
        return None
    return {name: value
            for name, value in all_headers.items()
            if name.lower() in filter}
def _sign_string(key, string_to_sign, key_is_base64=True):
    '''HMAC-SHA256 sign *string_to_sign* with *key* and return the
    base64-encoded digest.

    The key may arrive base64-encoded (the default for storage account
    keys) or as plain text/bytes.
    '''
    if key_is_base64:
        key = _decode_base64_to_bytes(key)
    elif isinstance(key, _unicode_type):
        key = key.encode('utf-8')
    if isinstance(string_to_sign, _unicode_type):
        string_to_sign = string_to_sign.encode('utf-8')
    digest = hmac.HMAC(key, string_to_sign, hashlib.sha256).digest()
    return _encode_base64(digest)
| apache-2.0 |
fpy171/django | tests/extra_regress/tests.py | 130 | 16198 | from __future__ import unicode_literals
import datetime
from collections import OrderedDict
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Order, RevisionableModel, TestObject
class ExtraRegressTests(TestCase):
    def setUp(self):
        # One known user shared by the extra(select=...) regression
        # queries below.
        self.u = User.objects.create_user(
            username="fred",
            password="secret",
            email="fred@example.com"
        )
    def test_regression_7314_7372(self):
        """
        Regression tests for #7314 and #7372: an extra() where-clause
        must survive being combined (&) with a filtered queryset.
        """
        rm = RevisionableModel.objects.create(
            title='First Revision',
            when=datetime.datetime(2008, 9, 28, 10, 30, 0)
        )
        self.assertEqual(rm.pk, rm.base.pk)
        rm2 = rm.new_revision()
        rm2.title = "Second Revision"
        rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
        rm2.save()
        self.assertEqual(rm2.title, 'Second Revision')
        self.assertEqual(rm2.base.title, 'First Revision')
        self.assertNotEqual(rm2.pk, rm.pk)
        self.assertEqual(rm2.base.pk, rm.pk)
        # Queryset to match most recent revision:
        qs = RevisionableModel.objects.extra(
            where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
                'table': RevisionableModel._meta.db_table,
            }]
        )
        self.assertQuerysetEqual(qs,
            [('Second Revision', 'First Revision')],
            transform=lambda r: (r.title, r.base.title)
        )
        # Queryset to search for string in title:
        qs2 = RevisionableModel.objects.filter(title__contains="Revision")
        self.assertQuerysetEqual(qs2,
            [
                ('First Revision', 'First Revision'),
                ('Second Revision', 'First Revision'),
            ],
            transform=lambda r: (r.title, r.base.title),
            ordered=False
        )
        # Following queryset should return the most recent revision:
        # the intersection must keep the extra() where clause from qs.
        self.assertQuerysetEqual(qs & qs2,
            [('Second Revision', 'First Revision')],
            transform=lambda r: (r.title, r.base.title),
            ordered=False
        )
    def test_extra_stay_tied(self):
        # Extra select parameters should stay tied to their corresponding
        # select portions. Applies when portions are updated or otherwise
        # moved around.
        qs = User.objects.extra(
            select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
            select_params=(1, 3)
        )
        # beta is replaced with a literal; alpha gets a fresh placeholder
        # bound to 5; gamma must keep its original parameter (3).
        qs = qs.extra(select={"beta": 4})
        qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
        self.assertEqual(
            list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
            [{'alpha': 5, 'beta': 4, 'gamma': 3}]
        )
    def test_regression_7957(self):
        """
        Regression test for #7957: Combining extra() calls should leave the
        corresponding parameters associated with the right extra() bit. I.e.
        internal dictionary must remain sorted.
        """
        # Whichever order alpha/beta are added in, each keeps its own
        # bound parameter.
        self.assertEqual(
            (User.objects
             .extra(select={"alpha": "%s"}, select_params=(1,))
             .extra(select={"beta": "%s"}, select_params=(2,))[0].alpha),
            1
        )
        self.assertEqual(
            (User.objects
             .extra(select={"beta": "%s"}, select_params=(1,))
             .extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha),
            2
        )
    def test_regression_7961(self):
        """
        Regression test for #7961: When not using a portion of an
        extra(...) in a query, remove any corresponding parameters from the
        query as well.
        """
        # values_list('id') drops the alpha select; its -6 parameter must
        # be dropped with it rather than bleeding into the query.
        self.assertEqual(
            list(User.objects
                 .extra(select={"alpha": "%s"}, select_params=(-6,))
                 .filter(id=self.u.id)
                 .values_list('id', flat=True)),
            [self.u.id]
        )
    def test_regression_8063(self):
        """
        Regression test for #8063: limiting a query shouldn't discard any
        extra() bits.
        """
        qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
        self.assertQuerysetEqual(qs, ['<User: fred>'])
        # Slicing must preserve the extra() where clause and its params.
        self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
    def test_regression_8039(self):
        """
        Regression test for #8039: Ordering sometimes removed relevant tables
        from extra(). This test is the critical case: ordering uses a table,
        but then removes the reference because of an optimization. The table
        should still be present because of the extra() call.
        """
        # No rows expected; the point is that the query is valid (the
        # auth_user table referenced by the where clause is kept).
        self.assertQuerysetEqual(
            (Order.objects
             .extra(where=["username=%s"], params=["fred"], tables=["auth_user"])
             .order_by('created_by')),
            []
        )
    def test_regression_8819(self):
        """
        Regression test for #8819: Fields in the extra(select=...) list
        should be available to extra(order_by=...).
        """
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
            ['<User: fred>']
        )
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
            ['<User: fred>']
        )
        # Ordering by the extra field must also work combined with
        # distinct().
        self.assertQuerysetEqual(
            User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
            ['<User: fred>']
        )
    def test_dates_query(self):
        """
        When calling the dates() method on a queryset with extra selection
        columns, we can (and should) ignore those columns. They don't change
        the result and cause incorrect SQL to be produced otherwise.
        """
        RevisionableModel.objects.create(
            title='First Revision',
            when=datetime.datetime(2008, 9, 28, 10, 30, 0)
        )
        # The extra 'the_answer' select must be dropped from the
        # datetimes() aggregation query.
        self.assertQuerysetEqual(
            RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'),
            [datetime.datetime(2008, 9, 1, 0, 0)],
            transform=lambda d: d,
        )
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values()
),
[{
'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first',
'id': obj.pk, 'first': 'first'
}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(
TestObject.objects
.values()
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[{
'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first',
'id': obj.pk, 'first': 'first'
}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('first', 'second')
),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(
TestObject.objects
.values('first', 'second')
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('first', 'second', 'foo')
),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('foo', 'whiz')
),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list()
),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(
TestObject.objects
.values_list()
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'second')
),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(
TestObject.objects
.values_list('first', 'second')
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[('first', 'second')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('second', flat=True)
),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'second', 'whiz')
),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('foo', 'whiz')
),
[('first', 'third')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', flat=True)
),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', 'foo')
),
[('third', 'first')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'id')
),
[('first', obj.pk)]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', 'first', 'bar', 'id')
),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Ensure that extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
def test_extra_values_distinct_ordering(self):
t1 = TestObject.objects.create(first='a', second='a', third='a')
t2 = TestObject.objects.create(first='a', second='b', third='b')
qs = TestObject.objects.extra(
select={'second_extra': 'second'}
).values_list('id', flat=True).distinct()
self.assertQuerysetEqual(
qs.order_by('second_extra'), [t1.pk, t2.pk], lambda x: x)
self.assertQuerysetEqual(
qs.order_by('-second_extra'), [t2.pk, t1.pk], lambda x: x)
# Note: the extra ordering must appear in select clause, so we get two
# non-distinct results here (this is on purpose, see #7070).
self.assertQuerysetEqual(
qs.order_by('-second_extra').values_list('first', flat=True),
['a', 'a'], lambda x: x)
| bsd-3-clause |
fishcorn/pylearn2 | pylearn2/scripts/tutorials/jobman_demo/utils.py | 44 | 1560 | from __future__ import print_function
import numpy
from jobman import tools
from jobman.tools import DD
def log_uniform(low, high):
    """
    Generates a number that's uniformly distributed in the log-space between
    `low` and `high`

    Parameters
    ----------
    low : float
        Lower bound of the randomly generated number
    high : float
        Upper bound of the randomly generated number

    Returns
    -------
    rval : float
        Random number uniformly distributed in the log-space specified by `low`
        and `high`
    """
    # Draw uniformly in log-space, then map back through exp so the result
    # lands between `low` and `high` with a log-uniform distribution.
    sample = numpy.random.uniform(numpy.log(low), numpy.log(high))
    return float(numpy.exp(sample))
def results_extractor(train_obj):
    """Extract the final y_misclass and y_nll values from a trained model's
    monitor channels and package them into a jobman DD."""
    monitor_channels = train_obj.model.monitor.channels
    return DD(
        train_y_misclass=monitor_channels['y_misclass'].val_record[-1],
        train_y_nll=monitor_channels['y_nll'].val_record[-1],
    )
def parse_results(cwd):
    """Scan jobman conf files under `cwd`, find the run with the lowest
    results.train_y_misclass, and print its score and hyperparameters."""
    best_dd = None
    best_score = numpy.inf
    for conf in tools.find_conf_files(cwd):
        dd = conf[1]
        if 'results.train_y_misclass' not in dd:
            continue
        score = dd['results.train_y_misclass']
        if score < best_score:
            best_score = score
            best_dd = dd
    print("Optimal results.train_y_misclass:", str(best_score))
    for key, value in best_dd.items():
        if 'hyper_parameters' in key:
            print(key + ": " + str(value))
| bsd-3-clause |
meppe/ros-ort | src/frcnn/src/lib/setup.py | 11 | 5665 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np
def find_in_path(name, path):
    """Return the absolute path of `name` in the search `path`, or None.

    `path` is a pathsep-separated list of directories, like $PATH.
    """
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.

    Raises EnvironmentError when nvcc cannot be found or any expected
    directory does not exist.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # bug fix: dict.iteritems() does not exist on Python 3; items() behaves
    # the same here on both Python 2 and 3.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
# Locate the CUDA toolchain once at import time; raises if it can't be found.
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Older numpy releases exposed the include path under a different name.
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on.

    `self` here is the distutils compiler object being patched, not a class
    instance of ours.
    """
    # tell the compiler it can process .cu
    self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
    default_compiler_so = self.compiler_so
    # NOTE: this local shadows the builtin `super`; it holds the original
    # (unpatched) _compile bound method, which the closure below delegates to.
    super = self._compile
    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so
    # inject our redefined _compile method into the class
    self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler to handle .cu sources."""
    def build_extensions(self):
        # Patch the compiler object in place before delegating to distutils.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Extension list: Cython-accelerated bbox/NMS kernels plus the COCO mask API.
ext_modules = [
    Extension(
        "utils.cython_bbox",
        ["utils/bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    # GPU NMS mixes a CUDA kernel with a Cython wrapper, which is why the
    # compiler is customized above to dispatch .cu files to nvcc.
    Extension('nms.gpu_nms',
        ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        # this syntax is specific to this build system
        # we're only going to use certain compiler args with nvcc and not with
        # gcc the implementation of this trick is in customize_compiler() below
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs = [numpy_include, CUDA['include']]
    ),
    Extension(
        'pycocotools._mask',
        sources=['pycocotools/maskApi.c', 'pycocotools/_mask.pyx'],
        include_dirs = [numpy_include, 'pycocotools'],
        extra_compile_args={
            'gcc': ['-Wno-cpp', '-Wno-unused-function', '-std=c99']},
    ),
]
# Build everything with the nvcc-aware build_ext defined above.
setup(
    name='fast_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
| gpl-3.0 |
etashjian/ECE757-final | src/arch/micro_asm_test.py | 86 | 3195 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from micro_asm import MicroAssembler, Combinational_Macroop, Rom_Macroop, Rom
class Bah(object):
    """Parameterless stub microop with mnemonic "bah"."""
    def __init__(self):
        self.mnemonic = "bah"
class Bah_Tweaked(object):
    """Alternate "bah" microop swapped in by the .tweak directive."""
    def __init__(self):
        self.mnemonic = "bah_tweaked"
class Hoop(object):
    """Stub microop taking two parameters, folded into its mnemonic."""
    def __init__(self, first_param, second_param):
        self.mnemonic = "hoop_{0}_{1}".format(first_param, second_param)
    def __str__(self):
        return self.mnemonic
class Dah(object):
    """Parameterless stub microop with mnemonic "dah"."""
    def __init__(self):
        self.mnemonic = "dah"
# Mnemonic -> microop class table used by the assembler; the .tweak/.untweak
# directives below swap the "bah" entry at assembly time.
microops = {
    "bah": Bah,
    "hoop": Hoop,
    "dah": Dah
}
class TestMacroop(Combinational_Macroop):
    """Macroop used by the assembler self-test; wires up the .tweak,
    .untweak and .print directives exercised by the sample assembly below."""
    def tweak(self):
        # Swap in the alternate "bah" implementation for later microops.
        microops["bah"] = Bah_Tweaked
    def untweak(self):
        # Restore the default "bah" implementation.
        microops["bah"] = Bah
    def print_debug(self, message):
        # bug fix: `print message` is Python-2-only syntax (SyntaxError on
        # Python 3); the parenthesized form works on both 2 and 3.
        print(message)
    def __init__(self, name):
        super(TestMacroop, self).__init__(name)
        self.directives = {
            "tweak": self.tweak,
            "untweak": self.untweak,
            "print": self.print_debug
        }
assembler = MicroAssembler(TestMacroop, microops, Rom('main ROM'), Rom_Macroop)
testAssembly = '''
# Single line comment
def rom {
goo: bah
extern la: hoop 4*8, "a"
}; /* multiline comment on one line */
/* multi line comment across lines
to make sure they work */
def macroop squishy {
.tweak
bah
.untweak
.print "In the midst"
bah
dah # single line comment after something
.tweak
};
#Extending the rom...
def rom
{
#Here's more stuff for the rom
bah
};
def macroop squashy {
bah
};
def macroop jumper (bar);
'''
assembler.assemble(testAssembly)
| bsd-3-clause |
e2/googlemock | scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Fundamental C++ types.
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
# Storage-class and declaration modifiers.
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
# Access-control keywords.
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
# Control-flow, exception and loop keywords.
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
# Union of every keyword category above.
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
    """Return True if `token` is any C++ keyword."""
    return token in ALL
def IsBuiltinType(token):
    """Return True if `token` is a builtin type or a type modifier.

    'virtual' and 'inline' only apply to methods, so they can't be types
    by themselves and are rejected explicitly.
    """
    if token in ('virtual', 'inline'):
        return False
    return token in TYPES or token in TYPE_MODIFIERS
| bsd-3-clause |
benthomasson/ansible | lib/ansible/module_utils/azure_rm_common.py | 16 | 27094 | # Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import re
import sys
import copy
import importlib
import inspect
from packaging.version import Version
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
# Arguments every Azure RM module accepts (credential/profile selection).
AZURE_COMMON_ARGS = dict(
    profile=dict(type='str'),
    subscription_id=dict(type='str', no_log=True),
    client_id=dict(type='str', no_log=True),
    secret=dict(type='str', no_log=True),
    tenant=dict(type='str', no_log=True),
    ad_user=dict(type='str', no_log=True),
    password=dict(type='str', no_log=True),
    # debug=dict(type='bool', default=False),
)
# Maps each credential argument to the environment variable it falls back to.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)
# Tag-handling arguments, added only for modules that support resource tags.
AZURE_TAG_ARGS = dict(
    tags=dict(type='dict'),
    append_tags=dict(type='bool', default=True),
)
AZURE_COMMON_REQUIRED_IF = [
    ('log_mode', 'file', ['log_path'])
]
ANSIBLE_USER_AGENT = 'Ansible-Deploy'
# Matches an IPv4 CIDR such as 10.0.0.0/24.
CIDR_PATTERN = re.compile("(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
                          "[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))")
# Azure provisioning-state values checked by check_provisioning_state().
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
from msrest.serialization import Serializer
except ImportError as exc:
HAS_MSRESTAZURE_EXC = exc
HAS_MSRESTAZURE = False
try:
from enum import Enum
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \
NetworkInterfaceIPConfiguration, Subnet
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.version import VERSION as network_client_version
from azure.mgmt.storage.version import VERSION as storage_client_version
from azure.mgmt.compute.version import VERSION as compute_client_version
from azure.mgmt.resource.version import VERSION as resource_client_version
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.storage.storage_management_client import StorageManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
def azure_id_to_dict(id):
    """Split an Azure resource id into a dict.

    Each path segment maps to the segment that follows it; note the pairs
    overlap (segment N is both a value for N-1 and a key for N+1), matching
    the long-standing behaviour callers rely on.
    """
    segments = re.sub(r'^\/', '', id).split('/')
    return {segments[i]: segments[i + 1] for i in range(len(segments) - 1)}
# Minimum SDK client versions this module base was validated against; used by
# check_client_version().
AZURE_EXPECTED_VERSIONS = dict(
    storage_client_version="0.30.0rc5",
    compute_client_version="0.30.0rc5",
    network_client_version="0.30.0rc5",
    resource_client_version="0.30.0rc5"
)
# Release of the `azure` meta-package suggested in upgrade error messages.
AZURE_MIN_RELEASE = '2.0.0rc5'
class AzureRMModuleBase(object):
    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False):
        """Merge the common Azure arg spec with the derived module's spec,
        authenticate (parameters -> environment -> ~/.azure/credentials),
        validate tags, then run exec_module() and pass its result to
        self.module.exit_json().
        """
        # Build the final argument spec: common args, optional tag args, then
        # the derived spec (later updates win).
        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)
        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)
        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if
        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)
        # Import failures are reported only now, so the module can emit a
        # proper JSON error instead of crashing at import time.
        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
                      "- {0}".format(HAS_MSRESTAZURE_EXC))
        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))
        # Service client handles; initialised to None here (presumably created
        # on demand elsewhere in the class -- not visible in this chunk).
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')
        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")
        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']
        # Service-principal auth takes precedence over AD user/password.
        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            tenant = self.credentials.get('tenant')
            if tenant is not None:
                self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'], tenant=tenant)
            else:
                self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")
        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])
        res = self.exec_module(**self.module.params)
        self.module.exit_json(**res)
def check_client_version(self, client_name, client_version, expected_version):
# Ensure Azure modules are at least 2.0.0rc5.
if Version(client_version) < Version(expected_version):
self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
"`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
AZURE_MIN_RELEASE))
    def exec_module(self, **kwargs):
        """Entry point that each derived module must override; __init__ calls
        it with the module parameters and exits with its result."""
        self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
    def fail(self, msg, **kwargs):
        '''
        Shortcut for calling module.fail()

        :param msg: Error message text.
        :param kwargs: Any key=value pairs
        :return: None
        '''
        # NOTE(review): module.fail_json() is expected to terminate the
        # module run, so callers treat this as non-returning -- confirm.
        self.module.fail_json(msg=msg, **kwargs)
    def log(self, msg, pretty_print=False):
        # Intentionally a no-op in production; re-enable the commented code
        # below during development to append messages to azure_rm.log.
        pass
        # Use only during module development
        # if self.debug:
        #     log_file = open('azure_rm.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #     else:
        #         log_file.write(msg + u'\n')
def validate_tags(self, tags):
'''
Check if tags dictionary contains string:string pairs.
:param tags: dictionary of string:string pairs
:return: None
'''
if not self.facts_module:
if not isinstance(tags, dict):
self.fail("Tags must be a dictionary of string:string values.")
for key, value in tags.items():
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
with bool indicating if there was a change and dict of new
tags to assign to the object.
:param tags: metadata tags from the object
:return: bool, dict
'''
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
changed = False
if isinstance(self.module.params.get('tags'), dict):
for key, value in self.module.params['tags'].items():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
if isinstance(tags, dict):
for key, value in tags.items():
if not self.module.params['tags'].get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
def has_tags(self, obj_tags, tag_list):
'''
Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
exists in object tags.
:param obj_tags: dictionary of tags from an Azure object.
:param tag_list: list of tag keys or tag key:value pairs
:return: bool
'''
if not obj_tags and tag_list:
return False
if not tag_list:
return True
matches = 0
result = False
for tag in tag_list:
tag_key = tag
tag_value = None
if ':' in tag:
tag_key, tag_value = tag.split(':')
if tag_value and obj_tags.get(tag_key) == tag_value:
matches += 1
elif not tag_value and obj_tags.get(tag_key):
matches += 1
if matches == len(tag_list):
result = True
return result
    def get_resource_group(self, resource_group):
        '''
        Fetch a resource group.

        :param resource_group: name of a resource group
        :return: resource group object
        '''
        try:
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            # A CloudError from the SDK is reported as "not found" here.
            self.fail("Parameter error: resource group {0} not found".format(resource_group))
        except Exception as exc:
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def _get_profile(self, profile="default"):
path = expanduser("~/.azure/credentials")
try:
config = configparser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except:
pass
if credentials.get('subscription_id'):
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile']:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials.get('subscription_id') is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: module parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = params.get(attribute, None)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~./azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
return None
def serialize_obj(self, obj, class_name, enum_modules=[]):
'''
Return a JSON representation of an Azure object.
:param obj: Azure object
:param class_name: Name of the object's class
:param enum_modules: List of module names to build enum dependencies from.
:return: serialized result
'''
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ")
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)
return serializer.body(obj, class_name)
def get_poller_result(self, poller, wait=5):
'''
Consistent method of waiting on and retrieving results from Azure's long poller
:param poller Azure poller object
:return object resulting from the original request
'''
try:
delay = wait
while not poller.done():
self.log("Waiting for {0} sec".format(delay))
poller.wait(timeout=delay)
return poller.result()
except Exception as exc:
self.log(str(exc))
raise
def check_provisioning_state(self, azure_object, requested_state='present'):
    '''
    Check an Azure object's provisioning state. If something did not complete the provisioning
    process, then we cannot operate on it.

    :param azure_object: An object such as a subnet, storageaccount, etc. Must have
                         provisioning_state and name attributes.
    :param requested_state: State the caller wants; 'absent' suppresses the
                            provisioning-state failure (a half-provisioned object
                            may still be deleted).
    :return: None (calls self.fail, which aborts the module, on a bad state)
    '''
    # Resource-group-style objects carry the state under .properties.
    if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
       hasattr(azure_object, 'name'):
        # resource group object fits this model
        if isinstance(azure_object.properties.provisioning_state, Enum):
            if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
               requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
            return
        if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
           requested_state != 'absent':
            self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
        return
    # NOTE(review): this condition reads `or not hasattr(..., 'name')`; an object
    # with neither attribute would raise AttributeError on the next line. It
    # looks like it may have been intended as `and` -- left unchanged, confirm
    # against upstream before altering.
    if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
        if isinstance(azure_object.provisioning_state, Enum):
            if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
            return
        if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
            self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
def get_blob_client(self, resource_group_name, storage_account_name):
    '''
    Return a block blob service client for the given storage account.

    :param resource_group_name: resource group containing the storage account
    :param storage_account_name: name of the storage account
    :return: block blob service client (any failure aborts the module via self.fail)
    '''
    # NOTE: removed an unused local (`keys = dict()`) that was never read.
    try:
        # Get keys from the storage account
        self.log('Getting keys')
        account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
    except Exception as exc:
        self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
    try:
        self.log('Create blob service')
        # Authenticate with the first account key.
        return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
    except Exception as exc:
        self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
                                                                                            str(exc)))
def create_default_pip(self, resource_group, location, name, allocation_method='Dynamic'):
    '''
    Create a default public IP address <name>01 to associate with a network interface.
    If a PIP address matching <vm name>01 exists, return it. Otherwise, create one.

    :param resource_group: name of an existing resource group
    :param location: a valid azure location
    :param name: base name to assign the public IP address
    :param allocation_method: one of 'Static' or 'Dynamic'
    :return: PIP object
    '''
    public_ip_name = name + '01'
    self.log("Starting create_default_pip {0}".format(public_ip_name))

    # Re-use an existing address if one is already provisioned.
    self.log("Check to see if public IP {0} exists".format(public_ip_name))
    existing = None
    try:
        existing = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
    except CloudError:
        # Not found -- fall through and create it.
        pass
    if existing:
        self.log("Public ip {0} found.".format(public_ip_name))
        self.check_provisioning_state(existing)
        return existing

    params = PublicIPAddress(
        location=location,
        public_ip_allocation_method=allocation_method,
    )
    self.log('Creating default public IP {0}'.format(public_ip_name))
    try:
        poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
    except Exception as exc:
        self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
    return self.get_poller_result(poller)
def create_default_securitygroup(self, resource_group, location, name, os_type, open_ports):
    '''
    Create a default security group <name>01 to associate with a network interface. If a security group matching
    <name>01 exists, return it. Otherwise, create one.

    :param resource_group: Resource group name
    :param location: azure location name
    :param name: base name to use for the security group
    :param os_type: one of 'Windows' or 'Linux'. Determines the default inbound rules added when
                    open_ports is empty: SSH on 22 for Linux; RDP on 3389 plus WinRM HTTPS on 5986
                    for Windows.
    :param open_ports: optional list of port numbers; when given, one inbound TCP Allow rule is
                       created per port instead of the OS-type defaults.
    :return: security_group object
    '''
    # Docstring fixed: it previously documented nonexistent ssh_port/rdp_port
    # parameters and omitted open_ports; "Determins" typo corrected.
    security_group_name = name + '01'
    group = None
    self.log("Create security group {0}".format(security_group_name))
    self.log("Check to see if security group {0} exists".format(security_group_name))
    try:
        group = self.network_client.network_security_groups.get(resource_group, security_group_name)
    except CloudError:
        pass
    if group:
        self.log("Security group {0} found.".format(security_group_name))
        self.check_provisioning_state(group)
        return group
    parameters = NetworkSecurityGroup()
    parameters.location = location
    if not open_ports:
        # Open default ports based on OS type
        if os_type == 'Linux':
            # add an inbound SSH rule
            parameters.security_rules = [
                SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access',
                             source_port_range='*', destination_port_range='22', priority=100, name='SSH')
            ]
            # (removed a redundant `parameters.location = location` here; the
            # location is already set unconditionally above)
        else:
            # for windows add inbound RDP and WinRM rules
            parameters.security_rules = [
                SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389',
                             source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'),
                SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986',
                             source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'),
            ]
    else:
        # Open custom ports; priorities start at 101 and increment per rule.
        parameters.security_rules = []
        priority = 100
        for port in open_ports:
            priority += 1
            rule_name = "Rule_{0}".format(priority)
            parameters.security_rules.append(
                SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', source_port_range='*',
                             destination_port_range=str(port), priority=priority, name=rule_name)
            )
    self.log('Creating default security group {0}'.format(security_group_name))
    try:
        poller = self.network_client.network_security_groups.create_or_update(resource_group,
                                                                              security_group_name,
                                                                              parameters)
    except Exception as exc:
        self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
    return self.get_poller_result(poller)
def _register(self, key):
    '''
    Best-effort one-time registration of an Azure resource provider.

    :param key: provider namespace, e.g. 'Microsoft.Storage'
    :return: None; failures are logged with guidance rather than raised.
    '''
    try:
        # We have to perform the one-time registration here. Otherwise, we receive an error the first
        # time we attempt to use the requested client.
        self.rm_client.providers.register(key)
    except Exception as exc:
        self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
        self.log("You might need to register {0} using an admin account".format(key))
        self.log(("To register a provider using the Python CLI: "
                  "https://docs.microsoft.com/azure/azure-resource-manager/"
                  "resource-manager-common-deployment-errors#noregisteredproviderfound"))
@property
def storage_client(self):
    '''Lazily-built StorageManagementClient; registers Microsoft.Storage on first use.'''
    self.log('Getting storage client...')
    if self._storage_client:
        return self._storage_client
    self.check_client_version('storage', storage_client_version,
                              AZURE_EXPECTED_VERSIONS['storage_client_version'])
    self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id)
    self._register('Microsoft.Storage')
    return self._storage_client
@property
def network_client(self):
    '''Lazily-built NetworkManagementClient; registers Microsoft.Network on first use.'''
    self.log('Getting network client')
    if self._network_client:
        return self._network_client
    self.check_client_version('network', network_client_version,
                              AZURE_EXPECTED_VERSIONS['network_client_version'])
    self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
    self._register('Microsoft.Network')
    return self._network_client
@property
def rm_client(self):
    '''Lazily-built ResourceManagementClient.

    Note: unlike the other clients, no provider registration happens here --
    _register() itself relies on this client.
    '''
    self.log('Getting resource manager client')
    if self._resource_client:
        return self._resource_client
    self.check_client_version('resource', resource_client_version,
                              AZURE_EXPECTED_VERSIONS['resource_client_version'])
    self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
    return self._resource_client
@property
def compute_client(self):
    '''Lazily-built ComputeManagementClient; registers Microsoft.Compute on first use.'''
    self.log('Getting compute client')
    if self._compute_client:
        return self._compute_client
    self.check_client_version('compute', compute_client_version,
                              AZURE_EXPECTED_VERSIONS['compute_client_version'])
    self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
    self._register('Microsoft.Compute')
    return self._compute_client
| gpl-3.0 |
scorpionis/docklet | client/venv/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
    """Coerce *s* to text: None passes through, text is returned unchanged,
    and byte strings are strictly decoded as UTF-8."""
    if s is None:
        return None
    if isinstance(s, text_type):
        return s
    return s.decode("utf-8", "strict")
class Root(object):
    """Adapter presenting an lxml ElementTree as a single walkable root node
    with ``children``, ``text``/``tail`` and element-like accessors."""

    def __init__(self, et):
        self.elementtree = et
        self.children = []

        # Surface an internal DTD, if present, as a Doctype child node.
        if et.docinfo.internalDTD:
            self.children.append(Doctype(self,
                                         ensure_str(et.docinfo.root_name),
                                         ensure_str(et.docinfo.public_id),
                                         ensure_str(et.docinfo.system_url)))

        # Rewind to the first top-level sibling (comments/PIs may precede the
        # root element), then collect all top-level nodes in document order.
        node = et.getroot()
        while node.getprevious() is not None:
            node = node.getprevious()
        while node is not None:
            self.children.append(node)
            node = node.getnext()

        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # The document root has no siblings.
        return None

    def __len__(self):
        return 1
class Doctype(object):
    """Stand-in node for an lxml DTD declaration.

    Exposes just enough of the element API (``text``/``tail``/``getnext``)
    for the tree walker to treat it like a regular node.
    """

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node = root_node
        self.name = name
        self.public_id = public_id
        self.system_id = system_id
        # Doctypes carry no character data.
        self.text = None
        self.tail = None

    def getnext(self):
        # The doctype is always the root's first child, so its following
        # sibling is the root's second child.
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Root adapter for a list of parsed fragments.

    Deliberately skips ``Root.__init__`` -- a fragment list has no document
    info or DTD; each fragment is wrapped for walker-friendly access.
    """

    def __init__(self, children):
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None

    def getnext(self):
        return None
class FragmentWrapper(object):
    """Proxy around one fragment child.

    Adds normalized ``text``/``tail`` attributes and sibling navigation via
    the owning :class:`FragmentRoot`, delegating every other attribute to
    the wrapped object.
    """

    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        # Bare strings in the fragment list have no text/tail of their own.
        self.text = ensure_str(self.obj.text) if hasattr(self.obj, 'text') else None
        self.tail = ensure_str(self.obj.tail) if hasattr(self.obj, 'tail') else None

    def __getattr__(self, name):
        # Fall through to the wrapped fragment for anything not set here.
        return getattr(self.obj, name)

    def getnext(self):
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        return None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
    """Non-recursive html5lib tree walker over lxml.etree trees.

    lxml stores character data on elements as ``.text``/``.tail`` attributes
    rather than as child nodes, so text positions are modelled throughout as
    ``(element, "text")`` / ``(element, "tail")`` tuples.
    """

    def __init__(self, tree):
        # Accept either a full ElementTree (has getroot) or a bare list of
        # fragments; wrap both so traversal starts from a single root node.
        if hasattr(tree, "getroot"):
            tree = Root(tree)
        elif isinstance(tree, list):
            tree = FragmentRoot(tree)
        _base.NonRecursiveTreeWalker.__init__(self, tree)
        self.filter = ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        # Map each node kind to the html5lib token tuple the walker expects.
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            return _base.TEXT, ensure_str(getattr(node, key))
        elif isinstance(node, Root):
            return (_base.DOCUMENT,)
        elif isinstance(node, Doctype):
            return _base.DOCTYPE, node.name, node.public_id, node.system_id
        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
            # A wrapped bare string in a fragment list.
            return _base.TEXT, node.obj
        elif node.tag == etree.Comment:
            return _base.COMMENT, ensure_str(node.text)
        elif node.tag == etree.Entity:
            return _base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;
        else:
            # This is assumed to be an ordinary element
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = {}
            # Attribute names may be Clark-notation "{ns}local"; split them
            # into (namespace, local) keys.
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        assert not isinstance(node, tuple), "Text nodes have no children"
        assert len(node) or node.text, "Node has no children"
        # Leading text comes before the first child element.
        if node.text:
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()
        # An element's tail text is its immediate following sibling.
        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                return node
            # else: fallback to "normal" processing
        return node.getparent()
| bsd-3-clause |
eXistenZNL/SickRage | lib/sqlalchemy/sql/elements.py | 75 | 117708 | # sql/elements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative, Generative
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    e.g.::

        collate(mycolumn, 'utf8_bin')

    produces::

        mycolumn COLLATE utf8_bin

    """
    left = _literal_as_binds(expression)
    right = _literal_as_text(collation)
    # The result carries the left-hand expression's type.
    return BinaryExpression(left, right, operators.collate, type_=left.type)
def between(expr, lower_bound, upper_bound):
    """Produce a ``BETWEEN`` predicate clause.

    Standalone form of :meth:`.ColumnElement.between`::

        from sqlalchemy import between
        stmt = select([users_table]).where(between(users_table.c.id, 5, 7))

    renders SQL resembling::

        SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2

    Any argument that is not already a :class:`.ColumnElement` is coerced
    into a literal bound parameter, so even ``between(5, 3, 7)`` works,
    producing ``:param_1 BETWEEN :param_2 AND :param_3``.

    :param expr: column expression or Python scalar serving as the left side
      of the ``BETWEEN`` expression.

    :param lower_bound: column or Python scalar for the lower bound of the
      right side.

    :param upper_bound: column or Python scalar for the upper bound of the
      right side.

    .. seealso::

        :meth:`.ColumnElement.between`

    """
    coerced = _literal_as_binds(expr)
    return coerced.between(lower_bound, upper_bound)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when plain Python values
    (strings, ints, dates, etc.) appear in a comparison with a
    :class:`.ColumnElement` subclass such as a
    :class:`~sqlalchemy.schema.Column`.  Use this function to force that
    coercion explicitly; the result is a :class:`BindParameter` carrying
    the bound value.

    :param value: the value to be bound.  Can be any Python object supported
      by the underlying DB-API, or translatable via the given type argument.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
      will provide bind-parameter translation for this literal.

    """
    # An anonymous, unique bind parameter keeps multiple literals distinct.
    return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expression, type_):
    """Associate a SQL expression with a particular type, without rendering
    ``CAST``.

    E.g.::

        from sqlalchemy import type_coerce

        stmt = select([type_coerce(log_table.date_string, StringDateTime())])

    The emitted SQL is usually unchanged; instead, the given type takes
    effect on the Python side: result rows for the expression are processed
    through the type's result handlers, and literal values or
    :func:`.bindparam` constructs passed in receive the type's bound-value
    handling (:meth:`.TypeEngine.bind_expression` /
    :meth:`.TypeEngine.bind_processor`) at statement compilation/execution
    time.

    Similar to :func:`.cast`, except no ``CAST`` expression is rendered.

    :param expression: A SQL expression, such as a :class:`.ColumnElement`
      expression or a Python string which will be coerced into a bound
      literal value.

    :param type_: A :class:`.TypeEngine` class or instance indicating
      the type to which the expression is coerced.

    .. seealso::

        :func:`.cast`

    """
    coerced_type = type_api.to_instance(type_)

    # ORM-level constructs expose their Core element via __clause_element__.
    if hasattr(expression, '__clause_element__'):
        return type_coerce(expression.__clause_element__(), coerced_type)

    if isinstance(expression, BindParameter):
        # Re-type a copy of the bind parameter rather than mutating it.
        clone = expression._clone()
        clone.type = coerced_type
        return clone

    if isinstance(expression, Visitable):
        # Wrap an existing clause element in an anonymous, typed label.
        return Label(None, expression, type_=coerced_type)

    # Plain Python value: None maps to NULL, anything else becomes a literal.
    if expression is None:
        return Null()
    return literal(expression, type_=coerced_type)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.

    The ``outparam`` can be used like a regular function parameter.
    The "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
    attribute, which returns a dictionary containing the values.

    """
    return BindParameter(key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator, overloaded on all :class:`.ColumnElement`
    subclasses, produces the same result.

    """
    coerced = _literal_as_binds(clause)
    return operators.inv(coerced)
@inspection._self_inspects
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.

    """
    __visit_name__ = 'clause'

    # Class-level defaults; instances never mutate these shared objects --
    # annotation/cloning always produce new copies.
    _annotations = {}
    supports_execution = False
    _from_objects = []
    bind = None
    _is_clone_of = None
    is_selectable = False
    is_clause_element = True

    _order_by_label_element = None

    def _clone(self):
        """Create a shallow copy of this ClauseElement.

        This method may be used by a generative API.  Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.

        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # reset memoized attributes on the copy so they recompute lazily
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)

        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses.  the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self

        return c

    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.

        This is for the purposes for creating a new object of
        this type.   Usually, its just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.

        """
        return self.__class__

    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting all cloned ancestors of this
        ClauseElement.

        Includes this ClauseElement.  This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.

        """
        # walk the _is_clone_of chain back to the original element
        s = util.column_set()
        f = self
        while f is not None:
            s.add(f)
            f = f._is_clone_of
        return s

    def __getstate__(self):
        # drop the identity link to the clone source; it cannot survive
        # pickling meaningfully
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d

    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.

        """
        return Annotated(self, values)

    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.

        """
        return Annotated(self, values)

    def _deannotate(self, values=None, clone=False):
        """return a copy of this :class:`.ClauseElement` with annotations
        removed.

        :param values: optional tuple of individual values
         to remove.

        """
        if clone:
            # clone is used when we are also copying
            # the expression for a deep deannotation
            return self._clone()
        else:
            # if no clone, since we have no annotations we return
            # self
            return self

    def _execute_on_connection(self, connection, multiparams, params):
        # hook used by Connection.execute() to dispatch this element
        return connection._execute_clauseelement(self, multiparams, params)

    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.

        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.

        """
        return self._params(True, optionaldict, kwargs)

    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.

        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::

          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}

        """
        return self._params(False, optionaldict, kwargs)

    def _params(self, unique, optionaldict, kwargs):
        # shared implementation for params() / unique_params(); accepts at
        # most one positional dict merged over keyword arguments
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")

        def visit_bindparam(bind):
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
                bind.required = False
            if unique:
                bind._convert_to_unique()
        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})

    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.

        Subclasses should override the default behavior, which is a
        straight identity comparison.

        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)

        """
        return self is other

    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.

        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.

        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).

        """
        pass

    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.

        This is used for visit traversal.

        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).

        """
        return []

    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.

        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parenthesis.   In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`.  (Note that
        subqueries should be normally created using the
        :meth:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).

        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly.  Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.

        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self

    @util.dependencies("sqlalchemy.engine.default")
    def compile(self, default, bind=None, dialect=None, **kw):
        """Compile this SQL expression.

        The return value is a :class:`~.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.

        :param bind: An ``Engine`` or ``Connection`` from which a
            ``Compiled`` will be acquired. This argument takes precedence over
            this :class:`.ClauseElement`'s bound engine, if any.

        :param column_keys: Used for INSERT and UPDATE statements, a list of
            column names which should be present in the VALUES clause of the
            compiled statement. If ``None``, all columns from the target table
            object are rendered.

        :param dialect: A ``Dialect`` instance from which a ``Compiled``
            will be acquired. This argument takes precedence over the `bind`
            argument as well as this :class:`.ClauseElement`'s bound engine, if
            any.

        :param inline: Used for INSERT statements, for a dialect which does
            not support inline retrieval of newly generated primary key
            columns, will force the expression used to create the new primary
            key value to be rendered inline within the INSERT statement's
            VALUES clause. This typically refers to Sequence execution but may
            also refer to any server-side default generation function
            associated with a primary key `Column`.

        """
        # dialect resolution order: explicit dialect arg, then the given
        # bind, then this element's own bind, then a generic default dialect
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                dialect = default.DefaultDialect()
        return self._compiler(dialect, bind=bind, **kw)

    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""

        return dialect.statement_compiler(dialect, self, **kw)

    def __str__(self):
        if util.py3k:
            return str(self.compile())
        else:
            # Python 2: compile to unicode, then force an ASCII byte string
            return unicode(self.compile()).encode('ascii', 'backslashreplace')

    def __and__(self, other):
        return and_(self, other)

    def __or__(self, other):
        return or_(self, other)

    def __invert__(self):
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return self._negate()

    def __bool__(self):
        raise TypeError("Boolean value of this clause is not defined")

    # Python 2 truthiness hook aliases the Python 3 one
    __nonzero__ = __bool__

    def _negate(self):
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            negate=None)

    def __repr__(self):
        friendly = getattr(self, 'description', None)
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(ClauseElement, operators.ColumnOperators):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression level,
and are intended to accept instances of :class:`.ColumnElement` as arguments.
These functions will typically document that they accept a "SQL expression"
as an argument. What this means in terms of SQLAlchemy usually refers
to an input which is either already in the form of a :class:`.ColumnElement`
object, or a value which can be **coerced** into one. The coercion
rules followed by most, but not all, SQLAlchemy Core functions with regards
to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound value".
This generally means that a :func:`.bindparam` will be produced
featuring the given value embedded into the construct; the resulting
:class:`.BindParameter` object is an instance of :class:`.ColumnElement`.
The Python value will ultimately be sent to the DBAPI at execution time as a
paramterized argument to the ``execute()`` or ``executemany()`` methods,
after SQLAlchemy type-specific converters (e.g. those provided by
any associated :class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which feature
a method called ``__clause_element__()``. The Core expression system
looks for this method when an object of otherwise unknown type is passed
to a function that is looking to coerce the argument into a :class:`.ColumnElement`
expression. The ``__clause_element__()`` method, if present, should
return a :class:`.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound attributes
on ORM-mapped classes; a ``User`` class which contains a mapped attribute
named ``.name`` will have a method ``User.name.__clause_element__()``
which when invoked returns the :class:`.Column` called ``name`` associated
with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``, which
in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
.. seealso::
:class:`.Column`
:func:`.expression.column`
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
_key_label = key = None
_alt_names = ()
def self_group(self, against=None):
if against in (operators.and_, operators.or_, operators._asbool) and \
self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.istrue, operators.isfalse)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
return self.type.comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
    def _bind_param(self, operator, obj):
        # Coerce a literal Python value into a BindParameter, letting this
        # element's type (via _compared_to_type) drive value coercion.
        return BindParameter(None, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)
    @property
    def expression(self):
        """Return a column expression.

        Part of the inspection interface; returns self.
        """
        return self
    @property
    def _select_iterable(self):
        # The sequence of column elements rendered when this element is
        # placed in the columns clause of a SELECT.
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # Root columns of this element's lineage: members of proxy_set
        # that themselves proxy for nothing (no _proxies attribute).
        return util.column_set(c for c in self.proxy_set
                               if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
    def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.

        :param selectable: the enclosing selectable; mutated in place by
         having the new proxy column added to its ``_columns`` collection.
        :param name: explicit name for the proxy; when None an anonymous
         label is generated.
        :param name_is_truncatable: when True, the name is wrapped so the
         compiler may truncate it to the dialect's identifier length.
        """
        if name is None:
            name = self.anon_label
            # derive the collection key only in the anonymous case
            if self.key:
                key = self.key
            else:
                try:
                    key = str(self)
                except exc.UnsupportedCompilationError:
                    # element has no generic string form; fall back to
                    # the anonymous label as the key
                    key = self.anon_label
        else:
            key = name
        co = ColumnClause(
            _as_truncated(name) if name_is_truncatable else name,
            type_=getattr(self, 'type', None),
            _selectable=selectable
        )
        # record this element as the origin of the proxy for lineage tests
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass the
comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.

        This is a shortcut to the :func:`~.expression.label` function.

        If 'name' is None, an anonymous label name will be generated.
        """
        return Label(name, self, self.type)
    @util.memoized_property
    def anon_label(self):
        """Provides a constant 'anonymous label' for this ColumnElement.

        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.

        The compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        # embeds id(self) in the placeholder so the generated name is
        # unique per element; resolved to a real name at compile time
        return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
                                                'name', 'anon')))
class BindParameter(ColumnElement):
    """Represent a "bound expression".

    :class:`.BindParameter` is invoked explicitly using the
    :func:`.bindparam` function, as in::

        from sqlalchemy import bindparam

        stmt = select([users_table]).\\
                    where(users_table.c.name == bindparam('username'))

    Detailed discussion of how :class:`.BindParameter` is used is
    at :func:`.bindparam`.

    .. seealso::

        :func:`.bindparam`

    """
    __visit_name__ = 'bindparam'

    # set True for parameters generated as part of an INSERT/UPDATE
    # VALUES clause during crud compilation
    _is_crud = False

    def __init__(self, key, value=NO_ARG, type_=None,
                 unique=False, required=NO_ARG,
                 quote=None, callable_=None,
                 isoutparam=False,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Produce a "bound expression".

        The return value is an instance of :class:`.BindParameter`; this
        is a :class:`.ColumnElement` subclass which represents a so-called
        "placeholder" value in a SQL expression, the value of which is
        supplied at the point at which the statement in executed against a
        database connection.

        In SQLAlchemy, the :func:`.bindparam` construct has
        the ability to carry along the actual value that will be ultimately
        used at expression time.  In this way, it serves not just as
        a "placeholder" for eventual population, but also as a means of
        representing so-called "unsafe" values which should not be rendered
        directly in a SQL statement, but rather should be passed along
        to the :term:`DBAPI` as values which need to be correctly escaped
        and potentially handled for type-safety.

        When using :func:`.bindparam` explicitly, the use case is typically
        one of traditional deferment of parameters; the :func:`.bindparam`
        construct accepts a name which can then be referred to at execution
        time::

            from sqlalchemy import bindparam

            stmt = select([users_table]).\\
                        where(users_table.c.name == bindparam('username'))

        The above statement, when rendered, will produce SQL similar to::

            SELECT id, name FROM user WHERE name = :username

        In order to populate the value of ``:username`` above, the value
        would typically be applied at execution time to a method
        like :meth:`.Connection.execute`::

            result = connection.execute(stmt, username='wendy')

        Explicit use of :func:`.bindparam` is also common when producing
        UPDATE or DELETE statements that are to be invoked multiple times,
        where the WHERE criterion of the statement is to change on each
        invocation, such as::

            stmt = users_table.update().\\
                            where(user_table.c.name == bindparam('username')).\\
                            values(fullname=bindparam('fullname'))

            connection.execute(stmt, [
                {"username": "wendy", "fullname": "Wendy Smith"},
                {"username": "jack", "fullname": "Jack Jones"},
            ])

        SQLAlchemy's Core expression system makes wide use of
        :func:`.bindparam` in an implicit sense.  It is typical that Python
        literal values passed to virtually all SQL expression functions are
        coerced into fixed :func:`.bindparam` constructs.  For example, given
        a comparison operation such as::

            expr = users_table.c.name == 'Wendy'

        The above expression will produce a :class:`.BinaryExpression`
        construct, where the left side is the :class:`.Column` object
        representing the ``name`` column, and the right side is a
        :class:`.BindParameter` representing the literal value::

            print(repr(expr.right))
            BindParameter('%(4327771088 name)s', 'Wendy', type_=String())

        The expression above will render SQL such as::

            user.name = :name_1

        Where the ``:name_1`` parameter name is an anonymous name.  The
        actual string ``Wendy`` is not in the rendered string, but is carried
        along where it is later used within statement execution.  If we
        invoke a statement like the following::

            stmt = select([users_table]).where(users_table.c.name == 'Wendy')
            result = connection.execute(stmt)

        We would see SQL logging output as::

            SELECT "user".id, "user".name
            FROM "user"
            WHERE "user".name = %(name_1)s
            {'name_1': 'Wendy'}

        Above, we see that ``Wendy`` is passed as a parameter to the
        database, while the placeholder ``:name_1`` is rendered in the
        appropriate form for the target database, in this case the
        Postgresql database.

        Similarly, :func:`.bindparam` is invoked automatically
        when working with :term:`CRUD` statements as far as the "VALUES"
        portion is concerned.   The :func:`.insert` construct produces an
        ``INSERT`` expression which will, at statement execution time,
        generate bound placeholders based on the arguments passed, as in::

            stmt = users_table.insert()
            result = connection.execute(stmt, name='Wendy')

        The above will produce SQL output as::

            INSERT INTO "user" (name) VALUES (%(name)s)
            {'name': 'Wendy'}

        The :class:`.Insert` construct, at compilation/execution time,
        rendered a single :func:`.bindparam` mirroring the column
        name ``name`` as a result of the single ``name`` parameter
        we passed to the :meth:`.Connection.execute` method.

        :param key:
          the key (e.g. the name) for this bind param.
          Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  Will be used at statement
          execution time as the value for this parameter passed to the
          DBAPI, if no other value is indicated to the statement execution
          method for this particular parameter name.  Defaults to ``None``.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still
          desirable.

        :param type\_:
          A :class:`.TypeEngine` class or instance representing an optional
          datatype for this :func:`.bindparam`.  If not passed, a type
          may be determined automatically for the bind, based on the given
          value; for example, trivial Python types such as ``str``,
          ``int``, ``bool``
          may result in the :class:`.String`, :class:`.Integer` or
          :class:`.Boolean` types being automatically selected.

          The type of a :func:`.bindparam` is significant especially in that
          the type will apply pre-processing to the value before it is
          passed to the database.  For example, a :func:`.bindparam` which
          refers to a datetime value, and is specified as holding the
          :class:`.DateTime` type, may apply conversion needed to the
          value (such as stringification on SQLite) before passing the value
          to the database.

        :param unique:
          if True, the key name of this :class:`.BindParameter` will be
          modified if another :class:`.BindParameter` of the same name
          already has been located within the containing
          expression.  This flag is used generally by the internals
          when producing so-called "anonymous" bound expressions, it
          isn't generally applicable to explicitly-named :func:`.bindparam`
          constructs.

        :param required:
          If ``True``, a value is required at execution time.  If not passed,
          it defaults to ``True`` if neither :paramref:`.bindparam.value`
          or :paramref:`.bindparam.callable` were passed.  If either of these
          parameters are present, then :paramref:`.bindparam.required`
          defaults to ``False``.

          .. versionchanged:: 0.8 If the ``required`` flag is not specified,
             it will be set automatically to ``True`` or ``False`` depending
             on whether or not the ``value`` or ``callable`` parameters
             were specified.

        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend, where bound names must
          sometimes be quoted.

        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.  This applies to backends such as Oracle which
          support OUT parameters.

        .. seealso::

            :ref:`coretutorial_bind_param`

            :ref:`coretutorial_insert_expressions`

            :func:`.outparam`

        """
        # a ColumnClause passed as the key supplies both name and type
        if isinstance(key, ColumnClause):
            type_ = key.type
            key = key.name
        # "required" defaults to True only when no value/callable is given
        if required is NO_ARG:
            required = (value is NO_ARG and callable_ is None)
        if value is NO_ARG:
            value = None
        if quote is not None:
            key = quoted_name(key, quote)
        if unique:
            # anonymous key embedding id(self); finalized at compile time
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                                                        or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                                               % id(self))
        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key
        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        if type_ is None:
            if _compared_to_type is not None:
                # infer the type by asking the comparison counterpart's
                # type to coerce this literal value
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                # fall back to a plain Python-type -> SQL-type lookup
                self.type = type_api._type_map.get(type(value),
                                                   type_api.NULLTYPE)
        elif isinstance(type_, type):
            # a type class was passed; instantiate with defaults
            self.type = type_()
        else:
            self.type = type_

    def _with_value(self, value):
        """Return a copy of this :class:`.BindParameter` with the given
        value set; the copy is no longer "required" and any callable is
        discarded.
        """
        cloned = self._clone()
        cloned.value = value
        cloned.callable = None
        cloned.required = False
        if cloned.type is type_api.NULLTYPE:
            # re-infer the type now that an actual value is present
            cloned.type = type_api._type_map.get(type(value),
                                                 type_api.NULLTYPE)
        return cloned

    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.

        The ``callable`` value will be evaluated
        and returned if present, else ``value``.

        """
        if self.callable:
            return self.callable()
        else:
            return self.value

    def _clone(self):
        # clones of a "unique" parameter receive a fresh anonymous key
        c = ClauseElement._clone(self)
        if self.unique:
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                                                     or 'param'))
        return c

    def _convert_to_unique(self):
        # switch this parameter to "unique" mode in place, regenerating
        # the key from the originally-passed name
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label('%%(%d %s)s' % (id(self),
                                                        self._orig_key or 'param'))

    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""
        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value

    def __getstate__(self):
        """execute a deferred value for serialization purposes."""
        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            # callables may not be picklable; evaluate and store the result
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d

    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                                                    self.value, self.type)
class TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.

    Used by the ``Case`` statement.

    """
    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # the type object to be rendered; the parameter name shadows the
        # builtin ``type`` but is retained for backwards compatibility
        self.type = type
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.

    E.g.::

        from sqlalchemy import text

        t = text("SELECT * FROM users")
        result = connection.execute(t)

    The :class:`.Text` construct is produced using the :func:`.text`
    function; see that function for full documentation.

    .. seealso::

        :func:`.text`

    """
    __visit_name__ = 'textclause'

    # matches ":name" bind-parameter tokens; the lookbehind/lookahead skip
    # "::" casts, backslash-escaped colons (\x5c) and word-adjacent colons
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    @property
    def selectable(self):
        # inspection interface; a text construct stands in for itself
        return self

    _hide_froms = []

    def __init__(
            self,
            text,
            bind=None):
        self._bind = bind
        self._bindparams = {}

        def repl(m):
            # record a BindParameter for each ":name" token encountered
            self._bindparams[m.group(1)] = BindParameter(m.group(1))
            return ':%s' % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)

    @classmethod
    def _create_text(self, text, bind=None, bindparams=None,
                     typemap=None, autocommit=None):
        """Construct a new :class:`.TextClause` clause, representing
        a textual SQL string directly.

        E.g.::

            from sqlalchemy import text

            t = text("SELECT * FROM users")
            result = connection.execute(t)

        The advantages :func:`.text` provides over a plain string are
        backend-neutral support for bind parameters, per-statement
        execution options, as well as
        bind parameter and result-column typing behavior, allowing
        SQLAlchemy type constructs to play a role when executing
        a statement that is specified literally.  The construct can also
        be provided with a ``.c`` collection of column elements, allowing
        it to be embedded in other SQL expression constructs as a subquery.

        Bind parameters are specified by name, using the format ``:name``.
        E.g.::

            t = text("SELECT * FROM users WHERE id=:user_id")
            result = connection.execute(t, user_id=12)

        For SQL statements where a colon is required verbatim, as within
        an inline string, use a backslash to escape::

            t = text("SELECT * FROM users WHERE name='\\:username'")

        The :class:`.TextClause` construct includes methods which can
        provide information about the bound parameters as well as the column
        values which would be returned from the textual statement, assuming
        it's an executable SELECT type of statement.  The
        :meth:`.TextClause.bindparams` method is used to provide bound
        parameter detail, and :meth:`.TextClause.columns` method allows
        specification of return columns including names and types::

            t = text("SELECT * FROM users WHERE id=:user_id").\\
                    bindparams(user_id=7).\\
                    columns(id=Integer, name=String)

            for id, name in connection.execute(t):
                print(id, name)

        The :func:`.text` construct is used internally in cases when
        a literal string is specified for part of a larger query, such as
        when a string is specified to the :meth:`.Select.where` method of
        :class:`.Select`.  In those cases, the same
        bind parameter syntax is applied::

            s = select([users.c.id, users.c.name]).where("id=:user_id")
            result = connection.execute(s, user_id=12)

        Using :func:`.text` explicitly usually implies the construction
        of a full, standalone statement.   As such, SQLAlchemy refers
        to it as an :class:`.Executable` object, and it supports
        the :meth:`Executable.execution_options` method.  For example,
        a :func:`.text` construct that should be subject to "autocommit"
        can be set explicitly so using the
        :paramref:`.Connection.execution_options.autocommit` option::

            t = text("EXEC my_procedural_thing()").\\
                    execution_options(autocommit=True)

        Note that SQLAlchemy's usual "autocommit" behavior applies to
        :func:`.text` constructs implicitly - that is, statements which begin
        with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
        or a variety of other phrases specific to certain backends, will
        be eligible for autocommit if no transaction is in progress.

        :param text:
          the text of the SQL statement to be created.  use ``:<param>``
          to specify bind parameters; they will be compiled to their
          engine-specific format.

        :param autocommit:
          Deprecated.  Use .execution_options(autocommit=<True|False>)
          to set the autocommit option.

        :param bind:
          an optional connection or engine to be used for this text query.

        :param bindparams:
          Deprecated.  A list of :func:`.bindparam` instances used to
          provide information about parameters embedded in the statement.
          This argument now invokes the :meth:`.TextClause.bindparams`
          method on the construct before returning it.  E.g.::

              stmt = text("SELECT * FROM table WHERE id=:id",
                        bindparams=[bindparam('id', value=5, type_=Integer)])

          Is equivalent to::

              stmt = text("SELECT * FROM table WHERE id=:id").\\
                        bindparams(bindparam('id', value=5, type_=Integer))

          .. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
             supersedes the ``bindparams`` argument to :func:`.text`.

        :param typemap:
          Deprecated.  A dictionary mapping the names of columns
          represented in the columns clause of a ``SELECT`` statement
          to type objects,
          which will be used to perform post-processing on columns within
          the result set.  This parameter now invokes the
          :meth:`.TextClause.columns` method, which returns a
          :class:`.TextAsFrom` construct that gains a ``.c`` collection and
          can be embedded in other expressions.  E.g.::

              stmt = text("SELECT * FROM table",
                            typemap={'id': Integer, 'name': String},
                        )

          Is equivalent to::

              stmt = text("SELECT * FROM table").columns(id=Integer,
                                                         name=String)

          Or alternatively::

              from sqlalchemy.sql import column
              stmt = text("SELECT * FROM table").columns(
                                    column('id', Integer),
                                    column('name', String)
                                )

          .. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
             supersedes the ``typemap`` argument to :func:`.text`.

        """
        # NOTE: first argument is named ``self`` although this is a
        # classmethod; retained as-is for backwards compatibility
        stmt = TextClause(text, bind=bind)
        if bindparams:
            stmt = stmt.bindparams(*bindparams)
        if typemap:
            stmt = stmt.columns(**typemap)
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated. '
                                 'Use .execution_options(autocommit=True)')
            stmt = stmt.execution_options(autocommit=autocommit)
        return stmt

    @_generative
    def bindparams(self, *binds, **names_to_values):
        """Establish the values and/or types of bound parameters within
        this :class:`.TextClause` construct.

        Given a text construct such as::

            from sqlalchemy import text
            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")

        the :meth:`.TextClause.bindparams` method can be used to establish
        the initial value of ``:name`` and ``:timestamp``,
        using simple keyword arguments::

            stmt = stmt.bindparams(name='jack',
                        timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))

        Where above, new :class:`.BindParameter` objects
        will be generated with the names ``name`` and ``timestamp``, and
        values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
        respectively.  The types will be
        inferred from the values given, in this case :class:`.String` and
        :class:`.DateTime`.

        When specific typing behavior is needed, the positional ``*binds``
        argument can be used in which to specify :func:`.bindparam`
        constructs directly.  These constructs must include at least the
        ``key`` argument, then an optional value and type::

            from sqlalchemy import bindparam
            stmt = stmt.bindparams(
                            bindparam('name', value='jack', type_=String),
                            bindparam('timestamp', type_=DateTime)
                        )

        Above, we specified the type of :class:`.DateTime` for the
        ``timestamp`` bind, and the type of :class:`.String` for the ``name``
        bind.  In the case of ``name`` we also set the default value of
        ``"jack"``.

        Additional bound parameters can be supplied at statement execution
        time, e.g.::

            result = connection.execute(stmt,
                        timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))

        The :meth:`.TextClause.bindparams` method can be called repeatedly,
        where it will re-use existing :class:`.BindParameter` objects to add
        new information.  For example, we can call
        :meth:`.TextClause.bindparams` first with typing information, and a
        second time with value information, and it will be combined::

            stmt = text("SELECT id, name FROM user WHERE name=:name "
                        "AND timestamp=:timestamp")
            stmt = stmt.bindparams(
                bindparam('name', type_=String),
                bindparam('timestamp', type_=DateTime)
            )
            stmt = stmt.bindparams(
                name='jack',
                timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
            )

        .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
           supersedes the argument ``bindparams`` passed to
           :func:`~.expression.text`.

        """
        # @_generative: mutate a copy of self in place; parameters must
        # already exist in the text (discovered by __init__'s regex scan)
        self._bindparams = new_params = self._bindparams.copy()
        for bind in binds:
            try:
                existing = new_params[bind.key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % bind.key)
            else:
                # replace the placeholder parameter entirely
                new_params[existing.key] = bind
        for key, value in names_to_values.items():
            try:
                existing = new_params[key]
            except KeyError:
                raise exc.ArgumentError(
                    "This text() construct doesn't define a "
                    "bound parameter named %r" % key)
            else:
                # keep the existing parameter but attach the new value
                new_params[key] = existing._with_value(value)

    @util.dependencies('sqlalchemy.sql.selectable')
    def columns(self, selectable, *cols, **types):
        """Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
        object that can be embedded into another statement.

        This function essentially bridges the gap between an entirely
        textual SELECT statement and the SQL expression language concept
        of a "selectable"::

            from sqlalchemy.sql import column, text

            stmt = text("SELECT id, name FROM some_table")
            stmt = stmt.columns(column('id'), column('name')).alias('st')

            stmt = select([mytable]).\\
                    select_from(
                        mytable.join(stmt, mytable.c.name == stmt.c.name)
                    ).where(stmt.c.id > 5)

        Above, we used untyped :func:`.column` elements.  These can also have
        types specified, which will impact how the column behaves in
        expressions as well as determining result set behavior::

            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                        column('id', Integer),
                        column('name', Unicode),
                        column('timestamp', DateTime)
                    )

            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)

        Keyword arguments allow just the names and types of columns to be
        specified, where the :func:`.column` elements will be generated
        automatically::

            stmt = text("SELECT id, name, timestamp FROM some_table")
            stmt = stmt.columns(
                        id=Integer,
                        name=Unicode,
                        timestamp=DateTime
                    )

            for id, name, timestamp in connection.execute(stmt):
                print(id, name, timestamp)

        The :meth:`.TextClause.columns` method provides a direct
        route to calling :meth:`.FromClause.alias` as well as
        :meth:`.SelectBase.cte` against a textual SELECT statement::

            stmt = stmt.columns(id=Integer, name=String).cte('st')

            stmt = select([sometable]).where(sometable.c.id == stmt.c.id)

        .. versionadded:: 0.9.0 :func:`.text` can now be converted into a
           fully featured "selectable" construct using the
           :meth:`.TextClause.columns` method.  This method supersedes the
           ``typemap`` argument to :func:`.text`.

        """
        # ``selectable`` is the sqlalchemy.sql.selectable module, injected
        # as the first argument by the @util.dependencies decorator.
        # Positional columns whose key appears in ``types`` are rebuilt
        # with that type; remaining keyword entries become new columns.
        input_cols = [
            ColumnClause(col.key, types.pop(col.key))
            if col.key in types
            else col
            for col in cols
        ] + [ColumnClause(key, type_) for key, type_ in types.items()]
        return selectable.TextAsFrom(self, input_cols)

    @property
    def type(self):
        return type_api.NULLTYPE

    @property
    def comparator(self):
        return self.type.comparator_factory(self)

    def self_group(self, against=None):
        # parenthesize only when rendered as the target of IN
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self

    def _copy_internals(self, clone=_clone, **kw):
        self._bindparams = dict((b.key, clone(b, **kw))
                                for b in self._bindparams.values())

    def get_children(self, **kwargs):
        return list(self._bindparams.values())
class Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.

    :class:`.Null` is accessed as a constant via the
    :func:`.null` function.

    """
    __visit_name__ = 'null'

    @util.memoized_property
    def type(self):
        return type_api.NULLTYPE

    @classmethod
    def _singleton(cls):
        """Return a constant :class:`.Null` construct."""
        return NULL

    def compare(self, other):
        # all Null instances are interchangeable
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword, or equivalent, in a SQL statement.

    :class:`.False_` is accessed as a constant via the
    :func:`.false` function.

    """
    __visit_name__ = 'false'

    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE

    def _negate(self):
        # NOT false -> the TRUE singleton
        return TRUE

    @classmethod
    def _singleton(cls):
        """Return a constant :class:`.False_` construct.

        E.g.::

            >>> from sqlalchemy import false
            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE false

        A backend which does not support true/false constants will render as
        an expression against 1 or 0::

            >>> print select([t.c.x]).where(false())
            SELECT x FROM t WHERE 0 = 1

        The :func:`.true` and :func:`.false` constants also feature
        "short circuit" operation within an :func:`.and_` or :func:`.or_`
        conjunction::

            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true

            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false

        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.

        .. seealso::

            :func:`.true`

        """
        return FALSE

    def compare(self, other):
        # all False_ instances are interchangeable
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword, or equivalent, in a SQL statement.

    :class:`.True_` is accessed as a constant via the
    :func:`.true` function.

    """
    __visit_name__ = 'true'

    @util.memoized_property
    def type(self):
        return type_api.BOOLEANTYPE

    def _negate(self):
        # NOT true -> the FALSE singleton
        return FALSE

    @classmethod
    def _ifnone(cls, other):
        # coerce None to the TRUE singleton; pass other values through
        if other is None:
            return cls._singleton()
        else:
            return other

    @classmethod
    def _singleton(cls):
        """Return a constant :class:`.True_` construct.

        E.g.::

            >>> from sqlalchemy import true
            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE true

        A backend which does not support true/false constants will render as
        an expression against 1 or 0::

            >>> print select([t.c.x]).where(true())
            SELECT x FROM t WHERE 1 = 1

        The :func:`.true` and :func:`.false` constants also feature
        "short circuit" operation within an :func:`.and_` or :func:`.or_`
        conjunction::

            >>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
            SELECT x FROM t WHERE true

            >>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
            SELECT x FROM t WHERE false

        .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
           better integrated behavior within conjunctions and on dialects
           that don't support true/false constants.

        .. seealso::

            :func:`.false`

        """
        return TRUE

    def compare(self, other):
        # all True_ instances are interchangeable
        return isinstance(other, True_)
# canonical singleton instances returned by null(), false() and true()
NULL = Null()
FALSE = False_()
TRUE = True_()
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.

    By default, is comma-separated, such as a column listing.

    """
    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        # operator rendered between the elements; defaults to a comma
        self.operator = kwargs.pop('operator', operators.comma_op)
        # when True, self_group() may parenthesize this list
        self.group = kwargs.pop('group', True)
        # when True, each member is self-grouped against our operator
        self.group_contents = kwargs.pop('group_contents', True)
        if self.group_contents:
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses]

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # mirror the coercion/grouping behavior applied in __init__
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).\
                                self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))

    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))

    def self_group(self, against=None):
        # parenthesize only when our operator binds less tightly than the
        # operator this list is being placed against
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given
        :class:`.ClauseList`, including a comparison of all the clause
        items.

        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            # a single-element list compares directly to a bare clause
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                # for/else: the loop has no break, so this runs after every
                # pair compared equal; finish by comparing the operators
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    __visit_name__ = 'clauselist'

    def __init__(self, *arg, **kw):
        # instances are created only via and_() / or_() -> _construct()
        raise NotImplementedError(
            "BooleanClauseList has a private constructor")

    @classmethod
    def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
        """Build an AND/OR conjunction, applying short-circuit rules.

        :param operator: ``operators.and_`` or ``operators.or_``.
        :param continue_on: the identity element type for the operator
         (``True_`` for AND, ``False_`` for OR); such clauses are dropped.
        :param skip_on: the absorbing element type (``False_`` for AND,
         ``True_`` for OR); its presence short-circuits the whole result.
        """
        convert_clauses = []
        clauses = util.coerce_generator_arg(clauses)
        for clause in clauses:
            clause = _literal_as_text(clause)
            if isinstance(clause, continue_on):
                # identity element; contributes nothing to the conjunction
                continue
            elif isinstance(clause, skip_on):
                # absorbing element; the whole conjunction reduces to it
                return clause.self_group(against=operators._asbool)
            convert_clauses.append(clause)
        if len(convert_clauses) == 1:
            # single remaining element renders as the element itself
            return convert_clauses[0].self_group(against=operators._asbool)
        elif not convert_clauses and clauses:
            # every element was an identity value; return one of them
            return clauses[0].self_group(against=operators._asbool)
        convert_clauses = [c.self_group(against=operator)
                           for c in convert_clauses]
        # bypass __init__ (which is disabled) and assemble state directly
        self = cls.__new__(cls)
        self.clauses = convert_clauses
        self.group = True
        self.operator = operator
        self.group_contents = True
        self.type = type_api.BOOLEANTYPE
        return self

    @classmethod
    def and_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``AND``.

        E.g.::

            from sqlalchemy import and_

            stmt = select([users_table]).where(
                            and_(
                                users_table.c.name == 'wendy',
                                users_table.c.enrolled == True
                            )
                        )

        The :func:`.and_` conjunction is also available using the
        Python ``&`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::

            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') &
                            (users_table.c.enrolled == True)
                        )

        The :func:`.and_` operation is also implicit in some cases;
        the :meth:`.Select.where` method for example can be invoked multiple
        times against a statement, which will have the effect of each
        clause being combined using :func:`.and_`::

            stmt = select([users_table]).\\
                        where(users_table.c.name == 'wendy').\\
                        where(users_table.c.enrolled == True)

        .. seealso::

            :func:`.or_`

        """
        return cls._construct(operators.and_, True_, False_, *clauses)

    @classmethod
    def or_(cls, *clauses):
        """Produce a conjunction of expressions joined by ``OR``.

        E.g.::

            from sqlalchemy import or_

            stmt = select([users_table]).where(
                            or_(
                                users_table.c.name == 'wendy',
                                users_table.c.name == 'jack'
                            )
                        )

        The :func:`.or_` conjunction is also available using the
        Python ``|`` operator (though note that compound expressions
        need to be parenthesized in order to function with Python
        operator precedence behavior)::

            stmt = select([users_table]).where(
                            (users_table.c.name == 'wendy') |
                            (users_table.c.name == 'jack')
                        )

        .. seealso::

            :func:`.and_`

        """
        return cls._construct(operators.or_, False_, True_, *clauses)

    @property
    def _select_iterable(self):
        return (self, )

    def self_group(self, against=None):
        # an empty conjunction never needs parenthesization
        if not self.clauses:
            return self
        else:
            return super(BooleanClauseList, self).self_group(against=against)

    def _negate(self):
        # route negation through the ClauseList branch of the MRO,
        # skipping ColumnElement._negate's AsBoolean wrapping
        return ClauseList._negate(self)
# Module-level functional entry points for AND / OR conjunctions;
# these are the public and_() / or_() constructs.
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple."""
    def __init__(self, *clauses, **kw):
        """Return a :class:`.Tuple`.
        Main usage is to produce a composite IN construct::
            from sqlalchemy import tuple_
            tuple_(table.c.col1, table.c.col2).in_(
                [(1, 2), (5, 12), (10, 19)]
            )
        .. warning::
            The composite IN construct is not supported by all backends,
            and is currently known to work on Postgresql and MySQL,
            but not SQLite. Unsupported backends will raise
            a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
            an expression is invoked.
        """
        clauses = [_literal_as_binds(c) for c in clauses]
        # Record each element's type so that comparisons against this
        # tuple (e.g. IN) can produce bind parameters of matching types;
        # see _bind_param() below.
        self._type_tuple = [arg.type for arg in clauses]
        # The tuple's own type defaults to the first element's type,
        # or NULLTYPE for an empty tuple, unless type_ is given.
        self.type = kw.pop('type_', self._type_tuple[0]
                           if self._type_tuple else type_api.NULLTYPE)
        super(Tuple, self).__init__(*clauses, **kw)
    @property
    def _select_iterable(self):
        return (self, )
    def _bind_param(self, operator, obj):
        # Bind a compared-to tuple value element-wise, pairing each
        # incoming value with the corresponding element type.
        return Tuple(*[
            BindParameter(None, o, _compared_to_operator=operator,
                          _compared_to_type=type_, unique=True)
            for o, type_ in zip(obj, self._type_tuple)
        ]).self_group()
class Case(ColumnElement):
    """Represent a ``CASE`` expression.
    :class:`.Case` is produced using the :func:`.case` factory function,
    as in::
        from sqlalchemy import case
        stmt = select([users_table]).\\
                    where(
                        case(
                            [
                                (users_table.c.name == 'wendy', 'W'),
                                (users_table.c.name == 'jack', 'J')
                            ],
                            else_='E'
                        )
                    )
    Details on :class:`.Case` usage is at :func:`.case`.
    .. seealso::
        :func:`.case`
    """
    __visit_name__ = 'case'
    def __init__(self, whens, value=None, else_=None):
        """Produce a ``CASE`` expression.
        The ``CASE`` construct in SQL is a conditional object that
        acts somewhat analogously to an "if/then" construct in other
        languages.  It returns an instance of :class:`.Case`.
        :func:`.case` in its usual form is passed a list of "when"
        contructs, that is, a list of conditions and results as tuples::
            from sqlalchemy import case
            stmt = select([users_table]).\\
                        where(
                            case(
                                [
                                    (users_table.c.name == 'wendy', 'W'),
                                    (users_table.c.name == 'jack', 'J')
                                ],
                                else_='E'
                            )
                        )
        The above statement will produce SQL resembling::
            SELECT id, name FROM user
            WHERE CASE
                WHEN (name = :name_1) THEN :param_1
                WHEN (name = :name_2) THEN :param_2
                ELSE :param_3
            END
        When simple equality expressions of several values against a single
        parent column are needed, :func:`.case` also has a "shorthand" format
        used via the
        :paramref:`.case.value` parameter, which is passed a column
        expression to be compared.  In this form, the :paramref:`.case.whens`
        parameter is passed as a dictionary containing expressions to be compared
        against keyed to result expressions.  The statement below is equivalent
        to the preceding statement::
            stmt = select([users_table]).\\
                        where(
                            case(
                                {"wendy": "W", "jack": "J"},
                                value=users_table.c.name,
                                else_='E'
                            )
                        )
        The values which are accepted as result values in
        :paramref:`.case.whens` as well as with :paramref:`.case.else_` are
        coerced from Python literals into :func:`.bindparam` constructs.
        SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
        as well.  To coerce a literal string expression into a constant
        expression rendered inline, use the :func:`.literal_column` construct,
        as in::
            from sqlalchemy import case, literal_column
            case(
                [
                    (
                        orderline.c.qty > 100,
                        literal_column("'greaterthan100'")
                    ),
                    (
                        orderline.c.qty > 10,
                        literal_column("'greaterthan10'")
                    )
                ],
                else_=literal_column("'lessthan10'")
            )
        The above will render the given constants without using bound
        parameters for the result values (but still for the comparison
        values), as in::
            CASE
                WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
                WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
                ELSE 'lessthan10'
            END
        :param whens: The criteria to be compared against, :paramref:`.case.whens`
         accepts two different forms, based on whether or not :paramref:`.case.value`
         is used.
         In the first form, it accepts a list of 2-tuples; each 2-tuple consists
         of ``(<sql expression>, <value>)``, where the SQL expression is a
         boolean expression and "value" is a resulting value, e.g.::
            case([
                (users_table.c.name == 'wendy', 'W'),
                (users_table.c.name == 'jack', 'J')
            ])
         In the second form, it accepts a Python dictionary of comparison values
         mapped to a resulting value; this form requires :paramref:`.case.value`
         to be present, and values will be compared using the ``==`` operator,
         e.g.::
            case(
                {"wendy": "W", "jack": "J"},
                value=users_table.c.name
            )
        :param value: An optional SQL expression which will be used as a
          fixed "comparison point" for candidate values within a dictionary
          passed to :paramref:`.case.whens`.
        :param else\_: An optional SQL expression which will be the evaluated
          result of the ``CASE`` construct if all expressions within
          :paramref:`.case.whens` evaluate to false.  When omitted, most
          databases will produce a result of NULL if none of the "when"
          expressions evaulate to true.
        """
        try:
            # Accept the dictionary (shorthand) form of "whens",
            # converting it to an iterable of (comparison, result) pairs.
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            # Already a sequence of 2-tuples.
            pass
        if value is not None:
            # Shorthand form: the comparison side may itself be a plain
            # Python literal (it is compared against "value"), so coerce
            # it to a bindparam.
            whenlist = [
                (_literal_as_binds(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # List form: the comparison side must be a SQL expression;
            # plain literals are rejected by _no_literals().
            whenlist = [
                (_no_literals(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        if whenlist:
            # Derive this CASE's type from the last "when" result.
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None
        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)
        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None
    def _copy_internals(self, clone=_clone, **kw):
        # Clone the value, every (condition, result) pair, and the
        # ELSE expression, where present.
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)
    def get_children(self, **kwargs):
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        # FROM objects are the union of those of all child expressions.
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
def literal_column(text, type_=None):
    """Produce a textual column expression rendered exactly as given.

    The returned object may be used in the columns clause of a
    ``SELECT`` statement and supports further expression work in the
    same way as any other column object, including comparison, math
    and string operations.  The ``type_`` parameter determines proper
    expression behavior (such as whether ``+`` means string
    concatenation or numerical addition).

    :param text: the text of the expression; can be any SQL expression.
      Quoting rules will not be applied.  To specify a column-name
      expression which should be subject to quoting rules, use the
      :func:`column` function instead.

    :param type_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object providing result-set translation and additional expression
      semantics for this column.  If left as None the type will be
      NullType.
    """
    return ColumnClause(text, is_literal=True, type_=type_)
class Cast(ColumnElement):
    """Represent a ``CAST`` expression.
    :class:`.Cast` is produced using the :func:`.cast` factory function,
    as in::
        from sqlalchemy import cast, Numeric
        stmt = select([
                    cast(product_table.c.unit_price, Numeric(10, 4))
                ])
    Details on :class:`.Cast` usage is at :func:`.cast`.
    .. seealso::
        :func:`.cast`
    """
    __visit_name__ = 'cast'
    def __init__(self, expression, type_):
        """Produce a ``CAST`` expression.
        :func:`.cast` returns an instance of :class:`.Cast`.
        E.g.::
            from sqlalchemy import cast, Numeric
            stmt = select([
                        cast(product_table.c.unit_price, Numeric(10, 4))
                    ])
        The above statement will produce SQL resembling::
            SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
        The :func:`.cast` function performs two distinct functions when
        used.  The first is that it renders the ``CAST`` expression within
        the resulting SQL string.  The second is that it associates the given
        type (e.g. :class:`.TypeEngine` class or instance) with the column
        expression on the Python side, which means the expression will take
        on the expression operator behavior associated with that type,
        as well as the bound-value handling and result-row-handling behavior
        of the type.
        .. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
           to the expression such that it takes effect on the bound-value,
           e.g. the Python-to-database direction, in addition to the
           result handling, e.g. database-to-Python, direction.
        An alternative to :func:`.cast` is the :func:`.type_coerce` function.
        This function performs the second task of associating an expression
        with a specific type, but does not render the ``CAST`` expression
        in SQL.
        :param expression: A SQL expression, such as a :class:`.ColumnElement`
         expression or a Python string which will be coerced into a bound
         literal value.
        :param type_: A :class:`.TypeEngine` class or instance indicating
         the type to which the ``CAST`` should apply.
        .. seealso::
            :func:`.type_coerce` - Python-side type coercion without emitting
            CAST.
        """
        self.type = type_api.to_instance(type_)
        # Coerce the expression with the target type so bound-value
        # handling of that type applies on the Python side as well.
        self.clause = _literal_as_binds(expression, type_=self.type)
        # TypeClause wraps the type for rendering in the CAST syntax.
        self.typeclause = TypeClause(self.type)
    def _copy_internals(self, clone=_clone, **kw):
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)
    def get_children(self, **kwargs):
        return self.clause, self.typeclause
    @property
    def _from_objects(self):
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
    __visit_name__ = 'extract'
    def __init__(self, field, expr, **kwargs):
        """Return a :class:`.Extract` construct.
        This is typically available as :func:`.extract`
        as well as ``func.extract`` from the
        :data:`.func` namespace.

        :param field: the field to extract (e.g. ``'year'``), stored
         as given.
        :param expr: the expression to extract from; plain Python
         values are coerced to bound parameters.

        Note: additional keyword arguments are accepted but ignored.
        """
        # EXTRACT yields an integer result.
        self.type = type_api.INTEGERTYPE
        self.field = field
        self.expr = _literal_as_binds(expr, None)
    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)
    def get_children(self, **kwargs):
        return self.expr,
    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.
    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.
    :class:`.UnaryExpression` is the basis for several unary operators
    including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
    :func:`.nullsfirst` and :func:`.nullslast`.
    """
    __visit_name__ = 'unary'
    def __init__(self, element, operator=None, modifier=None,
                 type_=None, negate=None):
        # "operator" renders to the left of the element, "modifier" to
        # the right; typically only one of the two is supplied.
        self.operator = operator
        self.modifier = modifier
        # Self-group the element against whichever of the two is set.
        self.element = element.self_group(against=self.operator or self.modifier)
        self.type = type_api.to_instance(type_)
        # "negate" is the operator used when this expression is negated.
        self.negate = negate
    @classmethod
    def _create_nullsfirst(cls, column):
        """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
        :func:`.nullsfirst` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::
            from sqlalchemy import desc, nullsfirst
            stmt = select([users_table]).\\
                        order_by(nullsfirst(desc(users_table.c.name)))
        The SQL expression from the above would resemble::
            SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
        Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
        invoked from the column expression itself using :meth:`.ColumnElement.nullsfirst`,
        rather than as its standalone function version, as in::
            stmt = select([users_table]).\\
                        order_by(users_table.c.name.desc().nullsfirst())
        .. seealso::
            :func:`.asc`
            :func:`.desc`
            :func:`.nullslast`
            :meth:`.Select.order_by`
        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.nullsfirst_op)
    @classmethod
    def _create_nullslast(cls, column):
        """Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
        :func:`.nullslast` is intended to modify the expression produced
        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
        should be handled when they are encountered during ordering::
            from sqlalchemy import desc, nullslast
            stmt = select([users_table]).\\
                        order_by(nullslast(desc(users_table.c.name)))
        The SQL expression from the above would resemble::
            SELECT id, name FROM user ORDER BY name DESC NULLS LAST
        Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
        invoked from the column expression itself using :meth:`.ColumnElement.nullslast`,
        rather than as its standalone function version, as in::
            stmt = select([users_table]).\\
                        order_by(users_table.c.name.desc().nullslast())
        .. seealso::
            :func:`.asc`
            :func:`.desc`
            :func:`.nullsfirst`
            :meth:`.Select.order_by`
        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.nullslast_op)
    @classmethod
    def _create_desc(cls, column):
        """Produce a descending ``ORDER BY`` clause element.
        e.g.::
            from sqlalchemy import desc
            stmt = select([users_table]).order_by(desc(users_table.c.name))
        will produce SQL as::
            SELECT id, name FROM user ORDER BY name DESC
        The :func:`.desc` function is a standalone version of the
        :meth:`.ColumnElement.desc` method available on all SQL expressions,
        e.g.::
            stmt = select([users_table]).order_by(users_table.c.name.desc())
        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.desc` operation.
        .. seealso::
            :func:`.asc`
            :func:`.nullsfirst`
            :func:`.nullslast`
            :meth:`.Select.order_by`
        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.desc_op)
    @classmethod
    def _create_asc(cls, column):
        """Produce an ascending ``ORDER BY`` clause element.
        e.g.::
            from sqlalchemy import asc
            stmt = select([users_table]).order_by(asc(users_table.c.name))
        will produce SQL as::
            SELECT id, name FROM user ORDER BY name ASC
        The :func:`.asc` function is a standalone version of the
        :meth:`.ColumnElement.asc` method available on all SQL expressions,
        e.g.::
            stmt = select([users_table]).order_by(users_table.c.name.asc())
        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
         with which to apply the :func:`.asc` operation.
        .. seealso::
            :func:`.desc`
            :func:`.nullsfirst`
            :func:`.nullslast`
            :meth:`.Select.order_by`
        """
        return UnaryExpression(
            _literal_as_text(column), modifier=operators.asc_op)
    @classmethod
    def _create_distinct(cls, expr):
        """Produce an column-expression-level unary ``DISTINCT`` clause.
        This applies the ``DISTINCT`` keyword to an individual column
        expression, and is typically contained within an aggregate function,
        as in::
            from sqlalchemy import distinct, func
            stmt = select([func.count(distinct(users_table.c.name))])
        The above would produce an expression resembling::
            SELECT COUNT(DISTINCT name) FROM user
        The :func:`.distinct` function is also available as a column-level
        method, e.g. :meth:`.ColumnElement.distinct`, as in::
            stmt = select([func.count(users_table.c.name.distinct())])
        The :func:`.distinct` operator is different from the
        :meth:`.Select.distinct` method of :class:`.Select`,
        which produces a ``SELECT`` statement
        with ``DISTINCT`` applied to the result set as a whole,
        e.g. a ``SELECT DISTINCT`` expression.  See that method for further
        information.
        .. seealso::
            :meth:`.ColumnElement.distinct`
            :meth:`.Select.distinct`
            :data:`.func`
        """
        expr = _literal_as_binds(expr)
        # DISTINCT preserves the type of the wrapped expression.
        return UnaryExpression(expr,
                               operator=operators.distinct_op, type_=expr.type)
    @util.memoized_property
    def _order_by_label_element(self):
        # Only ASC/DESC wrappers are transparent for ORDER BY
        # label-name resolution; other unary forms are opaque.
        if self.modifier in (operators.desc_op, operators.asc_op):
            return self.element._order_by_label_element
        else:
            return None
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    def get_children(self, **kwargs):
        return self.element,
    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""
        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )
    def _negate(self):
        # When a negation operator was supplied, flip operator/negate
        # and produce a boolean-typed expression; otherwise fall back
        # to the generic NOT wrapping from ClauseElement.
        if self.negate is not None:
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return ClauseElement._negate(self)
    def self_group(self, against=None):
        # Parenthesize only when our operator would bind more loosely
        # than the surrounding operator.
        if self.operator and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
class AsBoolean(UnaryExpression):
    """A unary wrapper marking an expression as boolean-valued.

    Assigns its state directly rather than invoking the
    :class:`.UnaryExpression` constructor, so the wrapped element is
    not self-grouped; negation delegates to the element itself.
    """

    def __init__(self, element, operator, negate):
        # Populate attributes directly; no self_group() of the element.
        self.modifier = None
        self.operator = operator
        self.negate = negate
        self.element = element
        self.type = type_api.BOOLEANTYPE

    def self_group(self, against=None):
        # Boolean coercion never needs parenthesization.
        return self

    def _negate(self):
        # Negate the contained element directly rather than wrapping
        # this construct with NOT.
        return self.element._negate()
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.
    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expresion::
        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b
    """
    __visit_name__ = 'binary'
    def __init__(self, left, right, operator, type_=None,
                 negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, util.string_types):
            operator = operators.custom_op(operator)
        # Retain the un-grouped operands; used by __bool__ below.
        self._orig = (left, right)
        self.left = left.self_group(against=operator)
        self.right = right.self_group(against=operator)
        self.operator = operator
        self.type = type_api.to_instance(type_)
        # "negate" is the operator to use when this expression is
        # negated, e.g. != for ==.
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers
    def __bool__(self):
        # Only == / != comparisons (stdlib operator.eq / operator.ne)
        # have a Python truth value, determined by comparing the hashes
        # of the original operands; anything else is an error.
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")
    __nonzero__ = __bool__  # Python 2 spelling of __bool__
    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)
    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right
    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""
        # For commutative operators, also accept the operands in
        # swapped order.
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )
    def self_group(self, against=None):
        # Parenthesize when our operator binds more loosely than the
        # surrounding operator.
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def _negate(self):
        # Use the pre-supplied negation operator when available,
        # swapping operator/negate; the result is boolean-typed.
        if self.negate is not None:
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=type_api.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
    """Represent a parenthesized grouping around a column expression.

    Unknown attribute access is proxied through to the contained
    element.
    """

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        self.type = getattr(element, 'type', type_api.NULLTYPE)

    def self_group(self, against=None):
        # Already grouped; never wrap a second time.
        return self

    @property
    def _label(self):
        # Fall back to an anonymous label when the element supplies
        # no (truthy) label of its own.
        inner = getattr(self.element, '_label', None)
        return inner or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return (self.element,)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # Delegate everything not found here to the wrapped element.
        return getattr(self.element, attr)

    def __getstate__(self):
        # Pickle support: __getattr__ proxying requires explicit state.
        return dict(element=self.element, type=self.type)

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        if not isinstance(other, Grouping):
            return False
        return self.element.compare(other.element)
class Over(ColumnElement):
    """Represent an OVER clause.
    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.
    """
    __visit_name__ = 'over'
    # Class-level defaults; replaced with ClauseList instances on the
    # instance only when the corresponding argument is supplied.
    order_by = None
    partition_by = None
    def __init__(self, func, partition_by=None, order_by=None):
        """Produce an :class:`.Over` object against a function.
        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.
        E.g.::
            from sqlalchemy import over
            over(func.row_number(), order_by='x')
        Would produce "ROW_NUMBER() OVER(ORDER BY x)".
        :param func: a :class:`.FunctionElement` construct, typically
         generated by :data:`~.expression.func`.
        :param partition_by: a column element or string, or a list
         of such, that will be used as the PARTITION BY clause
         of the OVER construct.
        :param order_by: a column element or string, or a list
         of such, that will be used as the ORDER BY clause
         of the OVER construct.
        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.over` method.
        .. versionadded:: 0.7
        """
        self.func = func
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))
    @util.memoized_property
    def type(self):
        # The OVER expression takes its type from the underlying
        # function.
        return self.func.type
    def get_children(self, **kwargs):
        return [c for c in
                (self.func, self.partition_by, self.order_by)
                if c is not None]
    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        if self.partition_by is not None:
            self.partition_by = clone(self.partition_by, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)
    @property
    def _from_objects(self):
        # FROM objects are the union of those of the function and the
        # PARTITION BY / ORDER BY clauses, where present.
        return list(itertools.chain(
            *[c._from_objects for c in
                (self.func, self.partition_by, self.order_by)
             if c is not None]
        ))
class Label(ColumnElement):
    """Represents a column label (AS).
    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.
    """
    __visit_name__ = 'label'
    def __init__(self, name, element, type_=None):
        """Return a :class:`Label` object for the
        given :class:`.ColumnElement`.
        A label changes the name of an element in the columns clause of a
        ``SELECT`` statement, typically via the ``AS`` SQL keyword.
        This functionality is more conveniently available via the
        :meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
        :param name: label name
        :param obj: a :class:`.ColumnElement`.
        """
        # Collapse nested labels down to the innermost element.
        while isinstance(element, Label):
            element = element.element
        if name:
            self.name = name
        else:
            # No name given: generate an anonymous label keyed on this
            # object's id and the element's name (if any).
            self.name = _anonymous_label('%%(%d %s)s' % (id(self),
                                getattr(element, 'name', 'anon')))
        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self._proxies = [element]
    def __reduce__(self):
        # Pickle support: reconstruct from name, element and type.
        return self.__class__, (self.name, self._element, self._type)
    @util.memoized_property
    def _order_by_label_element(self):
        # A label is itself the target for ORDER BY label resolution.
        return self
    @util.memoized_property
    def type(self):
        # Explicit type_ wins; otherwise fall back to the element's
        # type, if it has one.
        return type_api.to_instance(
            self._type or getattr(self._element, 'type', None)
        )
    @util.memoized_property
    def element(self):
        return self._element.self_group(against=operators.as_)
    def self_group(self, against=None):
        # Re-label the grouped form of the element if grouping changed
        # it; otherwise this Label is already suitable.
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self
    @property
    def primary_key(self):
        return self.element.primary_key
    @property
    def foreign_keys(self):
        return self.element.foreign_keys
    def get_children(self, **kwargs):
        return self.element,
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _make_proxy(self, selectable, name=None, **kw):
        # Proxy via the labeled element, then record this Label in the
        # proxy chain and carry over any explicit type.
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a column expression from any textual string.
    The :class:`.ColumnClause`, a lightweight analogue to the
    :class:`.Column` class, is typically invoked using the
    :func:`.column` function, as in::
        from sqlalchemy.sql import column
        id, name = column("id"), column("name")
        stmt = select([id, name]).select_from("user")
    The above statement would produce SQL like::
        SELECT id, name FROM user
    :class:`.ColumnClause` is the immediate superclass of the schema-specific
    :class:`.Column` object.  While the :class:`.Column` class has all the
    same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
    class is usable by itself in those cases where behavioral requirements
    are limited to simple SQL expression generation.  The object has none of the
    associations with schema-level metadata or with execution-time behavior
    that :class:`.Column` does, so in that sense is a "lightweight" version
    of :class:`.Column`.
    Full details on :class:`.ColumnClause` usage is at :func:`.column`.
    .. seealso::
        :func:`.column`
        :class:`.Column`
    """
    __visit_name__ = 'column'
    # Schema-level attributes used by the Column subclass; always None
    # on a plain ColumnClause.
    onupdate = default = server_default = server_onupdate = None
    # Group of memoized attributes, expired together when .table is
    # re-assigned (see _set_table below).
    _memoized_property = util.group_expirable_memoized_property()
    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        """Produce a :class:`.ColumnClause` object.
        The :class:`.ColumnClause` is a lightweight analogue to the
        :class:`.Column` class.  The :func:`.column` function can
        be invoked with just a name alone, as in::
            from sqlalchemy.sql import column
            id, name = column("id"), column("name")
            stmt = select([id, name]).select_from("user")
        The above statement would produce SQL like::
            SELECT id, name FROM user
        Once constructed, :func:`.column` may be used like any other SQL expression
        element such as within :func:`.select` constructs::
            from sqlalchemy.sql import column
            id, name = column("id"), column("name")
            stmt = select([id, name]).select_from("user")
        The text handled by :func:`.column` is assumed to be handled
        like the name of a database column; if the string contains mixed case,
        special characters, or matches a known reserved word on the target
        backend, the column expression will render using the quoting
        behavior determined by the backend.  To produce a textual SQL
        expression that is rendered exactly without any quoting,
        use :func:`.literal_column` instead, or pass ``True`` as the
        value of :paramref:`.column.is_literal`.   Additionally, full SQL
        statements are best handled using the :func:`.text` construct.
        :func:`.column` can be used in a table-like
        fashion by combining it with the :func:`.table` function
        (which is the lightweight analogue to :class:`.Table`) to produce
        a working table construct with minimal boilerplate::
            from sqlalchemy.sql import table, column
            user = table("user",
                    column("id"),
                    column("name"),
                    column("description"),
            )
            stmt = select([user.c.description]).where(user.c.name == 'wendy')
        A :func:`.column` / :func:`.table` construct like that illustrated
        above can be created in an
        ad-hoc fashion and is not associated with any :class:`.schema.MetaData`,
        DDL, or events, unlike its :class:`.Table` counterpart.
        :param text: the text of the element.
        :param type: :class:`.types.TypeEngine` object which can associate
          this :class:`.ColumnClause` with a type.
        :param is_literal: if True, the :class:`.ColumnClause` is assumed to
          be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case sensitive settings. the
          :func:`.literal_column()` function essentially invokes :func:`.column`
          while passing ``is_literal=True``.
        .. seealso::
            :class:`.Column`
            :func:`.literal_column`
            :func:`.text`
            :ref:`metadata_toplevel`
        """
        self.key = self.name = text
        self.table = _selectable
        self.type = type_api.to_instance(type_)
        self.is_literal = is_literal
    def _compare_name_for_result(self, other):
        # When either side is literal, table-less, or from a textual
        # selectable, fall back to matching by name or label; otherwise
        # compare the proxy sets of the two columns.
        if self.is_literal or \
                self.table is None or self.table._textual or \
                not hasattr(other, 'proxy_set') or (
                    isinstance(other, ColumnClause) and
                    (other.is_literal or
                     other.table is None or
                     other.table._textual)
                ):
            return (hasattr(other, 'name') and self.name == other.name) or \
                (hasattr(other, '_label') and self._label == other._label)
        else:
            return other.proxy_set.intersection(self.proxy_set)
    def _get_table(self):
        return self.__dict__['table']
    def _set_table(self, table):
        # Re-assigning .table invalidates the memoized label/key
        # attributes computed from the previous table.
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)
    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []
    @util.memoized_property
    def description(self):
        # Python 2 requires an ASCII-safe byte string here.
        if util.py3k:
            return self.name
        else:
            return self.name.encode('ascii', 'backslashreplace')
    @_memoized_property
    def _key_label(self):
        # The label used for result-row targeting; differs from _label
        # only when .key was set separately from .name.
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label
    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)
    def _gen_label(self, name):
        # Generate the "tablename_columnname" label used in the columns
        # clause; literal columns are never labeled.
        t = self.table
        if self.is_literal:
            return None
        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                    t.name + "_" + name
            else:
                label = t.name + "_" + name
            # propagate name quoting rules for labels.
            if getattr(name, "quote", None) is not None:
                if isinstance(label, quoted_name):
                    label.quote = name.quote
                else:
                    label = quoted_name(label, name.quote)
            elif getattr(t.name, "quote", None) is not None:
                # can't get this situation to occur, so let's
                # assert false on it for now
                assert not isinstance(label, quoted_name)
                label = quoted_name(label, t.name.quote)
            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label
            return _as_truncated(label)
        else:
            return name
    def _bind_param(self, operator, obj):
        # Bind a compared-to value, naming the parameter after this
        # column and typing it to match.
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)
    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if \
                name_is_truncatable else \
                (name or self.name),
            type_=self.type,
            _selectable=selectable,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)
        if attach:
            selectable._columns[c.key] = c
        return c
class _IdentifiedClause(Executable, ClauseElement):
    """Base for executable clauses identified by a name, e.g. the
    savepoint statements below.
    """
    __visit_name__ = 'identified'
    # These statements never trigger autocommit.
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})
    def __init__(self, ident):
        # ident: the savepoint identifier.
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""
    __visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
    """Represent a SQL identifier combined with quoting preferences.

    :class:`.quoted_name` is a Python unicode/str subclass which
    represents a particular identifier name along with a
    ``quote`` flag. This ``quote`` flag, when set to
    ``True`` or ``False``, overrides automatic quoting behavior
    for this identifier in order to either unconditionally quote
    or to not quote the name. If left at its default of ``None``,
    quoting behavior is applied to the identifier on a per-backend basis
    based on an examination of the token itself.

    A :class:`.quoted_name` object with ``quote=True`` is also
    prevented from being modified in the case of a so-called
    "name normalize" option. Certain database backends, such as
    Oracle, Firebird, and DB2 "normalize" case-insensitive names
    as uppercase. The SQLAlchemy dialects for these backends
    convert from SQLAlchemy's lower-case-means-insensitive convention
    to the upper-case-means-insensitive conventions of those backends.

    The ``quote=True`` flag here will prevent this conversion from occurring
    to support an identifier that's quoted as all lower case against
    such a backend.

    The :class:`.quoted_name` object is normally created automatically
    when specifying the name for key schema constructs such as :class:`.Table`,
    :class:`.Column`, and others. The class can also be passed explicitly
    as the name to any function that receives a name which can be quoted.
    Such as to use the :meth:`.Engine.has_table` method with an unconditionally
    quoted name::

        from sqlalchemy import create_engine
        from sqlalchemy.sql.elements import quoted_name

        engine = create_engine("oracle+cx_oracle://some_dsn")
        engine.has_table(quoted_name("some_table", True))

    The above logic will run the "has table" logic against the Oracle backend,
    passing the name exactly as ``"some_table"`` without converting to
    upper case.

    .. versionadded:: 0.9.0

    """

    def __new__(cls, value, quote):
        if value is None:
            return None
        # experimental - don't bother with quoted_name
        # if quote flag is None. doesn't seem to make any dent
        # in performance however
        # elif not sprcls and quote is None:
        #     return value
        elif isinstance(value, cls) and (
            quote is None or value.quote == quote
        ):
            # already a quoted_name with a compatible quote flag; reuse it
            return value
        self = super(quoted_name, cls).__new__(cls, value)
        self.quote = quote
        return self

    def __reduce__(self):
        # pickle as (plain string, quote flag)
        return quoted_name, (util.text_type(self), self.quote)

    @util.memoized_instancemethod
    def lower(self):
        # when quoting is forced on, preserve the case exactly
        if self.quote:
            return self
        else:
            return util.text_type(self).lower()

    @util.memoized_instancemethod
    def upper(self):
        # when quoting is forced on, preserve the case exactly
        if self.quote:
            return self
        else:
            return util.text_type(self).upper()

    def __repr__(self):
        # produce an ascii-safe repr on both py2 and py3
        backslashed = self.encode('ascii', 'backslashreplace')
        if not util.py2k:
            backslashed = backslashed.decode('ascii')
        return "'%s'" % backslashed
class _truncated_label(quoted_name):
    """A unicode subclass marking symbolic names that may require
    truncation by the compiler."""

    def __new__(cls, value, quote=None):
        # adopt the quote preference of the incoming value when it has one
        quote = getattr(value, "quote", quote)
        return super(_truncated_label, cls).__new__(cls, value, quote)

    def __reduce__(self):
        # pickle as (plain string, quote flag), rebuilding the subclass
        return self.__class__, (util.text_type(self), self.quote)

    def apply_map(self, map_):
        """Mapping application is a no-op for plain truncated labels."""
        return self
# Backwards-compatibility alias, kept in case someone is re-implementing
# the _truncated_identifier() sequence in a custom compiler.
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""

    def __add__(self, other):
        # concatenation yields another _anonymous_label; call
        # util.text_type.__add__ directly to avoid recursing into
        # this method
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(self, util.text_type(other)),
                self.quote)
        )

    def __radd__(self, other):
        # reflected form: *other* is on the left of the '+'
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(util.text_type(other), self),
                self.quote)
        )

    def apply_map(self, map_):
        if self.quote is not None:
            # preserve quoting only if necessary
            return quoted_name(self % map_, self.quote)
        else:
            # else skip the constructor call
            return self % map_
def _as_truncated(value):
    """coerce the given value to :class:`._truncated_label`.

    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects are passed
    unchanged.
    """
    return value if isinstance(value, _truncated_label) \
        else _truncated_label(value)
def _string_or_unprintable(element):
    """Return *element* if it is already a string, else str(element),
    falling back to a placeholder when str() itself fails."""
    if isinstance(element, util.string_types):
        return element
    else:
        try:
            return str(element)
        except Exception:
            # was a bare "except:", which would also swallow
            # KeyboardInterrupt / SystemExit; Exception is the broadest
            # failure we want to mask here.
            return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""
    # plain isinstance() check; inspect() is deliberately avoided here
    # for call-count performance (see the module note above).
    return isinstance(col, ColumnElement)
def _find_columns(clause):
    """locate Column objects within the given expression."""
    cols = util.column_set()
    # traverse the expression tree, adding every 'column' node to the set
    traverse(clause, {}, {'column': cols.add})
    return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Resolve *element* to a string key usable in a columns collection,
    or None when no key is available."""
    if isinstance(element, util.string_types):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    # getattr with a default mirrors the AttributeError -> None fallback
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
    """Coerce *element* to a SQL expression, turning plain strings into
    TextClause constructs; raise ArgumentError for other literals."""
    if isinstance(element, Visitable):
        # already a SQL construct
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif isinstance(element, util.string_types):
        return TextClause(util.text_type(element))
    elif isinstance(element, (util.NoneType, bool)):
        # None / True / False become their SQL constant expressions
        return _const_expr(element)
    else:
        raise exc.ArgumentError(
            "SQL expression object or string expected."
        )
def _no_literals(element):
    """Coerce *element* to a SQL construct, raising when it is a plain
    literal value rather than an expression."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
                                "function to indicate a SQL expression "
                                "literal, or 'literal()' to indicate a "
                                "bound value." % element)
    else:
        return element
def _is_literal(element):
    """True when *element* is a plain literal value: neither a SQL
    construct nor something that can supply one."""
    return not (isinstance(element, Visitable) or
                hasattr(element, '__clause_element__'))
def _only_column_elements_or_none(element, name):
    """Like _only_column_elements(), but passes None straight through."""
    if element is None:
        return None
    return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Coerce *element* to a ColumnElement, raising ArgumentError
    (naming the offending argument *name*) otherwise."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if not isinstance(element, ColumnElement):
        raise exc.ArgumentError(
            "Column-based expression object expected for argument "
            "'%s'; got: '%s', type %s" % (name, element, type(element)))
    return element
def _literal_as_binds(element, name=None, type_=None):
    """Coerce a plain value into a unique bound parameter (Null for
    None); SQL constructs pass through unchanged."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        if element is None:
            return Null()
        else:
            return BindParameter(name, element, type_=type_, unique=True)
    else:
        return element
def _interpret_as_column_or_from(element):
    """Coerce an arbitrary object into a column or FROM expression."""
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()

    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        if isinstance(element, (util.NoneType, bool)):
            # None / True / False become SQL constant expressions
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        # inspected entities that expose a selectable (ORM-style) use it
        return insp.selectable
    # fallback: treat the stringified value as a literal column
    return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
    """Map None/False/True (or existing Null/False_/True_ constructs)
    to their SQL constant expression objects."""
    if isinstance(element, (Null, False_, True_)):
        return element
    if element is None:
        return Null()
    if element is False:
        return False_()
    if element is True:
        return True_()
    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
                                   require_embedded=False):
    """Return the column of *fromclause* corresponding to *column*,
    raising InvalidRequestError when no correspondence exists."""
    c = fromclause.corresponding_column(column,
                                        require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
            "Given column '%s', attached to table '%s', "
            "failed to locate a corresponding column from table '%s'"
            %
            (column,
             getattr(column, 'table', None),
             fromclause.description)
        )
    return c
class AnnotatedColumnElement(Annotated):
    """Annotated variant of a column element; re-derives memoized
    name/key/table/info attributes from the underlying element."""

    def __init__(self, element, values):
        Annotated.__init__(self, element, values)
        # reset comparator memoization so it is rebuilt against this
        # annotated instance
        ColumnElement.comparator._reset(self)
        # drop memoized attributes copied over as None so the
        # @memoized_property versions below can re-derive them
        for attr in ('name', 'key', 'table'):
            if self.__dict__.get(attr, False) is None:
                self.__dict__.pop(attr)

    def _with_annotations(self, values):
        clone = super(AnnotatedColumnElement, self)._with_annotations(values)
        # the clone needs its own comparator memoization as well
        ColumnElement.comparator._reset(clone)
        return clone

    @util.memoized_property
    def name(self):
        """pull 'name' from parent, if not present"""
        return self._Annotated__element.name

    @util.memoized_property
    def table(self):
        """pull 'table' from parent, if not present"""
        return self._Annotated__element.table

    @util.memoized_property
    def key(self):
        """pull 'key' from parent, if not present"""
        return self._Annotated__element.key

    @util.memoized_property
    def info(self):
        # proxy the info dictionary of the underlying element
        return self._Annotated__element.info
| gpl-3.0 |
tanmaykm/thrift | lib/py/src/TSerialization.py | 111 | 1389 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .protocol import TBinaryProtocol
from .transport import TTransport
def serialize(thrift_object,
              protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
    """Serialize *thrift_object* to bytes using the given protocol
    factory (binary protocol by default)."""
    buffer = TTransport.TMemoryBuffer()
    proto = protocol_factory.getProtocol(buffer)
    thrift_object.write(proto)
    return buffer.getvalue()
def deserialize(base,
                buf,
                protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
    """Populate the thrift object *base* from the serialized bytes in
    *buf* and return it."""
    reader = TTransport.TMemoryBuffer(buf)
    proto = protocol_factory.getProtocol(reader)
    base.read(proto)
    return base
| apache-2.0 |
Saevon/saevon.github.io | plugins/jinja2/__init__.py | 1 | 1409 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from itertools import chain
from plugins.jinja2.debug_dump import dump, dump_all, dump_file, dump_url
from plugins.jinja2.category import find_category, category_preview_articles, mark
from plugins.jinja2.article import is_untold
from plugins.jinja2.nav import is_cur_page, get_main
from plugins.jinja2.summary import summary, summary_raw
from plugins.jinja2.tags import update_count, tag_remap, tag_sort, tag_ratios
from plugins.jinja2.date import date_to_xmlschema
from plugins.jinja2.string import title_case, file_title_case
from datetime import datetime
# Filters registered into the Jinja2 template environment.
JINJA_FILTERS = {
    # category helpers
    'find_category': find_category,
    'category_preview_articles': category_preview_articles,
    'mark': mark,
    # article helpers
    'is_untold': is_untold,
    # tag helpers
    'update_count': update_count,
    'tag_remap': tag_remap,
    'tag_ratios': tag_ratios,
    'tag_sort': tag_sort,
    # summaries
    'summary': summary,
    'summary_raw': summary_raw,
    # concatenate any number of iterables into a single list
    'merge': lambda *args: list(chain(*args)),
    # navigation / dates
    'is_cur_page': is_cur_page,
    'date_to_xmlschema': date_to_xmlschema,

    # DEBUG
    'dump': dump,
    'dump_all': dump_all,

    # String
    'title_case': title_case,
    'file_title_case': file_title_case,
}
# Global functions made available inside Jinja2 templates.
JINJA_FUNCTIONS = {
    'today': datetime.today,
    'get_main': get_main,
    'dump_file': dump_file,
    'dump_url': dump_url,
}

# No custom Jinja2 extensions are registered by this plugin.
JINJA_EXTENSIONS = []
| mit |
roadmapper/ansible | lib/ansible/modules/storage/purestorage/purefa_snap.py | 38 | 6774 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_snap
version_added: '2.4'
short_description: Manage volume snapshots on Pure Storage FlashArrays
description:
- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
name:
description:
- The name of the source volume.
type: str
required: true
suffix:
description:
- Suffix of snapshot name.
type: str
target:
description:
- Name of target volume if creating from snapshot.
type: str
overwrite:
description:
- Define whether to overwrite existing volume when creating from snapshot.
type: bool
default: 'no'
state:
description:
- Define whether the volume snapshot should exist or not.
choices: [ absent, copy, present ]
type: str
default: present
eradicate:
description:
- Define whether to eradicate the snapshot on delete or leave in trash.
type: bool
default: 'no'
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Create snapshot foo.ansible
purefa_snap:
name: foo
suffix: ansible
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Create R/W clone foo_clone from snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Overwrite existing volume foo_clone with snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
overwrite: true
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Delete and eradicate snapshot named foo.snap
purefa_snap:
name: foo
suffix: snap
eradicate: true
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
from datetime import datetime
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_volume(module, array):
    """Return the source volume, or None when it cannot be fetched."""
    vol_name = module.params['name']
    try:
        volume = array.get_volume(vol_name)
    except Exception:
        return None
    return volume
def get_target(module, array):
    """Return the target volume, or None when it cannot be fetched."""
    target_name = module.params['target']
    try:
        volume = array.get_volume(target_name)
    except Exception:
        return None
    return volume
def get_snapshot(module, array):
    """Return the snapshot's name when it exists on the source volume,
    else None."""
    try:
        snapname = '.'.join([module.params['name'], module.params['suffix']])
        for snapshot in array.get_volume(module.params['name'], snap='true'):
            if snapshot['name'] == snapname:
                return snapname
    except Exception:
        return None
def create_snapshot(module, array):
    """Create a snapshot of the source volume and exit the module,
    reporting failure as changed=False."""
    succeeded = True
    if not module.check_mode:
        try:
            array.create_snapshot(module.params['name'],
                                  suffix=module.params['suffix'])
        except Exception:
            succeeded = False
    module.exit_json(changed=succeeded)
def create_from_snapshot(module, array):
    """Create (or overwrite) a volume from an existing snapshot and
    exit the module."""
    source = '.'.join([module.params['name'], module.params['suffix']])
    existing_target = get_target(module, array)
    overwrite = module.params['overwrite']
    if existing_target is None:
        changed = True
        if not module.check_mode:
            array.copy_volume(source,
                              module.params['target'])
    elif overwrite:
        changed = True
        if not module.check_mode:
            array.copy_volume(source,
                              module.params['target'],
                              overwrite=overwrite)
    else:
        # target exists and overwrite not requested: nothing to do
        changed = False
    module.exit_json(changed=changed)
def update_snapshot(module, array):
    """No-op handler: always exits the module reporting no change."""
    module.exit_json(changed=False)
def delete_snapshot(module, array):
    """Destroy (and optionally eradicate) the snapshot, then exit the
    module; failures are reported as changed=False."""
    changed = True
    if not module.check_mode:
        snapname = '.'.join([module.params['name'], module.params['suffix']])
        try:
            array.destroy_volume(snapname)
            if module.params['eradicate']:
                try:
                    array.eradicate_volume(snapname)
                except Exception:
                    changed = False
        except Exception:
            changed = False
    module.exit_json(changed=changed)
def main():
    """Module entry point: parse arguments and dispatch to the
    create/copy/delete handlers (each handler exits the module)."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        suffix=dict(type='str'),
        target=dict(type='str'),
        overwrite=dict(type='bool', default=False),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'copy', 'present']),
    ))
    # state=copy needs both a target volume name and a snapshot suffix
    required_if = [('state', 'copy', ['target', 'suffix'])]
    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in volume')
    if module.params['suffix'] is None:
        # default suffix: "snap-<epoch seconds>" with the decimal dot removed
        suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
        module.params['suffix'] = suffix.replace(".", "")
    state = module.params['state']
    array = get_system(module)
    volume = get_volume(module, array)
    # NOTE(review): 'target' is fetched but never used below -- confirm
    # whether it was intended to guard the 'copy' branches.
    target = get_target(module, array)
    snap = get_snapshot(module, array)
    if state == 'present' and volume and not snap:
        create_snapshot(module, array)
    elif state == 'present' and volume and snap:
        update_snapshot(module, array)
    elif state == 'present' and not volume:
        # NOTE(review): missing source volume reports changed=False via
        # update_snapshot() rather than failing -- confirm intended.
        update_snapshot(module, array)
    elif state == 'copy' and snap:
        create_from_snapshot(module, array)
    elif state == 'copy' and not snap:
        update_snapshot(module, array)
    elif state == 'absent' and snap:
        delete_snapshot(module, array)
    elif state == 'absent' and not snap:
        module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
Sodki/ansible | lib/ansible/utils/module_docs_fragments/auth_basic.py | 152 | 1297 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for modules that authenticate
    against a simple HTTP API with username/password credentials."""

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  api_url:
    required: false
    default: null
    description:
      - The resolvable endpoint for the API
  api_username:
    required: false
    default: null
    description:
      - The username to use for authentication against the API
  api_password:
    required: false
    default: null
    description:
      - The password to use for authentication against the API
  validate_certs:
    required: false
    default: yes
    description:
      - Whether or not to validate SSL certs when supplying a https endpoint.
"""
| gpl-3.0 |
vmindru/ansible | lib/ansible/modules/cloud/amazon/iam_policy.py | 54 | 13964 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
- Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
choices: [ "user", "group", "role"]
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
policy_name:
description:
- The name label for the policy to create or remove.
required: true
policy_document:
description:
- The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
policy_json:
description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document),
see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
state:
description:
- Whether to create or delete the IAM policy.
required: true
choices: [ "present", "absent"]
skip_duplicates:
description:
- By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with
the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
default: "/"
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a policy with the name of 'Admin' to the group 'administrators'
- name: Assign a policy called Admin to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
loop:
- Mario
- Luigi
register: new_groups
- name: Apply READ-ONLY policy to new groups that have been recently created
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
loop: "{{ new_groups.results }}"
# Create a new S3 policy with prefix per user
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
loop:
- user: s3_user
prefix: s3_user_prefix
'''
import json
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info, boto_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import urllib
def user_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create, update or delete an inline IAM policy on a user.

    Returns (changed, user_name, updated_policy_names); may exit or
    fail the module directly.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            '''
            urllib is needed here because boto returns url encoded strings instead
            '''
            if urllib.parse.unquote(iam.get_user_policy(name, pol).
                                    get_user_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)

        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_user_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_user_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
                else:
                    # BUG FIX: this error used to be silently swallowed;
                    # fail loudly like role_action() does for consistency.
                    module.fail_json(msg=err.message)

        updated_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)

    return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create, update or delete an inline IAM policy on a role.

    Returns (changed, role_name, updated_policy_names); may exit or
    fail the module directly.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as e:
        if e.error_code == "NoSuchEntity":
            # Role doesn't exist so it's safe to assume the policy doesn't either
            module.exit_json(changed=False, msg="No such role, policy will be skipped.")
        else:
            module.fail_json(msg=e.message)

    try:
        matching_policies = []
        for pol in current_policies:
            # boto returns URL-encoded policy documents
            if urllib.parse.unquote(iam.get_role_policy(name, pol).
                                    get_role_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)

        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_role_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_role_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    module.fail_json(msg=err.message)

        updated_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)

    return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create, update or delete an inline IAM policy on a group.

    Returns (changed, group_name, updated_policy_names, msg); may exit
    or fail the module directly.
    """
    policy_match = False
    changed = False
    msg = ''
    try:
        current_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            # boto returns URL-encoded policy documents
            if urllib.parse.unquote(iam.get_group_policy(name, pol).
                                    get_group_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
                msg = ("The policy document you specified already exists "
                       "under the name %s." % pol)

        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_group_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_group_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    # BUG FIX: this error used to be silently swallowed;
                    # fail loudly like role_action() does for consistency.
                    module.fail_json(msg=err.message)

        updated_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)

    return changed, name, updated_policies, msg
def main():
    """Module entry point: parse arguments, load the policy document and
    dispatch to the user/role/group handler."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(
            default=None, required=True, choices=['user', 'group', 'role']),
        state=dict(
            default=None, required=True, choices=['present', 'absent']),
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='json', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    iam_type = module.params.get('iam_type').lower()
    state = module.params.get('state')
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')
    policy_document = module.params.get('policy_document')

    if policy_document is not None and module.params.get('policy_json') is not None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')

    if policy_document is not None:
        try:
            with open(policy_document, 'r') as json_data:
                # normalize the on-disk JSON for comparison with boto output
                pdoc = json.dumps(json.load(json_data))
        except IOError as e:
            if e.errno == 2:
                # BUG FIX: "{0:!r}" is an invalid format spec and raised
                # ValueError instead of the intended message; "{0!r}" is
                # the correct conversion syntax.
                module.fail_json(
                    msg='policy_document {0!r} does not exist'.format(policy_document))
            else:
                raise
    elif module.params.get('policy_json') is not None:
        pdoc = module.params.get('policy_json')
        # if its a string, assume it is already JSON
        if not isinstance(pdoc, string_types):
            try:
                pdoc = json.dumps(pdoc)
            except Exception as e:
                module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
    else:
        pdoc = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    changed = False

    # each *_action helper exits the module itself on error
    if iam_type == 'user':
        changed, user_name, current_policies = user_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, user_name=name, policies=current_policies)
    elif iam_type == 'role':
        changed, role_name, current_policies = role_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, role_name=name, policies=current_policies)
    elif iam_type == 'group':
        changed, group_name, current_policies, msg = group_action(module, iam, name,
                                                                  policy_name, skip, pdoc,
                                                                  state)
        module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 |
vmindru/ansible | lib/ansible/modules/network/cnos/cnos_save.py | 52 | 3609 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to save running config to start up config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_save
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Save the running configuration as the startup configuration
on devices running Lenovo CNOS
description:
- This module allows you to copy the running configuration of a switch over
its startup configuration. It is recommended to use this module shortly
after any major configuration changes so they persist after a switch
restart. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the
playbook is run.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_save. These are
written in the main.yml file of the tasks directory.
---
- name: Test Save
cnos_save:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Switch Running Config is Saved to Startup Config"
'''
import sys
import time
import socket
import array
import json
import time
import re
# Import the shared CNOS helper library and record its availability instead
# of failing at import time.
# NOTE(review): HAS_LIB is set here but never checked in main() below, so a
# missing library surfaces as a NameError on 'cnos' — confirm intended.
try:
    from ansible.module_utils.network.cnos import cnos
    HAS_LIB = True
except Exception:
    HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point for cnos_save.

    Runs 'write memory' on a Lenovo CNOS switch so the running configuration
    is persisted as the startup configuration, appends the device output to
    the file named by the 'outputfile' parameter, then exits the module with
    changed=True on success or fails with the device error message.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=False),
            username=dict(required=False),
            password=dict(required=False, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),),
        supports_check_mode=False)
    command = 'write memory'
    outputfile = module.params['outputfile']
    output = ''
    # Send the save command to the device and capture its response text.
    cmd = [{'command': command, 'prompt': None, 'answer': None}]
    output = output + str(cnos.run_cnos_commands(module, cmd))
    # Save it into the file.  A context manager guarantees the handle is
    # closed even if the write raises (the original leaked the handle on
    # error and shadowed the 'file' builtin).
    with open(outputfile, "a") as results_file:
        results_file.write(output)
    # A None error message from the helper means the device accepted the save.
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True,
                         msg="Switch Running Config is Saved to Startup Config ")
    else:
        module.fail_json(msg=errorMsg)
# Standard Ansible module entry point guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
kuiche/chromium | third_party/scons/scons-local/SCons/Tool/msvs.py | 3 | 68498 | """SCons.Tool.msvs
Tool-specific initialization for Microsoft Visual Studio project files.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/msvs.py 3897 2009/01/13 06:45:54 scons"
import base64
import hashlib
import os.path
import pickle
import re
import string
import sys
import SCons.Builder
import SCons.Node.FS
import SCons.Platform.win32
import SCons.Script.SConscript
import SCons.Util
import SCons.Warnings
##############################################################################
# Below here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def _hexdigest(s):
    """Return the characters of *s* as a string of lowercase hex digits.

    Kept as a standalone helper (rather than using the md5 object's own
    .hexdigest() method) for compatibility with very old Python versions,
    per the original note about Python 1.5.2.
    """
    result = ''
    for ch in s:
        # '%02x' of the low byte is exactly the high-nibble/low-nibble
        # lookup the original performed against string.hexdigits.
        result = result + '%02x' % (ord(ch) & 0xFF)
    return result
def xmlify(s):
    # Escape the three characters that would corrupt the XML attribute
    # values this module writes into .vcproj/.sln files.
    # NOTE(review): "<" and ">" are not escaped; callers appear to use this
    # only for attribute text -- confirm before tightening.
    s = string.replace(s, "&", "&amp;") # do this first
    s = string.replace(s, "'", "&apos;")
    s = string.replace(s, '"', "&quot;")
    return s
# Project-type GUID written into generated .sln Project() lines (see the
# solution generator below).
external_makefile_guid = '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}'
def _generateGUID(slnfile, name):
    """This generates a dummy GUID for the sln file to use.  It is
    based on the MD5 signatures of the sln filename plus the name of
    the project.  It basically just needs to be unique, and not
    change with each invocation."""
    m = hashlib.md5()
    # Both inputs are deterministic strings, so the GUID is stable per
    # (solution file, project name) pair across runs.
    m.update(str(slnfile) + str(name))
    # TODO(1.5)
    #solution = m.hexdigest().upper()
    solution = string.upper(_hexdigest(m.digest()))
    # convert most of the signature to GUID form (discard the rest)
    solution = "{" + solution[:8] + "-" + solution[8:12] + "-" + solution[12:16] + "-" + solution[16:20] + "-" + solution[20:32] + "}"
    return solution
# Pre-compiled pattern: a leading "major.minor" number followed by any
# trailing suite designator (e.g. "Exp").
version_re = re.compile(r'(\d+\.\d+)(.*)')

def msvs_parse_version(s):
    """Split a Visual Studio version string into its parts.

    A value such as '7.0Exp' yields the numeric version as a float (7.0)
    and the trailing "suite" portion ('Exp', possibly empty).
    """
    matched = version_re.match(s)
    number, suite = matched.groups()
    return (float(number), suite)
# This is how we re-invoke SCons from inside MSVS Project files.
# The problem is that we might have been invoked as either scons.bat
# or scons.py. If we were invoked directly as scons.py, then we could
# use sys.argv[0] to find the SCons "executable," but that doesn't work
# if we were invoked as scons.bat, which uses "python -c" to execute
# things and ends up with "-c" as sys.argv[0]. Consequently, we have
# the MSVS Project file invoke SCons the same way that scons.bat does,
# which works regardless of how we were invoked.
def getExecScriptMain(env, xml=None):
    # Build the "python -c" payload the generated project files use to
    # re-invoke SCons (see the comment block preceding this function for
    # why sys.argv[0] cannot be used).  If xml is true the payload is
    # escaped for embedding in an XML attribute.
    scons_home = env.get('SCONS_HOME')
    if not scons_home and os.environ.has_key('SCONS_LIB_DIR'):
        scons_home = os.environ['SCONS_LIB_DIR']
    if scons_home:
        # SCons location is known: prepend it to sys.path explicitly.
        exec_script_main = "from os.path import join; import sys; sys.path = [ r'%s' ] + sys.path; import SCons.Script; SCons.Script.main()" % scons_home
    else:
        # Fall back to probing the standard install locations for this
        # SCons version under sys.prefix.
        version = SCons.__version__
        exec_script_main = "from os.path import join; import sys; sys.path = [ join(sys.prefix, 'Lib', 'site-packages', 'scons-%(version)s'), join(sys.prefix, 'scons-%(version)s'), join(sys.prefix, 'Lib', 'site-packages', 'scons'), join(sys.prefix, 'scons') ] + sys.path; import SCons.Script; SCons.Script.main()" % locals()
    if xml:
        exec_script_main = xmlify(exec_script_main)
    return exec_script_main
# The string for the Python executable we tell the Project file to use
# is either sys.executable or, if an external PYTHON_ROOT environment
# variable exists, $(PYTHON)ROOT\\python.exe (generalized a little to
# pluck the actual executable name from sys.executable).
# Prefer a $(PYTHON_ROOT)-relative interpreter path when the environment
# variable is set (keeps generated projects portable across machines);
# otherwise embed the absolute path of the running interpreter.
try:
    python_root = os.environ['PYTHON_ROOT']
except KeyError:
    python_executable = sys.executable
else:
    python_executable = os.path.join('$$(PYTHON_ROOT)',
                                     os.path.split(sys.executable)[1])
class Config:
    """Plain attribute bag for one build configuration; the generators'
    AddConfig helpers assign attributes such as variant, platform,
    buildtarget, outdir, runfile and cmdargs onto instances."""
    pass
def splitFully(path):
    """Split *path* into the list of all of its components.

    Returns [] for an empty path or a bare root; otherwise a list of path
    components from outermost directory to basename.
    """
    parts = []
    while True:
        head, tail = os.path.split(path)
        if head and head != path:
            # More structure to peel off: record this component and recurse
            # (iteratively) into the parent.
            parts.insert(0, tail)
            path = head
            continue
        # Terminal component: keep it only if non-empty.
        if tail != '':
            parts.insert(0, tail)
        return parts
def makeHierarchy(sources):
    '''Break a list of files into a hierarchy; for each value, if it is a string,
    then it is a file. If it is a dictionary, it is a folder. The string is
    the original path of the file.'''
    # NOTE(review): the locals 'dict' and 'file' shadow builtins here
    # (pre-existing in this module).
    hierarchy = {}
    for file in sources:
        path = splitFully(file)
        if len(path):
            # Walk/create nested dicts for every directory component.
            dict = hierarchy
            for part in path[:-1]:
                if not dict.has_key(part):
                    dict[part] = {}
                dict = dict[part]
            # Leaf maps the basename to the original path string.
            dict[path[-1]] = file
        #else:
        #    print 'Warning: failed to decompose path for '+str(file)
    return hierarchy
class _DSPGenerator:
    """ Base class for DSP generators """
    # Construction-environment keyword names feeding self.sources, parallel
    # (by position) to the 'sourcenames' display names in __init__.
    srcargs = [
        'srcs',
        'incs',
        'localincs',
        'resources',
        'misc']
    def __init__(self, dspfile, source, env):
        # dspfile may be an SCons Node (has get_abspath) or a plain string.
        self.dspfile = str(dspfile)
        try:
            get_abspath = dspfile.get_abspath
        except AttributeError:
            self.dspabs = os.path.abspath(dspfile)
        else:
            self.dspabs = get_abspath()
        # 'variant' is mandatory; normalize it to a list of variant names.
        if not env.has_key('variant'):
            raise SCons.Errors.InternalError, \
                  "You must specify a 'variant' argument (i.e. 'Debug' or " +\
                  "'Release') to create an MSVSProject."
        elif SCons.Util.is_String(env['variant']):
            variants = [env['variant']]
        elif SCons.Util.is_List(env['variant']):
            variants = env['variant']
        # 'buildtarget' is optional; normalize to one string per variant.
        if not env.has_key('buildtarget') or env['buildtarget'] == None:
            buildtarget = ['']
        elif SCons.Util.is_String(env['buildtarget']):
            buildtarget = [env['buildtarget']]
        elif SCons.Util.is_List(env['buildtarget']):
            if len(env['buildtarget']) != len(variants):
                raise SCons.Errors.InternalError, \
                    "Sizes of 'buildtarget' and 'variant' lists must be the same."
            buildtarget = []
            for bt in env['buildtarget']:
                if SCons.Util.is_String(bt):
                    buildtarget.append(bt)
                else:
                    buildtarget.append(bt.get_abspath())
        else:
            buildtarget = [env['buildtarget'].get_abspath()]
        if len(buildtarget) == 1:
            # Replicate the single entry once per variant.
            bt = buildtarget[0]
            buildtarget = []
            for _ in variants:
                buildtarget.append(bt)
        # 'outdir' is optional; same normalization scheme as 'buildtarget'.
        if not env.has_key('outdir') or env['outdir'] == None:
            outdir = ['']
        elif SCons.Util.is_String(env['outdir']):
            outdir = [env['outdir']]
        elif SCons.Util.is_List(env['outdir']):
            if len(env['outdir']) != len(variants):
                raise SCons.Errors.InternalError, \
                    "Sizes of 'outdir' and 'variant' lists must be the same."
            outdir = []
            for s in env['outdir']:
                if SCons.Util.is_String(s):
                    outdir.append(s)
                else:
                    outdir.append(s.get_abspath())
        else:
            outdir = [env['outdir'].get_abspath()]
        if len(outdir) == 1:
            s = outdir[0]
            outdir = []
            for v in variants:
                outdir.append(s)
        # 'runfile' is optional; defaults to the last build target.
        if not env.has_key('runfile') or env['runfile'] == None:
            runfile = buildtarget[-1:]
        elif SCons.Util.is_String(env['runfile']):
            runfile = [env['runfile']]
        elif SCons.Util.is_List(env['runfile']):
            if len(env['runfile']) != len(variants):
                raise SCons.Errors.InternalError, \
                    "Sizes of 'runfile' and 'variant' lists must be the same."
            runfile = []
            for s in env['runfile']:
                if SCons.Util.is_String(s):
                    runfile.append(s)
                else:
                    runfile.append(s.get_abspath())
        else:
            runfile = [env['runfile'].get_abspath()]
        if len(runfile) == 1:
            s = runfile[0]
            runfile = []
            for v in variants:
                runfile.append(s)
        self.sconscript = env['MSVSSCONSCRIPT']
        cmdargs = env.get('cmdargs', '')
        self.env = env
        # Project name defaults to the basename of the dsp file.
        if self.env.has_key('name'):
            self.name = self.env['name']
        else:
            self.name = os.path.basename(SCons.Util.splitext(self.dspfile)[0])
        self.name = self.env.subst(self.name)
        # Display names of the source groups, parallel to 'srcargs' above.
        sourcenames = [
            'Source Files',
            'Header Files',
            'Local Headers',
            'Resource Files',
            'Other Files']
        self.sources = {}
        for n in sourcenames:
            self.sources[n] = []
        self.configs = {}
        self.nokeep = 0
        # NOTE(review): this condition reads env['variant'], not
        # env['nokeep']; it looks like env['nokeep'] != 0 was intended --
        # confirm against upstream before changing.
        if env.has_key('nokeep') and env['variant'] != 0:
            self.nokeep = 1
        if self.nokeep == 0 and os.path.exists(self.dspabs):
            # Merge configs/sources cached in the existing project file.
            self.Parse()
        # Merge any sources passed via the srcargs keywords, de-duplicated.
        for t in zip(sourcenames,self.srcargs):
            if self.env.has_key(t[1]):
                if SCons.Util.is_List(self.env[t[1]]):
                    for i in self.env[t[1]]:
                        if not i in self.sources[t[0]]:
                            self.sources[t[0]].append(i)
                else:
                    if not self.env[t[1]] in self.sources[t[0]]:
                        self.sources[t[0]].append(self.env[t[1]])
        for n in sourcenames:
            # TODO(1.5):
            #self.sources[n].sort(lambda a, b: cmp(a.lower(), b.lower()))
            self.sources[n].sort(lambda a, b: cmp(string.lower(a), string.lower(b)))
        # Nested helper; 'dspfile=dspfile' binds the enclosing local at
        # definition time (pre-closure idiom).
        def AddConfig(self, variant, buildtarget, outdir, runfile, cmdargs, dspfile=dspfile):
            config = Config()
            config.buildtarget = buildtarget
            config.outdir = outdir
            config.cmdargs = cmdargs
            config.runfile = runfile
            # 'variant' may be "Variant|Platform"; platform defaults to Win32.
            match = re.match('(.*)\|(.*)', variant)
            if match:
                config.variant = match.group(1)
                config.platform = match.group(2)
            else:
                config.variant = variant
                config.platform = 'Win32'
            self.configs[variant] = config
            print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'"
        for i in range(len(variants)):
            AddConfig(self, variants[i], buildtarget[i], outdir[i], runfile[i], cmdargs)
        # Distinct platform names across all configs, in first-seen order.
        self.platforms = []
        for key in self.configs.keys():
            platform = self.configs[key].platform
            if not platform in self.platforms:
                self.platforms.append(platform)
    def Build(self):
        # Subclasses override this to emit the actual project file.
        pass
# Template for the header of a Visual Studio 6 .dsp project file;
# %(name)s / %(confkey)s are interpolated in _GenerateV6DSP.PrintHeader.
V6DSPHeader = """\
# Microsoft Developer Studio Project File - Name="%(name)s" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) External Target" 0x0106
CFG=%(name)s - Win32 %(confkey)s
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak" CFG="%(name)s - Win32 %(confkey)s"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
"""
class _GenerateV6DSP(_DSPGenerator):
    """Generates a Project file for MSVS 6.0"""
    def PrintHeader(self):
        # pick a default config
        confkeys = self.configs.keys()
        confkeys.sort()
        name = self.name
        confkey = confkeys[0]
        self.file.write(V6DSPHeader % locals())
        # One !MESSAGE line per configuration, as MSDEV expects.
        for kind in confkeys:
            self.file.write('!MESSAGE "%s - Win32 %s" (based on "Win32 (x86) External Target")\n' % (name, kind))
        self.file.write('!MESSAGE \n\n')
    def PrintProject(self):
        """Write the per-configuration PROP blocks and the target section."""
        name = self.name
        self.file.write('# Begin Project\n'
                        '# PROP AllowPerConfigDependencies 0\n'
                        '# PROP Scc_ProjName ""\n'
                        '# PROP Scc_LocalPath ""\n\n')
        first = 1
        confkeys = self.configs.keys()
        confkeys.sort()
        for kind in confkeys:
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            if first == 1:
                self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
                first = 0
            else:
                self.file.write('\n!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
            # Temporarily expose the build target to env.subst below;
            # removed again once the commands are expanded.
            env_has_buildtarget = self.env.has_key('MSVSBUILDTARGET')
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget
            # have to write this twice, once with the BASE settings, and once without
            for base in ("BASE ",""):
                self.file.write('# PROP %sUse_MFC 0\n'
                                '# PROP %sUse_Debug_Libraries ' % (base, base))
                # TODO(1.5):
                #if kind.lower().find('debug') < 0:
                if string.find(string.lower(kind), 'debug') < 0:
                    self.file.write('0\n')
                else:
                    self.file.write('1\n')
                self.file.write('# PROP %sOutput_Dir "%s"\n'
                                '# PROP %sIntermediate_Dir "%s"\n' % (base,outdir,base,outdir))
                cmd = 'echo Starting SCons && ' + self.env.subst('$MSVSBUILDCOM', 1)
                self.file.write('# PROP %sCmd_Line "%s"\n'
                                '# PROP %sRebuild_Opt "-c && %s"\n'
                                '# PROP %sTarget_File "%s"\n'
                                '# PROP %sBsc_Name ""\n'
                                '# PROP %sTarget_Dir ""\n'\
                                %(base,cmd,base,cmd,base,buildtarget,base,base))
            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']
        self.file.write('\n!ENDIF\n\n'
                        '# Begin Target\n\n')
        for kind in confkeys:
            self.file.write('# Name "%s - Win32 %s"\n' % (name,kind))
        self.file.write('\n')
        first = 0
        for kind in confkeys:
            if first == 0:
                self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
                first = 1
            else:
                self.file.write('!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
        self.file.write('!ENDIF \n\n')
        self.PrintSourceFiles()
        self.file.write('# End Target\n'
                        '# End Project\n')
        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '\n')
    def PrintSourceFiles(self):
        """Write the source groups, then the SConscript file itself."""
        # Maps group display name -> MSDEV Default_Filter extension list
        # ('|'-separated here, converted to ';' below).
        categories = {'Source Files': 'cpp|c|cxx|l|y|def|odl|idl|hpj|bat',
                      'Header Files': 'h|hpp|hxx|hm|inl',
                      'Local Headers': 'h|hpp|hxx|hm|inl',
                      'Resource Files': 'r|rc|ico|cur|bmp|dlg|rc2|rct|bin|cnt|rtf|gif|jpg|jpeg|jpe',
                      'Other Files': ''}
        cats = categories.keys()
        # TODO(1.5):
        #cats.sort(lambda a, b: cmp(a.lower(), b.lower()))
        cats.sort(lambda a, b: cmp(string.lower(a), string.lower(b)))
        for kind in cats:
            if not self.sources[kind]:
                continue # skip empty groups
            self.file.write('# Begin Group "' + kind + '"\n\n')
            # TODO(1.5)
            #typelist = categories[kind].replace('|', ';')
            typelist = string.replace(categories[kind], '|', ';')
            self.file.write('# PROP Default_Filter "' + typelist + '"\n')
            for file in self.sources[kind]:
                file = os.path.normpath(file)
                self.file.write('# Begin Source File\n\n'
                                'SOURCE="' + file + '"\n'
                                '# End Source File\n')
            self.file.write('# End Group\n')
        # add the SConscript file outside of the groups
        self.file.write('# Begin Source File\n\n'
                        'SOURCE="' + str(self.sconscript) + '"\n'
                        '# End Source File\n')
    def Parse(self):
        """Re-load configs/sources pickled into an existing project file."""
        try:
            dspfile = open(self.dspabs,'r')
        except IOError:
            return # doesn't exist yet, so can't add anything to configs.
        # Skip forward to the end-of-project marker; the pickled caches
        # follow it.
        line = dspfile.readline()
        while line:
            # TODO(1.5):
            #if line.find("# End Project") > -1:
            if string.find(line, "# End Project") > -1:
                break
            line = dspfile.readline()
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        # NOTE(review): unpickling file contents is unsafe on untrusted
        # input (pre-existing behavior).
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.configs.update(data)
        data = None
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        # it has a "# " in front of it, so we strip that.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.sources.update(data)
    def Build(self):
        """Open the project file and emit header + project sections."""
        try:
            self.file = open(self.dspabs,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError, 'Unable to open "' + self.dspabs + '" for writing:' + str(detail)
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
# Templates for MSVS 7.x/8.0 .vcproj files; %-interpolated from locals() in
# _GenerateV7DSP.PrintHeader / PrintProject.
V7DSPHeader = """\
<?xml version="1.0" encoding = "%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
%(scc_attrs)s
\tKeyword="MakeFileProj">
"""
V7DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|%(platform)s"
\t\t\tOutputDirectory="%(outdir)s"
\t\t\tIntermediateDirectory="%(outdir)s"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="FALSE">
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tRebuildCommandLine="%(rebuildcmd)s"
\t\t\t\tOutput="%(runfile)s"/>
\t\t</Configuration>
"""
V8DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
%(scc_attrs)s
\tRootNamespace="%(name)s"
\tKeyword="MakeFileProj">
"""
V8DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|Win32"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="false"
\t\t\t>
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tReBuildCommandLine="%(rebuildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tOutput="%(runfile)s"
\t\t\t\tPreprocessorDefinitions=""
\t\t\t\tIncludeSearchPath=""
\t\t\t\tForcedIncludes=""
\t\t\t\tAssemblySearchPath=""
\t\t\t\tForcedUsingAssemblies=""
\t\t\t\tCompileAsManaged=""
\t\t\t/>
\t\t</Configuration>
"""
class _GenerateV7DSP(_DSPGenerator):
    """Generates a Project file for MSVS .NET"""
    def __init__(self, dspfile, source, env):
        _DSPGenerator.__init__(self, dspfile, source, env)
        self.version = env['MSVS_VERSION']
        self.version_num, self.suite = msvs_parse_version(self.version)
        # Select the template set matching the requested MSVS version.
        if self.version_num >= 8.0:
            self.versionstr = '8.00'
            self.dspheader = V8DSPHeader
            self.dspconfiguration = V8DSPConfiguration
        else:
            if self.version_num >= 7.1:
                self.versionstr = '7.10'
            else:
                self.versionstr = '7.00'
            self.dspheader = V7DSPHeader
            self.dspconfiguration = V7DSPConfiguration
        self.file = None
    def PrintHeader(self):
        """Write the <VisualStudioProject> header, platform list and (8.0+)
        the empty <ToolFiles> section."""
        env = self.env
        versionstr = self.versionstr
        name = self.name
        encoding = self.env.subst('$MSVSENCODING')
        scc_provider = env.get('MSVS_SCC_PROVIDER', '')
        scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
        scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
        scc_local_path = env.get('MSVS_SCC_LOCAL_PATH', '')
        project_guid = env.get('MSVS_PROJECT_GUID', '')
        if self.version_num >= 8.0 and not project_guid:
            project_guid = _generateGUID(self.dspfile, '')
        # Source-control attributes are only emitted in full when a
        # provider is configured.
        if scc_provider != '':
            scc_attrs = ('\tProjectGUID="%s"\n'
                         '\tSccProjectName="%s"\n'
                         '\tSccAuxPath="%s"\n'
                         '\tSccLocalPath="%s"\n'
                         '\tSccProvider="%s"' % (project_guid, scc_project_name, scc_aux_path, scc_local_path, scc_provider))
        else:
            scc_attrs = ('\tProjectGUID="%s"\n'
                         '\tSccProjectName="%s"\n'
                         '\tSccLocalPath="%s"' % (project_guid, scc_project_name, scc_local_path))
        self.file.write(self.dspheader % locals())
        self.file.write('\t<Platforms>\n')
        for platform in self.platforms:
            self.file.write(
                        '\t\t<Platform\n'
                        '\t\t\tName="%s"/>\n' % platform)
        self.file.write('\t</Platforms>\n')
        if self.version_num >= 8.0:
            self.file.write('\t<ToolFiles>\n'
                            '\t</ToolFiles>\n')
    def PrintProject(self):
        """Write one <Configuration> per variant, then the files section."""
        self.file.write('\t<Configurations>\n')
        confkeys = self.configs.keys()
        confkeys.sort()
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            runfile = self.configs[kind].runfile
            cmdargs = self.configs[kind].cmdargs
            # Temporarily expose the build target to env.subst below;
            # removed again once the command lines are expanded.
            env_has_buildtarget = self.env.has_key('MSVSBUILDTARGET')
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget
            starting = 'echo Starting SCons && '
            if cmdargs:
                cmdargs = ' ' + cmdargs
            else:
                cmdargs = ''
            # Command lines are XML-escaped since they land in attributes.
            buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
            rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
            cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)
            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']
            self.file.write(self.dspconfiguration % locals())
        self.file.write('\t</Configurations>\n')
        if self.version_num >= 7.1:
            self.file.write('\t<References>\n'
                            '\t</References>\n')
        self.PrintSourceFiles()
        self.file.write('</VisualStudioProject>\n')
        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write('<!-- SCons Data:\n' + pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '-->\n')
    def printSources(self, hierarchy, commonprefix):
        """Recursively emit <Filter>/<File> elements for a source tree as
        built by makeHierarchy()."""
        sorteditems = hierarchy.items()
        # TODO(1.5):
        #sorteditems.sort(lambda a, b: cmp(a[0].lower(), b[0].lower()))
        sorteditems.sort(lambda a, b: cmp(string.lower(a[0]), string.lower(b[0])))
        # First folders, then files
        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                self.file.write('\t\t\t<Filter\n'
                                '\t\t\t\tName="%s"\n'
                                '\t\t\t\tFilter="">\n' % (key))
                self.printSources(value, commonprefix)
                self.file.write('\t\t\t</Filter>\n')
        for key, value in sorteditems:
            if SCons.Util.is_String(value):
                file = value
                if commonprefix:
                    file = os.path.join(commonprefix, value)
                file = os.path.normpath(file)
                self.file.write('\t\t\t<File\n'
                                '\t\t\t\tRelativePath="%s">\n'
                                '\t\t\t</File>\n' % (file))
    def PrintSourceFiles(self):
        """Write the <Files> section, grouped into the standard categories,
        then the SConscript file and an empty <Globals> section."""
        categories = {'Source Files': 'cpp;c;cxx;l;y;def;odl;idl;hpj;bat',
                      'Header Files': 'h;hpp;hxx;hm;inl',
                      'Local Headers': 'h;hpp;hxx;hm;inl',
                      'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
                      'Other Files': ''}
        self.file.write('\t<Files>\n')
        cats = categories.keys()
        # TODO(1.5)
        #cats.sort(lambda a, b: cmp(a.lower(), b.lower()))
        cats.sort(lambda a, b: cmp(string.lower(a), string.lower(b)))
        cats = filter(lambda k, s=self: s.sources[k], cats)
        for kind in cats:
            if len(cats) > 1:
                self.file.write('\t\t<Filter\n'
                                '\t\t\tName="%s"\n'
                                '\t\t\tFilter="%s">\n' % (kind, categories[kind]))
            sources = self.sources[kind]
            # First remove any common prefix
            commonprefix = None
            if len(sources) > 1:
                s = map(os.path.normpath, sources)
                # take the dirname because the prefix may include parts
                # of the filenames (e.g. if you have 'dir\abcd' and
                # 'dir\acde' then the cp will be 'dir\a' )
                cp = os.path.dirname( os.path.commonprefix(s) )
                if cp and s[0][len(cp)] == os.sep:
                    # +1 because the filename starts after the separator
                    sources = map(lambda s, l=len(cp)+1: s[l:], sources)
                    commonprefix = cp
            elif len(sources) == 1:
                commonprefix = os.path.dirname( sources[0] )
                sources[0] = os.path.basename( sources[0] )
            hierarchy = makeHierarchy(sources)
            self.printSources(hierarchy, commonprefix=commonprefix)
            if len(cats)>1:
                self.file.write('\t\t</Filter>\n')
        # add the SConscript file outside of the groups
        self.file.write('\t\t<File\n'
                        '\t\t\tRelativePath="%s">\n'
                        '\t\t</File>\n' % str(self.sconscript))
        self.file.write('\t</Files>\n'
                        '\t<Globals>\n'
                        '\t</Globals>\n')
    def Parse(self):
        """Re-load configs/sources pickled into an existing project file
        (stored after the '<!-- SCons Data:' marker)."""
        try:
            dspfile = open(self.dspabs,'r')
        except IOError:
            return # doesn't exist yet, so can't add anything to configs.
        line = dspfile.readline()
        while line:
            # TODO(1.5)
            #if line.find('<!-- SCons Data:') > -1:
            if string.find(line, '<!-- SCons Data:') > -1:
                break
            line = dspfile.readline()
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        # NOTE(review): unpickling file contents is unsafe on untrusted
        # input (pre-existing behavior).
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.configs.update(data)
        data = None
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.sources.update(data)
    def Build(self):
        """Open the project file and emit header + project sections."""
        try:
            self.file = open(self.dspabs,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError, 'Unable to open "' + self.dspabs + '" for writing:' + str(detail)
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
class _DSWGenerator:
    """ Base class for DSW generators """
    def __init__(self, dswfile, source, env):
        self.dswfile = os.path.normpath(str(dswfile))
        self.env = env
        # 'projects' is mandatory and must flatten to a non-empty list.
        if not env.has_key('projects'):
            raise SCons.Errors.UserError, \
                "You must specify a 'projects' argument to create an MSVSSolution."
        projects = env['projects']
        if not SCons.Util.is_List(projects):
            raise SCons.Errors.InternalError, \
                "The 'projects' argument must be a list of nodes."
        projects = SCons.Util.flatten(projects)
        if len(projects) < 1:
            raise SCons.Errors.UserError, \
                "You must specify at least one project to create an MSVSSolution."
        self.dspfiles = map(str, projects)
        # Solution name defaults to the basename of the dsw file.
        if self.env.has_key('name'):
            self.name = self.env['name']
        else:
            self.name = os.path.basename(SCons.Util.splitext(self.dswfile)[0])
        self.name = self.env.subst(self.name)
    def Build(self):
        # Subclasses override this to emit the actual solution file.
        pass
class _GenerateV7DSW(_DSWGenerator):
"""Generates a Solution file for MSVS .NET"""
def __init__(self, dswfile, source, env):
_DSWGenerator.__init__(self, dswfile, source, env)
self.file = None
self.version = self.env['MSVS_VERSION']
self.version_num, self.suite = msvs_parse_version(self.version)
self.versionstr = '7.00'
if self.version_num >= 8.0:
self.versionstr = '9.00'
elif self.version_num >= 7.1:
self.versionstr = '8.00'
if self.version_num >= 8.0:
self.versionstr = '9.00'
if env.has_key('slnguid') and env['slnguid']:
self.slnguid = env['slnguid']
else:
self.slnguid = _generateGUID(dswfile, self.name)
self.configs = {}
self.nokeep = 0
if env.has_key('nokeep') and env['variant'] != 0:
self.nokeep = 1
if self.nokeep == 0 and os.path.exists(self.dswfile):
self.Parse()
def AddConfig(self, variant, dswfile=dswfile):
config = Config()
match = re.match('(.*)\|(.*)', variant)
if match:
config.variant = match.group(1)
config.platform = match.group(2)
else:
config.variant = variant
config.platform = 'Win32'
self.configs[variant] = config
print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dswfile) + "'"
if not env.has_key('variant'):
raise SCons.Errors.InternalError, \
"You must specify a 'variant' argument (i.e. 'Debug' or " +\
"'Release') to create an MSVS Solution File."
elif SCons.Util.is_String(env['variant']):
AddConfig(self, env['variant'])
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
AddConfig(self, variant)
self.platforms = []
for key in self.configs.keys():
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform)
def Parse(self):
try:
dswfile = open(self.dswfile,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dswfile.readline()
while line:
if line[:9] == "EndGlobal":
break
line = dswfile.readline()
line = dswfile.readline()
datas = line
while line:
line = dswfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
def PrintSolution(self):
    """Writes a solution file.

    Emits the .sln/.dsw header, one Project entry per project file,
    the Global sections (source-control, configurations, per-project
    configurations, properties), and finally -- unless nokeep is set --
    a base64-encoded pickle of self.configs that Parse() reads back on
    the next run.  Section names and layout vary with the MSVS version
    (self.version_num / self.versionstr, chosen in __init__).
    """
    self.file.write('Microsoft Visual Studio Solution File, Format Version %s\n' % self.versionstr )
    if self.version_num >= 8.0:
        self.file.write('# Visual Studio 2005\n')
    # One Project(...) entry per project file; the GUID is derived
    # deterministically from the file name.
    for p in self.dspfiles:
        name = os.path.basename(p)
        base, suffix = SCons.Util.splitext(name)
        if suffix == '.vcproj':
            name = base
        guid = _generateGUID(p, '')
        self.file.write('Project("%s") = "%s", "%s", "%s"\n'
                        % ( external_makefile_guid, name, p, guid ) )
        # VS 2003 (7.1) wants an empty dependency section per project.
        if self.version_num >= 7.1 and self.version_num < 8.0:
            self.file.write('\tProjectSection(ProjectDependencies) = postProject\n'
                            '\tEndProjectSection\n')
        self.file.write('EndProject\n')
    self.file.write('Global\n')
    env = self.env
    if env.has_key('MSVS_SCC_PROVIDER'):
        # These locals feed the %(name)s substitutions in the template
        # below via locals() -- do not rename them.
        dspfile_base = os.path.basename(self.dspfile)
        slnguid = self.slnguid
        scc_provider = env.get('MSVS_SCC_PROVIDER', '')
        # Spaces in the provider name must be encoded for the sln format.
        scc_provider = string.replace(scc_provider, ' ', r'\u0020')
        scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
        # scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
        scc_local_path = env.get('MSVS_SCC_LOCAL_PATH', '')
        scc_project_base_path = env.get('MSVS_SCC_PROJECT_BASE_PATH', '')
        # project_guid = env.get('MSVS_PROJECT_GUID', '')
        self.file.write('\tGlobalSection(SourceCodeControl) = preSolution\n'
                        '\t\tSccNumberOfProjects = 2\n'
                        '\t\tSccProjectUniqueName0 = %(dspfile_base)s\n'
                        '\t\tSccLocalPath0 = %(scc_local_path)s\n'
                        '\t\tCanCheckoutShared = true\n'
                        '\t\tSccProjectFilePathRelativizedFromConnection0 = %(scc_project_base_path)s\n'
                        '\t\tSccProjectName1 = %(scc_project_name)s\n'
                        '\t\tSccLocalPath1 = %(scc_local_path)s\n'
                        '\t\tSccProvider1 = %(scc_provider)s\n'
                        '\t\tCanCheckoutShared = true\n'
                        '\t\tSccProjectFilePathRelativizedFromConnection1 = %(scc_project_base_path)s\n'
                        '\t\tSolutionUniqueID = %(slnguid)s\n'
                        '\tEndGlobalSection\n' % locals())
    # Solution-level configuration list; the section name changed in VS8.
    if self.version_num >= 8.0:
        self.file.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n')
    else:
        self.file.write('\tGlobalSection(SolutionConfiguration) = preSolution\n')
    confkeys = self.configs.keys()
    confkeys.sort()
    cnt = 0
    for name in confkeys:
        variant = self.configs[name].variant
        platform = self.configs[name].platform
        if self.version_num >= 8.0:
            self.file.write('\t\t%s|%s = %s|%s\n' % (variant, platform, variant, platform))
        else:
            self.file.write('\t\tConfigName.%d = %s\n' % (cnt, variant))
        cnt = cnt + 1
    self.file.write('\tEndGlobalSection\n')
    if self.version_num < 7.1:
        self.file.write('\tGlobalSection(ProjectDependencies) = postSolution\n'
                        '\tEndGlobalSection\n')
    # Per-project configuration mapping.
    if self.version_num >= 8.0:
        self.file.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n')
    else:
        self.file.write('\tGlobalSection(ProjectConfiguration) = postSolution\n')
    for name in confkeys:
        variant = self.configs[name].variant
        platform = self.configs[name].platform
        if self.version_num >= 8.0:
            for p in self.dspfiles:
                guid = _generateGUID(p, '')
                self.file.write('\t\t%s.%s|%s.ActiveCfg = %s|%s\n'
                                '\t\t%s.%s|%s.Build.0 = %s|%s\n' % (guid,variant,platform,variant,platform,guid,variant,platform,variant,platform))
        else:
            for p in self.dspfiles:
                guid = _generateGUID(p, '')
                self.file.write('\t\t%s.%s.ActiveCfg = %s|%s\n'
                                '\t\t%s.%s.Build.0 = %s|%s\n' %(guid,variant,variant,platform,guid,variant,variant,platform))
    self.file.write('\tEndGlobalSection\n')
    if self.version_num >= 8.0:
        self.file.write('\tGlobalSection(SolutionProperties) = preSolution\n'
                        '\t\tHideSolutionNode = FALSE\n'
                        '\tEndGlobalSection\n')
    else:
        self.file.write('\tGlobalSection(ExtensibilityGlobals) = postSolution\n'
                        '\tEndGlobalSection\n'
                        '\tGlobalSection(ExtensibilityAddIns) = postSolution\n'
                        '\tEndGlobalSection\n')
    self.file.write('EndGlobal\n')
    # Append the pickled config cache that Parse() reads back later.
    if self.nokeep == 0:
        pdata = pickle.dumps(self.configs,1)
        pdata = base64.encodestring(pdata)
        self.file.write(pdata + '\n')
def Build(self):
try:
self.file = open(self.dswfile,'w')
except IOError, detail:
raise SCons.Errors.InternalError, 'Unable to open "' + self.dswfile + '" for writing:' + str(detail)
else:
self.PrintSolution()
self.file.close()
# Template for a Visual Studio 6.0 workspace (.dsw) file.  The
# %(name)s / %(dspfile)s placeholders are filled in by
# _GenerateV6DSW.PrintWorkspace.
V6DSWHeader = """\
Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
Project: "%(name)s"="%(dspfile)s" - Package Owner=<4>
Package=<5>
{{{
}}}
Package=<4>
{{{
}}}
###############################################################################
Global:
Package=<5>
{{{
}}}
Package=<3>
{{{
}}}
###############################################################################
"""
class _GenerateV6DSW(_DSWGenerator):
"""Generates a Workspace file for MSVS 6.0"""
def PrintWorkspace(self):
""" writes a DSW file """
name = self.name
dspfile = self.dspfiles[0]
self.file.write(V6DSWHeader % locals())
def Build(self):
try:
self.file = open(self.dswfile,'w')
except IOError, detail:
raise SCons.Errors.InternalError, 'Unable to open "' + self.dswfile + '" for writing:' + str(detail)
else:
self.PrintWorkspace()
self.file.close()
def GenerateDSP(dspfile, source, env):
    """Generates a Project file based on the version of MSVS that is being used"""
    version_num = 6.0
    if env.has_key('MSVS_VERSION'):
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
    # Pick the generator class for the detected version, then build once.
    if version_num >= 7.0:
        generator = _GenerateV7DSP(dspfile, source, env)
    else:
        generator = _GenerateV6DSP(dspfile, source, env)
    generator.Build()
def GenerateDSW(dswfile, source, env):
    """Generates a Solution/Workspace file based on the version of MSVS that is being used"""
    version_num = 6.0
    if env.has_key('MSVS_VERSION'):
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
    # Pick the generator class for the detected version, then build once.
    if version_num >= 7.0:
        generator = _GenerateV7DSW(dswfile, source, env)
    else:
        generator = _GenerateV6DSW(dswfile, source, env)
    generator.Build()
##############################################################################
# Above here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def get_default_visualstudio_version(env):
    """Returns the version set in the env, or the latest version
    installed, if it can find it, or '6.0' if all else fails.  Also
    updates the environment with what it found."""
    versions = ['6.0']
    if env.has_key('MSVS') and SCons.Util.is_Dict(env['MSVS']):
        # A valid MSVS dict already exists -- reuse its version list.
        versions = env['MSVS'].get('VERSIONS', versions)
    else:
        detected = get_visualstudio_versions()
        if detected:
            versions = detected
        env['MSVS'] = {'VERSIONS' : versions}
    if not env.has_key('MSVS_VERSION'):
        env['MSVS_VERSION'] = versions[0] #use highest version by default
    env['MSVS']['VERSION'] = env['MSVS_VERSION']
    return env['MSVS_VERSION']
def get_visualstudio_versions():
    """
    Get list of visualstudio versions from the Windows registry.
    Returns a list of strings containing version numbers.  An empty list
    is returned if we were unable to access the registry (for example,
    we couldn't import the registry-access module) or the appropriate
    registry keys weren't found.
    """
    if not SCons.Util.can_read_reg:
        return []
    HLM = SCons.Util.HKEY_LOCAL_MACHINE
    # Registry roots to scan; VCExpress versions get an 'Exp' suffix so
    # they can be told apart from full installs.
    KEYS = {
        r'Software\Microsoft\VisualStudio' : '',
        r'Software\Microsoft\VCExpress' : 'Exp',
    }
    L = []
    for K, suite_suffix in KEYS.items():
        try:
            k = SCons.Util.RegOpenKeyEx(HLM, K)
            i = 0
            while 1:
                try:
                    p = SCons.Util.RegEnumKey(k,i)
                except SCons.Util.RegError:
                    break
                i = i + 1
                # Skip subkeys that are not version numbers, and duplicates.
                if not p[0] in '123456789' or p in L:
                    continue
                # Only add this version number if there is a valid
                # registry structure (includes the "Setup" key),
                # and at least some of the correct directories
                # exist. Sometimes VS uninstall leaves around
                # some registry/filesystem turds that we don't
                # want to trip over. Also, some valid registry
                # entries are MSDN entries, not MSVS ('7.1',
                # notably), and we want to skip those too.
                try:
                    SCons.Util.RegOpenKeyEx(HLM, K + '\\' + p + '\\Setup')
                except SCons.Util.RegError:
                    continue
                id = []
                idk = SCons.Util.RegOpenKeyEx(HLM, K + '\\' + p)
                # This is not always here -- it only exists if the
                # user installed into a non-standard location (at
                # least in VS6 it works that way -- VS7 seems to
                # always write it)
                try:
                    id = SCons.Util.RegQueryValueEx(idk, 'InstallDir')
                except SCons.Util.RegError:
                    pass
                # If the InstallDir key doesn't exist,
                # then we check the default locations.
                # Note: The IDE's executable is not devenv.exe for VS8 Express.
                if not id or not id[0]:
                    files_dir = SCons.Platform.win32.get_program_files_dir()
                    version_num, suite = msvs_parse_version(p)
                    if version_num < 7.0:
                        vs = r'Microsoft Visual Studio\Common\MSDev98'
                    elif version_num < 8.0:
                        vs = r'Microsoft Visual Studio .NET\Common7\IDE'
                    else:
                        vs = r'Microsoft Visual Studio 8\Common7\IDE'
                    id = [ os.path.join(files_dir, vs) ]
                if os.path.exists(id[0]):
                    L.append(p + suite_suffix)
        except SCons.Util.RegError:
            pass
    if not L:
        return []
    # This is a hack to get around the fact that certain Visual Studio
    # patches place a "6.1" version in the registry, which does not have
    # any of the keys we need to find include paths, install directories,
    # etc.  Therefore we ignore it if it is there, since it throws all
    # other logic off.
    try:
        L.remove("6.1")
    except ValueError:
        pass
    # Sort descending so the highest version comes first.
    L.sort()
    L.reverse()
    return L
def get_default_visualstudio8_suite(env):
    """
    Returns the Visual Studio 2005 suite identifier set in the env, or the
    highest suite installed.  Also records the chosen suite (and the full
    candidate list) in env['MSVS_SUITE'] / env['MSVS'].
    """
    if not env.has_key('MSVS') or not SCons.Util.is_Dict(env['MSVS']):
        env['MSVS'] = {}
    if env.has_key('MSVS_SUITE'):
        # The user picked a suite explicitly; normalize to upper case.
        # TODO(1.5)
        #suite = env['MSVS_SUITE'].upper()
        suite = string.upper(env['MSVS_SUITE'])
        suites = [suite]
    else:
        # Fall back to EXPRESS unless the registry tells us otherwise.
        suite = 'EXPRESS'
        suites = [suite]
        if SCons.Util.can_read_reg:
            suites = get_visualstudio8_suites()
            if suites:
                suite = suites[0] #use best suite by default
    env['MSVS_SUITE'] = suite
    env['MSVS']['SUITES'] = suites
    env['MSVS']['SUITE'] = suite
    return suite
def get_visualstudio8_suites():
    """
    Returns a sorted list of all installed Visual Studio 2005 suites found
    in the registry. The highest version should be the first entry in the list.

    NOTE(review): the code appends Standard/Pro before Express and never
    calls sort(); the ordering is by detection, not an actual sort --
    confirm whether callers rely on more than "best suite first".
    """
    suites = []
    # Detect Standard, Professional and Team edition
    try:
        idk = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                      r'Software\Microsoft\VisualStudio\8.0')
        SCons.Util.RegQueryValueEx(idk, 'InstallDir')
        editions = { 'PRO': r'Setup\VS\Pro' } # ToDo: add standard and team editions
        # Default to STD unless a more specific edition key is present.
        edition_name = 'STD'
        for name, key_suffix in editions.items():
            try:
                idk = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                              r'Software\Microsoft\VisualStudio\8.0' + '\\' + key_suffix )
                edition_name = name
            except SCons.Util.RegError:
                pass
            suites.append(edition_name)
    except SCons.Util.RegError:
        pass
    # Detect Express edition
    try:
        idk = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                      r'Software\Microsoft\VCExpress\8.0')
        SCons.Util.RegQueryValueEx(idk, 'InstallDir')
        suites.append('EXPRESS')
    except SCons.Util.RegError:
        pass
    return suites
def is_msvs_installed():
    """
    Check the registry for an installed visual studio.

    Returns the (possibly empty) version list on success, 0 when the
    registry cannot be read.
    """
    try:
        return SCons.Tool.msvs.get_visualstudio_versions()
    except (SCons.Util.RegError, SCons.Errors.InternalError):
        return 0
def get_msvs_install_dirs(version = None, vs8suite = None):
    """
    Get installed locations for various msvc-related products, like the .NET SDK
    and the Platform SDK.

    Returns a dict which may contain VCINSTALLDIR, VSINSTALLDIR,
    FRAMEWORKDIR, FRAMEWORKVERSIONS, FRAMEWORKVERSION, FRAMEWORKSDKDIR,
    PLATFORMSDKDIR and PLATFORMSDK_MODULES, depending on what is found
    in the registry.  Missing pieces are simply left out of the dict.
    """
    if not SCons.Util.can_read_reg:
        return {}
    if not version:
        versions = get_visualstudio_versions()
        if versions:
            version = versions[0] #use highest version by default
        else:
            return {}
    version_num, suite = msvs_parse_version(version)
    K = 'Software\\Microsoft\\VisualStudio\\' + str(version_num)
    if (version_num >= 8.0):
        if vs8suite == None:
            # We've been given no guidance about which Visual Studio 8
            # suite to use, so attempt to autodetect.
            suites = get_visualstudio8_suites()
            if suites:
                vs8suite = suites[0]
        if vs8suite == 'EXPRESS':
            K = 'Software\\Microsoft\\VCExpress\\' + str(version_num)
    # vc++ install dir
    rv = {}
    if (version_num < 7.0):
        key = K + r'\Setup\Microsoft Visual C++\ProductDir'
    else:
        key = K + r'\Setup\VC\ProductDir'
    try:
        (rv['VCINSTALLDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE, key)
    except SCons.Util.RegError:
        pass
    # visual studio install dir
    if (version_num < 7.0):
        try:
            (rv['VSINSTALLDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                                                             K + r'\Setup\Microsoft Visual Studio\ProductDir')
        except SCons.Util.RegError:
            pass
        # VS6 may not record its own dir; derive it from the VC dir or
        # fall back to the default Program Files location.
        if not rv.has_key('VSINSTALLDIR') or not rv['VSINSTALLDIR']:
            if rv.has_key('VCINSTALLDIR') and rv['VCINSTALLDIR']:
                rv['VSINSTALLDIR'] = os.path.dirname(rv['VCINSTALLDIR'])
            else:
                rv['VSINSTALLDIR'] = os.path.join(SCons.Platform.win32.get_program_files_dir(),'Microsoft Visual Studio')
    else:
        try:
            (rv['VSINSTALLDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                                                             K + r'\Setup\VS\ProductDir')
        except SCons.Util.RegError:
            pass
    # .NET framework install dir
    try:
        (rv['FRAMEWORKDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                                                         r'Software\Microsoft\.NETFramework\InstallRoot')
    except SCons.Util.RegError:
        pass
    if rv.has_key('FRAMEWORKDIR'):
        # try and enumerate the installed versions of the .NET framework.
        contents = os.listdir(rv['FRAMEWORKDIR'])
        l = re.compile('v[0-9]+.*')
        installed_framework_versions = filter(lambda e, l=l: l.match(e), contents)
        def versrt(a,b):
            # since version numbers aren't really floats...
            aa = a[1:]
            bb = b[1:]
            aal = string.split(aa, '.')
            bbl = string.split(bb, '.')
            # sequence comparison in python is lexicographical
            # which is exactly what we want.
            # Note we sort backwards so the highest version is first.
            return cmp(bbl,aal)
        installed_framework_versions.sort(versrt)
        rv['FRAMEWORKVERSIONS'] = installed_framework_versions
        # TODO: allow a specific framework version to be set
        # Choose a default framework version based on the Visual
        # Studio version.
        DefaultFrameworkVersionMap = {
            '7.0' : 'v1.0',
            '7.1' : 'v1.1',
            '8.0' : 'v2.0',
            # TODO: Does .NET 3.0 need to be worked into here somewhere?
        }
        try:
            default_framework_version = DefaultFrameworkVersionMap[version[:3]]
        except (KeyError, TypeError):
            pass
        else:
            # Look for the first installed directory in FRAMEWORKDIR that
            # begins with the framework version string that's appropriate
            # for the Visual Studio version we're using.
            for v in installed_framework_versions:
                if v[:4] == default_framework_version:
                    rv['FRAMEWORKVERSION'] = v
                    break
        # If the framework version couldn't be worked out by the previous
        # code then fall back to using the latest version of the .NET
        # framework
        if not rv.has_key('FRAMEWORKVERSION'):
            rv['FRAMEWORKVERSION'] = installed_framework_versions[0]
    # .NET framework SDK install dir
    if rv.has_key('FRAMEWORKVERSION'):
        # The .NET SDK version used must match the .NET version used,
        # so we deliberately don't fall back to other .NET framework SDK
        # versions that might be present.
        ver = rv['FRAMEWORKVERSION'][:4]
        key = r'Software\Microsoft\.NETFramework\sdkInstallRoot' + ver
        try:
            (rv['FRAMEWORKSDKDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                                                                key)
        except SCons.Util.RegError:
            pass
    # MS Platform SDK dir
    try:
        (rv['PLATFORMSDKDIR'], t) = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
                                                           r'Software\Microsoft\MicrosoftSDK\Directories\Install Dir')
    except SCons.Util.RegError:
        pass
    if rv.has_key('PLATFORMSDKDIR'):
        # if we have a platform SDK, try and get some info on it.
        vers = {}
        try:
            loc = r'Software\Microsoft\MicrosoftSDK\InstalledSDKs'
            k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,loc)
            i = 0
            while 1:
                try:
                    key = SCons.Util.RegEnumKey(k,i)
                    sdk = SCons.Util.RegOpenKeyEx(k,key)
                    j = 0
                    name = ''
                    date = ''
                    # NOTE(review): this clobbers the `version` parameter;
                    # harmless today since it is not read after this point,
                    # but beware when extending the function.
                    version = ''
                    while 1:
                        try:
                            (vk,vv,t) = SCons.Util.RegEnumValue(sdk,j)
                            # TODO(1.5):
                            #if vk.lower() == 'keyword':
                            #    name = vv
                            #if vk.lower() == 'propagation_date':
                            #    date = vv
                            #if vk.lower() == 'version':
                            #    version = vv
                            if string.lower(vk) == 'keyword':
                                name = vv
                            if string.lower(vk) == 'propagation_date':
                                date = vv
                            if string.lower(vk) == 'version':
                                version = vv
                            j = j + 1
                        except SCons.Util.RegError:
                            break
                    if name:
                        vers[name] = (date, version)
                    i = i + 1
                except SCons.Util.RegError:
                    break
            rv['PLATFORMSDK_MODULES'] = vers
        except SCons.Util.RegError:
            pass
    return rv
def GetMSVSProjectSuffix(target, source, env, for_signature):
    """Variable-expansion helper: the MSVS project-file suffix from env."""
    msvs_settings = env['MSVS']
    return msvs_settings['PROJECTSUFFIX']
def GetMSVSSolutionSuffix(target, source, env, for_signature):
    """Variable-expansion helper: the MSVS solution-file suffix from env."""
    msvs_settings = env['MSVS']
    return msvs_settings['SOLUTIONSUFFIX']
def GenerateProject(target, source, env):
# generate the dsp file, according to the version of MSVS.
builddspfile = target[0]
dspfile = builddspfile.srcnode()
# this detects whether or not we're using a VariantDir
if not dspfile is builddspfile:
try:
bdsp = open(str(builddspfile), "w+")
except IOError, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsp.write("This is just a placeholder file.\nThe real project file is here:\n%s\n" % dspfile.get_abspath())
GenerateDSP(dspfile, source, env)
if env.get('auto_build_solution', 1):
builddswfile = target[1]
dswfile = builddswfile.srcnode()
if not dswfile is builddswfile:
try:
bdsw = open(str(builddswfile), "w+")
except IOError, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsw.write("This is just a placeholder file.\nThe real workspace file is here:\n%s\n" % dswfile.get_abspath())
GenerateDSW(dswfile, source, env)
def GenerateSolution(target, source, env):
    """Generate only the solution/workspace file for the first target."""
    dswfile = target[0]
    GenerateDSW(dswfile, source, env)
def projectEmitter(target, source, env):
"""Sets up the DSP dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSPROJECTSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSPROJECTSUFFIX')
target[0] = base + suff
if not source:
source = 'prj_inputs:'
source = source + env.subst('$MSVSSCONSCOM', 1)
source = source + env.subst('$MSVSENCODING', 1)
if env.has_key('buildtarget') and env['buildtarget'] != None:
if SCons.Util.is_String(env['buildtarget']):
source = source + ' "%s"' % env['buildtarget']
elif SCons.Util.is_List(env['buildtarget']):
for bt in env['buildtarget']:
if SCons.Util.is_String(bt):
source = source + ' "%s"' % bt
else:
try: source = source + ' "%s"' % bt.get_abspath()
except AttributeError: raise SCons.Errors.InternalError, \
"buildtarget can be a string, a node, a list of strings or nodes, or None"
else:
try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError, \
"buildtarget can be a string, a node, a list of strings or nodes, or None"
if env.has_key('outdir') and env['outdir'] != None:
if SCons.Util.is_String(env['outdir']):
source = source + ' "%s"' % env['outdir']
elif SCons.Util.is_List(env['outdir']):
for s in env['outdir']:
if SCons.Util.is_String(s):
source = source + ' "%s"' % s
else:
try: source = source + ' "%s"' % s.get_abspath()
except AttributeError: raise SCons.Errors.InternalError, \
"outdir can be a string, a node, a list of strings or nodes, or None"
else:
try: source = source + ' "%s"' % env['outdir'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError, \
"outdir can be a string, a node, a list of strings or nodes, or None"
if env.has_key('name'):
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError, "name must be a string"
if env.has_key('variant'):
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError, "name must be a string or a list of strings"
else:
raise SCons.Errors.InternalError, "variant must be a string or a list of strings"
else:
raise SCons.Errors.InternalError, "variant must be specified"
for s in _DSPGenerator.srcargs:
if env.has_key(s):
if SCons.Util.is_String(env[s]):
source = source + ' "%s' % env[s]
elif SCons.Util.is_List(env[s]):
for t in env[s]:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
else:
raise SCons.Errors.InternalError, s + " must be a string or a list of strings"
else:
raise SCons.Errors.InternalError, s + " must be a string or a list of strings"
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
targetlist = [target[0]]
sourcelist = source
if env.get('auto_build_solution', 1):
env['projects'] = targetlist
t, s = solutionEmitter(target, target, env)
targetlist = targetlist + t
return (targetlist, sourcelist)
def solutionEmitter(target, source, env):
"""Sets up the DSW dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSSOLUTIONSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSSOLUTIONSUFFIX')
target[0] = base + suff
if not source:
source = 'sln_inputs:'
if env.has_key('name'):
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError, "name must be a string"
if env.has_key('variant'):
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError, "name must be a string or a list of strings"
else:
raise SCons.Errors.InternalError, "variant must be a string or a list of strings"
else:
raise SCons.Errors.InternalError, "variant must be specified"
if env.has_key('slnguid'):
if SCons.Util.is_String(env['slnguid']):
source = source + ' "%s"' % env['slnguid']
else:
raise SCons.Errors.InternalError, "slnguid must be a string"
if env.has_key('projects'):
if SCons.Util.is_String(env['projects']):
source = source + ' "%s"' % env['projects']
elif SCons.Util.is_List(env['projects']):
for t in env['projects']:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
return ([target[0]], source)
# Module-level Actions/Builders wired into the Environment by generate().
projectAction = SCons.Action.Action(GenerateProject, None)
solutionAction = SCons.Action.Action(GenerateSolution, None)
# The builders expand $MSVSPROJECTCOM/$MSVSSOLUTIONCOM (set to the
# Actions above in generate()) and use the emitters to compute the
# signature sources.
projectBuilder = SCons.Builder.Builder(action = '$MSVSPROJECTCOM',
                                       suffix = '$MSVSPROJECTSUFFIX',
                                       emitter = projectEmitter)
solutionBuilder = SCons.Builder.Builder(action = '$MSVSSOLUTIONCOM',
                                        suffix = '$MSVSSOLUTIONSUFFIX',
                                        emitter = solutionEmitter)
# Fallback SConscript node used when generate() runs outside of an
# SConscript call stack; created lazily in generate().
default_MSVS_SConscript = None
def generate(env):
    """Add Builders and construction variables for Microsoft Visual
    Studio project files to an Environment."""
    # Install the builders only if the user hasn't already provided them.
    try:
        env['BUILDERS']['MSVSProject']
    except KeyError:
        env['BUILDERS']['MSVSProject'] = projectBuilder
    try:
        env['BUILDERS']['MSVSSolution']
    except KeyError:
        env['BUILDERS']['MSVSSolution'] = solutionBuilder
    env['MSVSPROJECTCOM'] = projectAction
    env['MSVSSOLUTIONCOM'] = solutionAction
    if SCons.Script.call_stack:
        # XXX Need to find a way to abstract this; the build engine
        # shouldn't depend on anything in SCons.Script.
        env['MSVSSCONSCRIPT'] = SCons.Script.call_stack[0].sconscript
    else:
        global default_MSVS_SConscript
        if default_MSVS_SConscript is None:
            default_MSVS_SConscript = env.File('SConstruct')
        env['MSVSSCONSCRIPT'] = default_MSVS_SConscript
    # Command lines the IDE will run to build/rebuild/clean via scons.
    env['MSVSSCONS'] = '"%s" -c "%s"' % (python_executable, getExecScriptMain(env))
    env['MSVSSCONSFLAGS'] = '-C "${MSVSSCONSCRIPT.dir.abspath}" -f ${MSVSSCONSCRIPT.name}'
    env['MSVSSCONSCOM'] = '$MSVSSCONS $MSVSSCONSFLAGS'
    env['MSVSBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSREBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSCLEANCOM'] = '$MSVSSCONSCOM -c "$MSVSBUILDTARGET"'
    env['MSVSENCODING'] = 'Windows-1252'
    try:
        version = get_default_visualstudio_version(env)
        # keep a record of some of the MSVS info so the user can use it.
        dirs = get_msvs_install_dirs(version)
        env['MSVS'].update(dirs)
    except (SCons.Util.RegError, SCons.Errors.InternalError):
        # we don't care if we can't do this -- if we can't, it's
        # because we don't have access to the registry, or because the
        # tools aren't installed.  In either case, the user will have to
        # find them on their own.
        pass
    # Pick the file suffixes that match the detected MSVS generation.
    version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
    if (version_num < 7.0):
        env['MSVS']['PROJECTSUFFIX'] = '.dsp'
        env['MSVS']['SOLUTIONSUFFIX'] = '.dsw'
    else:
        env['MSVS']['PROJECTSUFFIX'] = '.vcproj'
        env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
    env['GET_MSVSPROJECTSUFFIX'] = GetMSVSProjectSuffix
    env['GET_MSVSSOLUTIONSUFFIX'] = GetMSVSSolutionSuffix
    env['MSVSPROJECTSUFFIX'] = '${GET_MSVSPROJECTSUFFIX}'
    env['MSVSSOLUTIONSUFFIX'] = '${GET_MSVSSOLUTIONSUFFIX}'
    env['SCONS_HOME'] = os.environ.get('SCONS_HOME')
def exists(env):
    """Tool existence check: return true if Visual Studio (or its
    command-line tools) appears usable on this platform.

    Checks the registry first; if that yields nothing, falls back to
    probing for the IDE executable matching the requested version.
    """
    if not env['PLATFORM'] in ('win32', 'cygwin'):
        return 0
    # Bug fix: initialize v so the test below cannot raise NameError
    # when reading the registry fails and the except clause only passes.
    v = None
    try:
        v = SCons.Tool.msvs.get_visualstudio_versions()
    except (SCons.Util.RegError, SCons.Errors.InternalError):
        pass
    if not v:
        version_num = 6.0
        if env.has_key('MSVS_VERSION'):
            version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
        if version_num >= 7.0:
            # The executable is 'devenv' in Visual Studio Pro,
            # Team System and others.  Express Editions have different
            # executable names.  Right now we're only going to worry
            # about Visual C++ 2005 Express Edition.
            return env.Detect('devenv') or env.Detect('vcexpress')
        else:
            return env.Detect('msdev')
    else:
        # there's at least one version of MSVS installed.
        return 1
| bsd-3-clause |
coderbone/SickRage | lib/hachoir_metadata/register.py | 67 | 6975 | from hachoir_core.i18n import _
from hachoir_core.tools import (
humanDuration, humanBitRate,
humanFrequency, humanBitSize, humanFilesize,
humanDatetime)
from hachoir_core.language import Language
from hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from hachoir_metadata.formatter import (
humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
humanPixelSize, humanDPI)
from hachoir_metadata.setter import (
setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from hachoir_metadata.metadata_item import Data
# Sanity bounds used by the Data() value filters registered below;
# metadata values outside these ranges are rejected as implausible.
MIN_SAMPLE_RATE = 1000 # 1 kHz
MAX_SAMPLE_RATE = 192000 # 192 kHz
MAX_NB_CHANNEL = 8 # 8 channels
MAX_WIDTH = 20000 # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024 # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24 # 16 million of color
MAX_BITS_PER_PIXEL = 256 # 256 bits/pixel
MAX_FRAME_RATE = 150 # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999
# Accept durations between one millisecond and one year.
DURATION_FILTER = Filter(timedelta,
    timedelta(milliseconds=1),
    timedelta(days=365))
def registerAllItems(meta):
    """Register every known metadata item on `meta`.

    The integer passed to each Data() is the item's sort priority
    (lower sorts first); the optional filter rejects implausible
    values and text_handler renders a human-readable form.

    NOTE(review): a few priorities are duplicated (511 for
    longitude/altitude, 530 for location/camera_brightness, 604 for
    bit_rate/aspect_ratio) -- apparently tolerated by the sorting,
    confirm before relying on a total order.
    """
    # Titles and people
    meta.register(Data("title", 100, _("Title"), type=unicode))
    meta.register(Data("artist", 101, _("Artist"), type=unicode))
    meta.register(Data("author", 102, _("Author"), type=unicode))
    meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))
    # Album / track information
    meta.register(Data("album", 200, _("Album"), type=unicode))
    meta.register(Data("duration", 201, _("Duration"), # integer in milliseconde
        type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
    meta.register(Data("nb_page", 202, _("Nb page"), filter=NumberFilter(1, MAX_NB_PAGE)))
    meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
    meta.register(Data("language", 204, _("Language"), conversion=setLanguage, type=Language))
    meta.register(Data("track_number", 205, _("Track number"), conversion=setTrackNumber,
        filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
    meta.register(Data("track_total", 206, _("Track total"), conversion=setTrackTotal,
        filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
    meta.register(Data("organization", 210, _("Organization"), type=unicode))
    meta.register(Data("version", 220, _("Version")))
    # Image / audio technical properties
    meta.register(Data("width", 301, _("Image width"), filter=NumberFilter(1, MAX_WIDTH), type=(int, long), text_handler=humanPixelSize))
    meta.register(Data("height", 302, _("Image height"), filter=NumberFilter(1, MAX_HEIGHT), type=(int, long), text_handler=humanPixelSize))
    meta.register(Data("nb_channel", 303, _("Channel"), text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL), type=(int, long)))
    meta.register(Data("sample_rate", 304, _("Sample rate"), text_handler=humanFrequency, filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE), type=(int, long, float)))
    meta.register(Data("bits_per_sample", 305, _("Bits/sample"), text_handler=humanBitSize, filter=NumberFilter(1, 64), type=(int, long)))
    meta.register(Data("image_orientation", 306, _("Image orientation")))
    meta.register(Data("nb_colors", 307, _("Number of colors"), filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
    meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"), filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
    meta.register(Data("filename", 309, _("File name"), type=unicode))
    meta.register(Data("file_size", 310, _("File size"), text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("pixel_format", 311, _("Pixel format")))
    meta.register(Data("compr_size", 312, _("Compressed file size"), text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("compr_rate", 313, _("Compression rate"), text_handler=humanComprRate, filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE), type=(int, long, float)))
    meta.register(Data("width_dpi", 320, _("Image DPI width"), filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long), text_handler=humanDPI))
    meta.register(Data("height_dpi", 321, _("Image DPI height"), filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long), text_handler=humanDPI))
    # File attributes
    meta.register(Data("file_attr", 400, _("File attributes")))
    meta.register(Data("file_type", 401, _("File type")))
    meta.register(Data("subtitle_author", 402, _("Subtitle author"), type=unicode))
    # Dates and geolocation
    meta.register(Data("creation_date", 500, _("Creation date"), text_handler=humanDatetime,
        filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
    meta.register(Data("last_modification", 501, _("Last modification"), text_handler=humanDatetime,
        filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
    meta.register(Data("latitude", 510, _("Latitude"), type=float))
    meta.register(Data("longitude", 511, _("Longitude"), type=float))
    meta.register(Data("altitude", 511, _("Altitude"), type=float, text_handler=humanAltitude))
    meta.register(Data("location", 530, _("Location"), type=unicode))
    meta.register(Data("city", 531, _("City"), type=unicode))
    meta.register(Data("country", 532, _("Country"), type=unicode))
    meta.register(Data("charset", 540, _("Charset"), type=unicode))
    meta.register(Data("font_weight", 550, _("Font weight")))
    # Camera (EXIF-style) information
    meta.register(Data("camera_aperture", 520, _("Camera aperture")))
    meta.register(Data("camera_focal", 521, _("Camera focal")))
    meta.register(Data("camera_exposure", 522, _("Camera exposure")))
    meta.register(Data("camera_brightness", 530, _("Camera brightness")))
    meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
    meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"), type=unicode))
    # Stream / container properties
    meta.register(Data("compression", 600, _("Compression")))
    meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
    meta.register(Data("url", 602, _("URL"), type=unicode))
    meta.register(Data("frame_rate", 603, _("Frame rate"), text_handler=humanFrameRate,
        filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
    meta.register(Data("bit_rate", 604, _("Bit rate"), text_handler=humanBitRate,
        filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
    meta.register(Data("aspect_ratio", 604, _("Aspect ratio"), type=(int, long, float)))
    # Miscellaneous
    meta.register(Data("os", 900, _("OS"), type=unicode))
    meta.register(Data("producer", 901, _("Producer"), type=unicode))
    meta.register(Data("comment", 902, _("Comment"), type=unicode))
    meta.register(Data("format_version", 950, _("Format version"), type=unicode))
    meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
    meta.register(Data("endian", 952, _("Endianness"), type=unicode))
| gpl-3.0 |
aerokappa/SantaClaus | handCodedOptimum_v4.py | 1 | 2216 | import numpy as np
import pandas as pd
from processInput import processInput
def handCodedOptimum_v4 ( ):
fileName = 'gifts.csv'
giftList, giftListSummary = processInput( fileName )
packedBags = []
for i in np.arange(1000):
print i
currentBag = []
if (i< 333):
itemCount = np.array([0 ,3 ,0 ,0 ,0 ,0 ,0 ,3 ,0])
elif ((i>=333) & (i<458)):
itemCount = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=458) & (i<583)):
itemCount = np.array([0, 0, 0, 0, 0, 0, 8, 0, 0])
elif ((i>=583) & (i<916)):
itemCount = np.array([0, 0, 0, 3, 0, 2, 0, 0, 0])
elif ((i>=916) & (i<924)):
itemCount = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 25])
elif ((i>=924) & (i<928)):
itemCount = np.array([ 0, 23, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=928) & (i<938)):
itemCount = np.array([ 0, 0, 0, 0, 0, 19, 0, 0, 0])
elif ((i>=938) & (i<939)):
itemCount = np.array([ 0, 0, 0, 0, 0, 11, 0, 1, 0])
elif ((i>=939) & (i<940)):
itemCount = np.array([0, 9, 0, 1, 0, 0, 0, 0, 0])
else:
itemCount = np.array([0, 0, 1, 0, 0, 5, 0, 0, 0])
for i in np.arange(len(itemCount)):
if (itemCount[i] <= giftListSummary['nGiftsNotPacked'][i]):
for j in np.arange(itemCount[i]):
giftName = giftListSummary['GiftType'][i]
currGiftID = giftListSummary['nGiftsPacked'][i]
currentBag.append(giftName+'_'+str(currGiftID))
giftListSummary['nGiftsPacked'][i] += 1
giftListSummary['nGiftsNotPacked'][i] -= 1
packedBags.append(currentBag)
# Write to File 'submission.csv'
subFile = open('submission_5.csv','w')
subFile.write('Gifts\n')
for currentBag in packedBags:
subFile.write(currentBag[0])
for currentItem in currentBag[1:]:
subFile.write(' ')
subFile.write(currentItem)
subFile.write('\n')
subFile.close()
return packedBags | mit |
Jbonnett/Mutagen-flo | mutagen/ogg.py | 1 | 17770 | # Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes, WrappedFileobj
class error(IOError):
    """Raised when an Ogg bitstream cannot be parsed or written."""
class OggPage(object):
    """A single Ogg page (not necessarily a single encoded packet).

    A page is a header of 26 bytes, followed by the length of the
    data, followed by the data.

    The constructor is given a file-like object pointing to the start
    of an Ogg page. After the constructor is finished it is pointing
    to the start of the next page.

    Attributes:
    version -- stream structure version (currently always 0)
    position -- absolute stream position (default -1)
    serial -- logical stream serial number (default 0)
    sequence -- page sequence number within logical stream (default 0)
    offset -- offset this page was read from (default None)
    complete -- if the last packet on this page is complete (default True)
    packets -- list of raw packet data (default [])

    Note that if 'complete' is false, the next page's 'continued'
    property must be true (so set both when constructing pages).

    If a file-like object is supplied to the constructor, the above
    attributes will be filled in based on it.
    """

    # Class-level defaults; instances overwrite these when parsing.
    version = 0
    __type_flags = 0
    position = 0L
    serial = 0
    sequence = 0
    offset = None
    complete = True

    def __init__(self, fileobj=None):
        self.packets = []

        if fileobj is None:
            return

        self.offset = fileobj.tell()

        # 27-byte header: "OggS" magic, version, type flags, granule
        # position, serial, sequence, CRC, and the segment count.
        header = fileobj.read(27)
        if len(header) == 0:
            raise EOFError

        try:
            (oggs, self.version, self.__type_flags, self.position,
             self.serial, self.sequence, crc, segments) = struct.unpack(
                "<4sBBqIIiB", header)
        except struct.error:
            raise error("unable to read full header; got %r" % header)

        if oggs != "OggS":
            raise error("read %r, expected %r, at 0x%x" % (
                oggs, "OggS", fileobj.tell() - 27))

        if self.version != 0:
            raise error("version %r unsupported" % self.version)

        total = 0
        lacings = []
        # Each lacing byte adds to the current packet's size; a value
        # below 255 terminates that packet (RFC 3533 segment table).
        lacing_bytes = fileobj.read(segments)
        if len(lacing_bytes) != segments:
            raise error("unable to read %r lacing bytes" % segments)
        for c in map(ord, lacing_bytes):
            total += c
            if c < 255:
                lacings.append(total)
                total = 0
        if total:
            # The last packet ended on a 255 lacing value, so it
            # continues on the next page: mark this page incomplete.
            lacings.append(total)
            self.complete = False

        self.packets = map(fileobj.read, lacings)
        if map(len, self.packets) != lacings:
            raise error("unable to read full data")

    def __eq__(self, other):
        """Two Ogg pages are the same if they write the same data."""
        try:
            return (self.write() == other.write())
        except AttributeError:
            return False

    __hash__ = object.__hash__

    def __repr__(self):
        attrs = ['version', 'position', 'serial', 'sequence', 'offset',
                 'complete', 'continued', 'first', 'last']
        values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
        return "<%s %s, %d bytes in %d packets>" % (
            type(self).__name__, " ".join(values), sum(map(len, self.packets)),
            len(self.packets))

    def write(self):
        """Return a string encoding of the page header and data.

        A ValueError is raised if the data is too big to fit in a
        single page.
        """
        # Header is packed with a CRC of 0 first; the real CRC is
        # spliced in below once the whole page has been assembled.
        data = [
            struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
                        self.position, self.serial, self.sequence, 0)
            ]

        lacing_data = []
        for datum in self.packets:
            quot, rem = divmod(len(datum), 255)
            lacing_data.append("\xff" * quot + chr(rem))
        lacing_data = "".join(lacing_data)
        if not self.complete and lacing_data.endswith("\x00"):
            # An unterminated final packet must not emit its 0 lacing
            # value; the packet continues on the next page instead.
            lacing_data = lacing_data[:-1]
        data.append(chr(len(lacing_data)))
        data.append(lacing_data)
        data.extend(self.packets)
        data = "".join(data)

        # Python's CRC is swapped relative to Ogg's needs.
        crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
        # Although we're using to_int_be, this actually makes the CRC
        # a proper le integer, since Python's CRC is byteswapped.
        crc = cdata.to_int_be(crc).translate(cdata.bitswap)
        # Bytes 22-25 of the header hold the CRC field.
        data = data[:22] + crc + data[26:]
        return data

    def __size(self):
        # Mirror of write(): header + lacing bytes + packet data,
        # computed without actually serializing the page.
        size = 27 # Initial header size
        for datum in self.packets:
            quot, rem = divmod(len(datum), 255)
            size += quot + 1
        if not self.complete and rem == 0:
            # Packet contains a multiple of 255 bytes and is not
            # terminated, so we don't have a \x00 at the end.
            size -= 1
        size += sum(map(len, self.packets))
        return size

    size = property(__size, doc="Total frame size.")

    def __set_flag(self, bit, val):
        # Set or clear one bit of the header's type-flags byte.
        mask = 1 << bit
        if val: self.__type_flags |= mask
        else: self.__type_flags &= ~mask

    continued = property(
        lambda self: cdata.test_bit(self.__type_flags, 0),
        lambda self, v: self.__set_flag(0, v),
        doc="The first packet is continued from the previous page.")

    first = property(
        lambda self: cdata.test_bit(self.__type_flags, 1),
        lambda self, v: self.__set_flag(1, v),
        doc="This is the first page of a logical bitstream.")

    last = property(
        lambda self: cdata.test_bit(self.__type_flags, 2),
        lambda self, v: self.__set_flag(2, v),
        doc="This is the last page of a logical bitstream.")

    def renumber(klass, fileobj, serial, start):
        """Renumber pages belonging to a specified logical stream.

        fileobj must be opened with mode r+b or w+b.

        Starting at page number 'start', renumber all pages belonging
        to logical stream 'serial'. Other pages will be ignored.

        fileobj must point to the start of a valid Ogg page; any
        occuring after it and part of the specified logical stream
        will be numbered. No adjustment will be made to the data in
        the pages nor the granule position; only the page number, and
        so also the CRC.

        If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occured,
        but the invalid data will be left intact (since this function
        does not change the total file size).
        """

        number = start
        while True:
            try: page = OggPage(fileobj)
            except EOFError:
                break
            else:
                if page.serial != serial:
                    # Wrong stream, skip this page.
                    continue
                # Changing the number can't change the page size,
                # so seeking back based on the current size is safe.
                fileobj.seek(-page.size, 1)
            page.sequence = number
            # Rewriting the sequence changes the CRC too, so the whole
            # page is re-serialized in place.
            fileobj.write(page.write())
            fileobj.seek(page.offset + page.size, 0)
            number += 1
    renumber = classmethod(renumber)

    def to_packets(klass, pages, strict=False):
        """Construct a list of packet data from a list of Ogg pages.

        If strict is true, the first page must start a new packet,
        and the last page must end the last packet.
        """
        serial = pages[0].serial
        sequence = pages[0].sequence
        packets = []

        if strict:
            if pages[0].continued:
                raise ValueError("first packet is continued")
            if not pages[-1].complete:
                raise ValueError("last packet does not complete")
        elif pages and pages[0].continued:
            # Non-strict mode: start with an empty packet to absorb the
            # continuation data from a preceding (unseen) page.
            packets.append("")

        for page in pages:
            if serial != page.serial:
                raise ValueError("invalid serial number in %r" % page)
            elif sequence != page.sequence:
                raise ValueError("bad sequence number in %r" % page)
            else: sequence += 1

            # A continued first packet is appended to the previous
            # page's last packet; otherwise it starts a new one.
            if page.continued: packets[-1] += page.packets[0]
            else: packets.append(page.packets[0])
            packets.extend(page.packets[1:])

        return packets
    to_packets = classmethod(to_packets)

    def from_packets(klass, packets, sequence=0,
                     default_size=4096, wiggle_room=2048):
        """Construct a list of Ogg pages from a list of packet data.

        The algorithm will generate pages of approximately
        default_size in size (rounded down to the nearest multiple of
        255). However, it will also allow pages to increase to
        approximately default_size + wiggle_room if allowing the
        wiggle room would finish a packet (only one packet will be
        finished in this way per page; if the next packet would fit
        into the wiggle room, it still starts on a new page).

        This method reduces packet fragmentation when packet sizes are
        slightly larger than the default page size, while still
        ensuring most pages are of the average size.

        Pages are numbered started at 'sequence'; other information is
        uninitialized.
        """

        # Round down to a whole number of lacing segments.
        chunk_size = (default_size // 255) * 255

        pages = []

        page = OggPage()
        page.sequence = sequence

        for packet in packets:
            page.packets.append("")
            while packet:
                data, packet = packet[:chunk_size], packet[chunk_size:]
                if page.size < default_size and len(page.packets) < 255:
                    page.packets[-1] += data
                else:
                    # If we've put any packet data into this page yet,
                    # we need to mark it incomplete. However, we can
                    # also have just started this packet on an already
                    # full page, in which case, just start the new
                    # page with this packet.
                    if page.packets[-1]:
                        page.complete = False
                        if len(page.packets) == 1:
                            # Pages with only an unfinished packet carry
                            # the special granule position of -1.
                            page.position = -1L
                    else:
                        page.packets.pop(-1)
                    pages.append(page)
                    page = OggPage()
                    page.continued = not pages[-1].complete
                    page.sequence = pages[-1].sequence + 1
                    page.packets.append(data)

                if len(packet) < wiggle_room:
                    # Finish the packet on this page rather than
                    # fragmenting a small tail onto the next one.
                    page.packets[-1] += packet
                    packet = ""

        if page.packets:
            pages.append(page)

        return pages
    from_packets = classmethod(from_packets)

    def replace(klass, fileobj, old_pages, new_pages):
        """Replace old_pages with new_pages within fileobj.

        old_pages must have come from reading fileobj originally.
        new_pages are assumed to have the 'same' data as old_pages,
        and so the serial and sequence numbers will be copied, as will
        the flags for the first and last pages.

        fileobj will be resized and pages renumbered as necessary. As
        such, it must be opened r+b or w+b.
        """

        # Number the new pages starting from the first old page.
        first = old_pages[0].sequence
        for page, seq in zip(new_pages, range(first, first + len(new_pages))):
            page.sequence = seq
            page.serial = old_pages[0].serial

        new_pages[0].first = old_pages[0].first
        new_pages[0].last = old_pages[0].last
        new_pages[0].continued = old_pages[0].continued

        new_pages[-1].first = old_pages[-1].first
        new_pages[-1].last = old_pages[-1].last
        new_pages[-1].complete = old_pages[-1].complete
        if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
            new_pages[-1].position = -1L

        new_data = "".join(map(klass.write, new_pages))

        # Make room in the file for the new data.
        delta = len(new_data)
        fileobj.seek(old_pages[0].offset, 0)
        insert_bytes(fileobj, delta, old_pages[0].offset)
        fileobj.seek(old_pages[0].offset, 0)
        fileobj.write(new_data)
        new_data_end = old_pages[0].offset + delta

        # Go through the old pages and delete them. Since we shifted
        # the data down the file, we need to adjust their offsets. We
        # also need to go backwards, so we don't adjust the deltas of
        # the other pages.
        old_pages.reverse()
        for old_page in old_pages:
            adj_offset = old_page.offset + delta
            delete_bytes(fileobj, old_page.size, adj_offset)

        # Finally, if there's any discrepency in length, we need to
        # renumber the pages for the logical stream.
        if len(old_pages) != len(new_pages):
            fileobj.seek(new_data_end, 0)
            serial = new_pages[-1].serial
            sequence = new_pages[-1].sequence + 1
            klass.renumber(fileobj, serial, sequence)
    replace = classmethod(replace)

    def find_last(klass, fileobj, serial):
        """Find the last page of the stream 'serial'.

        If the file is not multiplexed this function is fast. If it is,
        it must read the whole the stream.

        This finds the last page in the actual file object, or the last
        page in the stream (with eos set), whichever comes first.
        """

        # For non-muxed streams, look at the last page.
        # A page can be at most 64k (255 segments * 255 bytes + header),
        # so the last page must start within the final 64k of the file.
        try: fileobj.seek(-256*256, 2)
        except IOError:
            # The file is less than 64k in length.
            fileobj.seek(0)
        data = fileobj.read()
        try: index = data.rindex("OggS")
        except ValueError:
            raise error("unable to find final Ogg header")
        stringobj = StringIO(data[index:])
        best_page = None
        try:
            page = OggPage(stringobj)
        except error:
            pass
        else:
            if page.serial == serial:
                # If the candidate carries the end-of-stream flag it is
                # definitively the last page; return it immediately.
                if page.last: return page
                else: best_page = page
            else: best_page = None

        # The stream is muxed, so use the slow way.
        fileobj.seek(0)
        try:
            page = OggPage(fileobj)
            while not page.last:
                page = OggPage(fileobj)
                while page.serial != serial:
                    page = OggPage(fileobj)
                best_page = page
            return page
        except error:
            return best_page
        except EOFError:
            return best_page
    find_last = classmethod(find_last)
class OggFileType(FileType):
    """A generic Ogg file.

    Subclasses must set _Info, _Tags and _Error to concrete classes
    for the specific Ogg codec they support.
    """

    _Info = None    # stream-info class, constructed from the fileobj
    _Tags = None    # tag/comment class, constructed from fileobj + info
    _Error = None   # exception type raised for malformed files
    _mimes = ["application/ogg", "application/x-ogg"]

    def load(self, filename):
        """Load file information from a filename."""

        self.filename = filename
        fileobj = WrappedFileobj(filename, "rb")
        try:
            try:
                self.info = self._Info(fileobj)
                self.tags = self._Tags(fileobj, self.info)

                if self.info.length:
                    # The streaminfo gave us real length information,
                    # don't waste time scanning the Ogg.
                    return

                # Derive the length from the granule position of the
                # stream's last page divided by the sample/frame rate.
                last_page = OggPage.find_last(fileobj, self.info.serial)
                samples = last_page.position
                try:
                    denom = self.info.sample_rate
                except AttributeError:
                    # Video streams expose fps instead of a sample rate.
                    denom = self.info.fps
                self.info.length = samples / float(denom)

            except error, e:
                # Re-raise as the codec-specific error, keeping the
                # original traceback (Python 2 three-argument raise).
                raise self._Error, e, sys.exc_info()[2]
            except EOFError:
                raise self._Error, "no appropriate stream found"
        finally:
            fileobj.close()

    def delete(self, filename=None):
        """Remove tags from a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename

        # Clearing in memory then injecting writes an empty tag block.
        self.tags.clear()
        fileobj = WrappedFileobj(filename, "rb+")
        try:
            try: self.tags._inject(fileobj)
            except error, e:
                raise self._Error, e, sys.exc_info()[2]
            except EOFError:
                raise self._Error, "no appropriate stream found"
        finally:
            fileobj.close()

    def save(self, filename=None):
        """Save a tag to a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename
        fileobj = WrappedFileobj(filename, "rb+")
        try:
            try: self.tags._inject(fileobj)
            except error, e:
                raise self._Error, e, sys.exc_info()[2]
            except EOFError:
                raise self._Error, "no appropriate stream found"
        finally:
            fileobj.close()
| gpl-2.0 |
albertomurillo/ansible | lib/ansible/modules/cloud/cloudstack/cs_domain.py | 30 | 6616 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_domain
short_description: Manages domains on Apache CloudStack based clouds.
description:
- Create, update and remove domains.
version_added: '2.0'
author: René Moser (@resmo)
options:
path:
description:
- Path of the domain.
- Prefix C(ROOT/) or C(/ROOT/) in path is optional.
type: str
required: true
network_domain:
description:
- Network domain for networks in the domain.
type: str
clean_up:
description:
- Clean up all domain resources like child domains and accounts.
- Considered on I(state=absent).
type: bool
default: no
state:
description:
- State of the domain.
type: str
choices: [ present, absent ]
default: present
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Create a domain
cs_domain:
path: ROOT/customers
network_domain: customers.example.com
delegate_to: localhost
- name: Create another subdomain
cs_domain:
path: ROOT/customers/xy
network_domain: xy.customers.example.com
delegate_to: localhost
- name: Remove a domain
cs_domain:
path: ROOT/customers/xy
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the domain.
returned: success
type: str
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of the domain.
returned: success
type: str
sample: customers
path:
description: Domain path.
returned: success
type: str
sample: /ROOT/customers
parent_domain:
description: Parent domain of the domain.
returned: success
type: str
sample: ROOT
network_domain:
description: Network domain of the domain.
returned: success
type: str
sample: example.local
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackDomain(AnsibleCloudStack):
    """Manage CloudStack domains: lookup, create, update and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackDomain, self).__init__(module)
        # Map CloudStack API result keys to this module's return keys.
        self.returns = {
            'path': 'path',
            'networkdomain': 'network_domain',
            'parentdomainname': 'parent_domain',
        }
        # Cache for the looked-up domain dict (None until fetched).
        self.domain = None

    def _get_domain_internal(self, path=None):
        """Return the domain dict matching *path*, or None if absent.

        Paths are compared case-insensitively; a leading 'ROOT/' or
        '/ROOT/' prefix is optional and normalized in before matching.
        """
        if not path:
            path = self.module.params.get('path')

        if path.endswith('/'):
            self.module.fail_json(msg="Path '%s' must not end with /" % path)

        path = path.lower()

        if path.startswith('/') and not path.startswith('/root/'):
            path = "root" + path
        elif not path.startswith('root/'):
            path = "root/" + path

        args = {
            'listall': True,
            'fetch_list': True,
        }

        domains = self.query_api('listDomains', **args)
        if domains:
            for d in domains:
                if path == d['path'].lower():
                    return d
        return None

    def get_name(self):
        """Return the domain name (the last component of the path).

        The original implementation used the slice [-1:], which yields a
        one-element list instead of the name string itself; [-1] returns
        the string, which is what the createDomain 'name' argument takes.
        """
        return self.module.params.get('path').split('/')[-1]

    def get_domain(self, key=None):
        """Return the (cached) domain dict, or one key of it."""
        if not self.domain:
            self.domain = self._get_domain_internal()
        return self._get_by_key(key, self.domain)

    def get_parent_domain(self, key=None):
        """Return the parent domain; fail if the parent path is missing."""
        path = self.module.params.get('path')
        # Cut off the last path component to get the parent's path.
        path = '/'.join(path.split('/')[:-1])
        if not path:
            return None
        parent_domain = self._get_domain_internal(path=path)
        if not parent_domain:
            self.module.fail_json(msg="Parent domain path %s does not exist" % path)
        return self._get_by_key(key, parent_domain)

    def present_domain(self):
        """Ensure the domain exists: create it or update it in place."""
        domain = self.get_domain()
        if not domain:
            domain = self.create_domain(domain)
        else:
            domain = self.update_domain(domain)
        return domain

    def create_domain(self, domain):
        """Create the domain (no API call in check mode)."""
        self.result['changed'] = True

        args = {
            'name': self.get_name(),
            'parentdomainid': self.get_parent_domain(key='id'),
            'networkdomain': self.module.params.get('network_domain')
        }
        if not self.module.check_mode:
            res = self.query_api('createDomain', **args)
            domain = res['domain']
        return domain

    def update_domain(self, domain):
        """Update the domain's network domain if it differs."""
        args = {
            'id': domain['id'],
            'networkdomain': self.module.params.get('network_domain')
        }
        if self.has_changed(args, domain):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateDomain', **args)
                domain = res['domain']
        return domain

    def absent_domain(self):
        """Ensure the domain is absent; delete it if it exists."""
        domain = self.get_domain()
        if domain:
            self.result['changed'] = True

            if not self.module.check_mode:
                args = {
                    'id': domain['id'],
                    'cleanup': self.module.params.get('clean_up')
                }
                res = self.query_api('deleteDomain', **args)

                # Optionally wait for the async delete job to finish.
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    res = self.poll_job(res, 'domain')
        return domain
def main():
    """Module entry point: build the module, apply the state, exit."""
    spec = cs_argument_spec()
    spec.update(
        path=dict(required=True),
        state=dict(choices=['present', 'absent'], default='present'),
        network_domain=dict(),
        clean_up=dict(type='bool', default=False),
        poll_async=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_dom = AnsibleCloudStackDomain(module)

    if module.params.get('state') == 'absent':
        domain = acs_dom.absent_domain()
    else:
        domain = acs_dom.present_domain()

    module.exit_json(**acs_dom.get_result(domain))
if __name__ == '__main__':
main()
| gpl-3.0 |
mSenyor/sl4a | python/src/Demo/threads/squasher.py | 48 | 3038 | # Coroutine example: general coroutine transfers
#
# The program is a variation of a Simula 67 program due to Dahl & Hoare,
# (Dahl/Dijkstra/Hoare, Structured Programming; Academic Press, 1972)
# who in turn credit the original example to Conway.
#
# We have a number of input lines, terminated by a 0 byte. The problem
# is to squash them together into output lines containing 72 characters
# each. A semicolon must be added between input lines. Runs of blanks
# and tabs in input lines must be squashed into single blanks.
# Occurrences of "**" in input lines must be replaced by "^".
#
# Here's a test case:
# Sample input for the pipeline. The trailing \0 byte is the
# end-of-input marker that assembler() watches for.
test = """\
d = sqrt(b**2 - 4*a*c)
twoa = 2*a
L = -b/twoa
R = d/twoa
A1 = L + R
A2 = L - R\0
"""
# The program should print:
# d = sqrt(b^2 - 4*a*c);twoa = 2*a; L = -b/twoa; R = d/twoa; A1 = L + R;
#A2 = L - R
#done
# getline: delivers the next input line to its invoker
# disassembler: grabs input lines from getline, and delivers them one
# character at a time to squasher, also inserting a semicolon into
# the stream between lines
# squasher: grabs characters from disassembler and passes them on to
# assembler, first replacing "**" with "^" and squashing runs of
# whitespace
# assembler: grabs characters from squasher and packs them into lines
# with 72 character each, delivering each such line to putline;
# when it sees a null byte, passes the last line to putline and
# then kills all the coroutines
# putline: grabs lines from assembler, and just prints them
from Coroutine import *
def getline(text):
    # Producer stage: hand each input line, in order, to the
    # disassembler coroutine.
    for line in text.split('\n'):
        co.tran(codisassembler, line)
def disassembler():
    # Pull whole lines from getline and stream them one character at a
    # time to the squasher, inserting a ';' between consecutive lines.
    while 1:
        card = co.tran(cogetline)
        for ch in card:
            co.tran(cosquasher, ch)
        co.tran(cosquasher, ';')
def squasher():
    # Filter stage: replace "**" with "^" and squash runs of blanks and
    # tabs into a single blank before passing characters on.
    while 1:
        ch = co.tran(codisassembler)
        if ch == '*':
            # Peek one character ahead to detect "**".
            ch2 = co.tran(codisassembler)
            if ch2 == '*':
                ch = '^'
            else:
                # Lone '*': emit it and fall through with the lookahead.
                co.tran(coassembler, ch)
                ch = ch2
        if ch in ' \t':
            # Consume the whole run of whitespace, emit one blank, then
            # continue with the first non-blank character.
            while 1:
                ch2 = co.tran(codisassembler)
                if ch2 not in ' \t':
                    break
            co.tran(coassembler, ' ')
            ch = ch2
        co.tran(coassembler, ch)
def assembler():
    # Packing stage: accumulate characters into 72-column output lines;
    # a NUL byte signals end of input.
    line = ''
    while 1:
        ch = co.tran(cosquasher)
        if ch == '\0':
            break
        if len(line) == 72:
            co.tran(coputline, line)
            line = ''
        line = line + ch
    # Pad the final (partial) line to 72 columns before emitting it,
    # then shut down the whole coroutine pipeline.
    line = line + ' ' * (72 - len(line))
    co.tran(coputline, line)
    co.kill()
def putline():
while 1:
line = co.tran(coassembler)
print line
import string

# Build the pipeline: one coroutine per stage, wired together through
# the global `co` scheduler.
co = Coroutine()
cogetline = co.create(getline, test)
coputline = co.create(putline)
coassembler = co.create(assembler)
codisassembler = co.create(disassembler)
cosquasher = co.create(squasher)

# Start at the consumer end; control flows upstream on demand.
co.tran(coputline)
print 'done'
# end of example
| apache-2.0 |
Kast0rTr0y/ansible | test/units/plugins/connection/test_netconf.py | 60 | 3533 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import re
import json
from io import StringIO
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock, PropertyMock
from ansible.errors import AnsibleConnectionFailure
from ansible.playbook.play_context import PlayContext
PY3 = sys.version_info[0] == 3

# Keep a reference to the real __import__ so everything except ncclient
# still imports normally while the patch below is active.
builtin_import = __import__
mock_ncclient = MagicMock(name='ncclient')


def import_mock(name, *args):
    # Substitute the mock for ncclient (and its submodules); delegate
    # every other import to the real machinery.
    if name.startswith('ncclient'):
        return mock_ncclient
    return builtin_import(name, *args)

# Import the module under test with __import__ patched, so its
# top-level `import ncclient` resolves to the mock even when ncclient
# is not installed. The builtins module name differs between Py2/Py3.
if PY3:
    with patch('builtins.__import__', side_effect=import_mock):
        from ansible.plugins.connection import netconf
else:
    with patch('__builtin__.__import__', side_effect=import_mock):
        from ansible.plugins.connection import netconf
class TestNetconfConnectionClass(unittest.TestCase):
    """Unit tests for the netconf connection plugin (ncclient mocked)."""

    def test_netconf_init(self):
        # A freshly constructed connection is not yet connected and has
        # no ncclient manager; the network_os defaults to 'default'.
        pc = PlayContext()
        new_stdin = StringIO()
        conn = netconf.Connection(pc, new_stdin)
        self.assertEqual('default', conn._network_os)
        self.assertIsNone(conn._manager)
        self.assertFalse(conn._connected)

    def test_netconf__connect(self):
        pc = PlayContext()
        new_stdin = StringIO()
        conn = netconf.Connection(pc, new_stdin)

        # _connect should return (rc, session_id, error) and flip the
        # connected flag once manager.connect() succeeds.
        mock_manager = MagicMock(name='self._manager.connect')
        type(mock_manager).session_id = PropertyMock(return_value='123456789')
        netconf.manager.connect.return_value = mock_manager

        rc, out, err = conn._connect()

        self.assertEqual(0, rc)
        self.assertEqual('123456789', out)
        self.assertEqual('', err)
        self.assertTrue(conn._connected)

    def test_netconf_exec_command(self):
        pc = PlayContext()
        new_stdin = StringIO()
        conn = netconf.Connection(pc, new_stdin)

        conn._connected = True

        # The RPC reply's data_xml is what exec_command returns as stdout.
        mock_manager = MagicMock(name='self._manager')
        mock_reply = MagicMock(name='reply')
        type(mock_reply).data_xml = PropertyMock(return_value='<test/>')
        mock_manager.rpc.return_value = mock_reply
        conn._manager = mock_manager

        rc, out, err = conn.exec_command('<test/>')

        # The request must first be parsed into an XML element.
        netconf.to_ele.assert_called_with('<test/>')

        self.assertEqual(0, rc)
        self.assertEqual('<test/>', out)
        self.assertEqual('', err)

    def test_netconf_exec_command_invalid_request(self):
        pc = PlayContext()
        new_stdin = StringIO()
        conn = netconf.Connection(pc, new_stdin)

        conn._connected = True

        # to_ele returning None signals an unparseable request; the
        # plugin reports it via rc=1 and a stderr message.
        netconf.to_ele.return_value = None
        rc, out, err = conn.exec_command('test string')

        self.assertEqual(1, rc)
        self.assertEqual('', out)
        self.assertEqual('unable to parse request', err)
| gpl-3.0 |
krischer/python-future | src/libpasteurize/fixes/fix_metaclass.py | 61 | 3268 | u"""
Fixer for (metaclass=X) -> __metaclass__ = X
Some semantics (see PEP 3115) may be altered in the translation."""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
from lib2to3.pygram import token
from libfuturize.fixer_util import indentation, suitify
# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
def has_metaclass(parent):
    # Scan a classdef node's children for a `metaclass=X` keyword.
    # Returns either [argument_node, name_leaf, equal_leaf, value_node]
    # (single-argument case) or (comma, meta, equal, name) (arglist
    # case), or None if no metaclass keyword is present.
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            # class X(metaclass=Y): the keyword is the only argument.
            if kids[0] == Leaf(token.NAME, u"metaclass") and \
               kids[1] == Leaf(token.EQUAL, u"=") and \
               kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, u"metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, u"="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            # NOTE(review): if metaclass is the FIRST item
                            # in the arglist, no comma has been seen yet and
                            # `comma` is unbound here -- confirm whether the
                            # grammar guarantees a preceding comma.
                            results = (comma, meta, equal, name)
                            break
    return results
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite `class X(metaclass=Y):` to a `__metaclass__ = Y` body
    assignment (the Python 2 spelling; see PEP 3115)."""

    PATTERN = u"""
    classdef<any*>
    """

    def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        # Strip the metaclass keyword (and its comma, if any) from the
        # class header; the nodes are reused below.
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, u"__metaclass__")
        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = u" "
        stmt_node = Node(syms.atom, [target, equal, name])

        # Ensure the class body is an indented suite we can insert into.
        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break
| mit |
franramirez688/common | edition/parsing/python/python_ast.py | 5 | 1576 |
import ast
import _ast
class PythonAst(ast.NodeVisitor):
    """AST visitor that records import statements and the header names
    passed to the C-link helper function."""

    # Name of the helper whose call arguments are collected as C includes.
    PYTHON_C_MODULE = 'link_clib'

    def __init__(self):
        self.objects = []     # (kind, (line, col), bound_name, 'import', source_text)
        self.c_includes = []  # first argument of every PYTHON_C_MODULE call
        self.hasmain = False

    def generic_visit(self, node):
        ast.NodeVisitor.generic_visit(self, node)

    def visit_Import(self, node):
        position = (node.lineno, node.col_offset)
        for alias in node.names:
            if alias.asname is None:
                bound = alias.name
                text = 'import ' + alias.name
            else:
                bound = alias.asname
                text = 'import ' + alias.name + ' as ' + alias.asname
            self.objects.append(('import', position, bound, 'import', text))

    def visit_ImportFrom(self, node):
        position = (node.lineno, node.col_offset)
        prefix = 'from ' + node.module + ' import '
        for alias in node.names:
            if alias.asname is None:
                bound = alias.name
                text = prefix + alias.name
            else:
                bound = alias.asname
                text = prefix + alias.name + ' as ' + alias.asname
            self.objects.append(('from', position, bound, 'import', text))

    def visit_Call(self, node):
        # visit() returns the truthy result of visit_Name/visit_Attribute
        # when the callee is the C-link helper.
        if self.visit(node.func):
            self.c_includes.append(node.args[0].s)

    def visit_Assign(self, node):
        self.visit(node.value)

    def visit_Name(self, node):
        return node.id == self.PYTHON_C_MODULE

    def visit_Attribute(self, node):
        return node.attr == self.PYTHON_C_MODULE
| mit |
kawamon/hue | desktop/core/ext-py/dnspython-1.15.0/tests/test_rdtypeanyeui.py | 4 | 9292 | # Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <pspacek@redhat.com>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
from io import BytesIO
import dns.rrset
import dns.rdtypes.ANY.EUI48
import dns.rdtypes.ANY.EUI64
import dns.exception
class RdtypeAnyEUI48TestCase(unittest.TestCase):
    '''Tests for the EUI48 rdata type (48-bit extended unique identifier).'''

    # Canonical six-octet identifier shared by the positive tests.
    _EUI = b'\x01\x23\x45\x67\x89\xab'

    def _instantiate(self, value):
        # Build an EUI48 rdata instance directly from binary input.
        return dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
                                           dns.rdatatype.EUI48,
                                           value)

    def testInstOk(self):
        '''Valid binary input.'''
        rdata = self._instantiate(self._EUI)
        self.assertEqual(rdata.eui, self._EUI)

    def testInstLength(self):
        '''Incorrect (seven-octet) input length.'''
        with self.assertRaises(dns.exception.FormError):
            self._instantiate(self._EUI + b'\xcd')

    def testFromTextOk(self):
        '''Valid text input.'''
        rrset = dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
                                    '01-23-45-67-89-ab')
        self.assertEqual(rrset[0].eui, self._EUI)

    def testFromTextLength(self):
        '''Invalid input length.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
                                '00-01-23-45-67-89-ab')

    def testFromTextDelim(self):
        '''Invalid delimiter.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01_23-45-67-89-ab')

    def testFromTextExtraDash(self):
        '''Extra dash instead of a hex digit.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '0--23-45-67-89-ab')

    def testFromTextMultipleTokens(self):
        '''Invalid input split into multiple tokens.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01 23-45-67-89-ab')

    def testFromTextInvalidHex(self):
        '''Invalid hexadecimal input.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI48', 'g0-23-45-67-89-ab')

    def testToTextOk(self):
        '''Valid text output.'''
        rdata = self._instantiate(self._EUI)
        self.assertEqual('01-23-45-67-89-ab', rdata.to_text())

    def testToWire(self):
        '''Valid wire format.'''
        rdata = self._instantiate(self._EUI)
        writer = BytesIO()
        rdata.to_wire(writer)
        self.assertEqual(writer.getvalue(), self._EUI)

    def testFromWireOk(self):
        '''Valid wire format surrounded by unrelated padding.'''
        padding = 100
        wire = dns.wiredata.WireData(
            b'x' * padding + self._EUI + b'y' * padding * 2)
        rdata = dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
                                                      dns.rdatatype.EUI48,
                                                      wire,
                                                      padding,
                                                      len(self._EUI))
        self.assertEqual(rdata.eui, self._EUI)

    def testFromWireLength(self):
        '''Wire data holding a truncated (five-octet) value.'''
        short = self._EUI[:5]
        padding = 100
        wire = dns.wiredata.WireData(
            b'x' * padding + short + b'y' * padding * 2)
        with self.assertRaises(dns.exception.FormError):
            dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
                                                  dns.rdatatype.EUI48,
                                                  wire,
                                                  padding,
                                                  len(short))
class RdtypeAnyEUI64TestCase(unittest.TestCase):
    '''Tests for the EUI64 rdata type (64-bit extended unique identifier).'''

    # Canonical eight-octet identifier shared by the positive tests.
    _EUI = b'\x01\x23\x45\x67\x89\xab\xcd\xef'

    def _instantiate(self, value):
        # Build an EUI64 rdata instance directly from binary input.
        return dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
                                           dns.rdatatype.EUI64,
                                           value)

    def testInstOk(self):
        '''Valid binary input.'''
        rdata = self._instantiate(self._EUI)
        self.assertEqual(rdata.eui, self._EUI)

    def testInstLength(self):
        '''Incorrect (six-octet) input length.'''
        with self.assertRaises(dns.exception.FormError):
            self._instantiate(self._EUI[:6])

    def testFromTextOk(self):
        '''Valid text input.'''
        rrset = dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                    '01-23-45-67-89-ab-cd-ef')
        self.assertEqual(rrset[0].eui, self._EUI)

    def testFromTextLength(self):
        '''Invalid input length.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                '01-23-45-67-89-ab')

    def testFromTextDelim(self):
        '''Invalid delimiter.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                '01_23-45-67-89-ab-cd-ef')

    def testFromTextExtraDash(self):
        '''Extra dash instead of a hex digit.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                '0--23-45-67-89-ab-cd-ef')

    def testFromTextMultipleTokens(self):
        '''Invalid input split into multiple tokens.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                '01 23-45-67-89-ab-cd-ef')

    def testFromTextInvalidHex(self):
        '''Invalid hexadecimal input.'''
        with self.assertRaises(dns.exception.SyntaxError):
            dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
                                'g0-23-45-67-89-ab-cd-ef')

    def testToTextOk(self):
        '''Valid text output.'''
        rdata = self._instantiate(self._EUI)
        self.assertEqual('01-23-45-67-89-ab-cd-ef', rdata.to_text())

    def testToWire(self):
        '''Valid wire format.'''
        rdata = self._instantiate(self._EUI)
        writer = BytesIO()
        rdata.to_wire(writer)
        self.assertEqual(writer.getvalue(), self._EUI)

    def testFromWireOk(self):
        '''Valid wire format surrounded by unrelated padding.'''
        padding = 100
        wire = dns.wiredata.WireData(
            b'x' * padding + self._EUI + b'y' * padding * 2)
        rdata = dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
                                                      dns.rdatatype.EUI64,
                                                      wire,
                                                      padding,
                                                      len(self._EUI))
        self.assertEqual(rdata.eui, self._EUI)

    def testFromWireLength(self):
        '''Wire data holding a truncated (five-octet) value.'''
        short = self._EUI[:5]
        padding = 100
        wire = dns.wiredata.WireData(
            b'x' * padding + short + b'y' * padding * 2)
        with self.assertRaises(dns.exception.FormError):
            dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
                                                  dns.rdatatype.EUI64,
                                                  wire,
                                                  padding,
                                                  len(short))
if __name__ == '__main__':
    # Allow running this test module directly: `python test_rdtypeanyeui.py`.
    unittest.main()
| apache-2.0 |
charbeljc/OCB | addons/account_followup/tests/__init__.py | 261 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_followup
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dhenrygithub/QGIS | python/plugins/processing/algs/lidar/lastools/lasoverage.py | 3 | 3197 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasoverage.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasoverage(LAStoolsAlgorithm):
    """Processing wrapper around the LAStools `lasoverage` binary."""

    CHECK_STEP = "CHECK_STEP"
    OPERATION = "OPERATION"
    OPERATIONS = ["classify as overlap", "flag as withheld", "remove from output"]

    def defineCharacteristics(self):
        """Declare algorithm metadata and its GUI parameters."""
        self.name, self.i18n_name = self.trAlgorithm('lasoverage')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParametersHorizontalFeetGUI()
        self.addParametersFilesAreFlightlinesGUI()
        self.addParameter(ParameterNumber(
            lasoverage.CHECK_STEP,
            self.tr("size of grid used for scan angle check"), 0, None, 1.0))
        self.addParameter(ParameterSelection(
            lasoverage.OPERATION,
            self.tr("mode of operation"), lasoverage.OPERATIONS, 0))
        self.addParametersPointOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        """Assemble the lasoverage command line and execute it."""
        cmd = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasoverage")]
        self.addParametersVerboseCommands(cmd)
        self.addParametersPointInputCommands(cmd)
        self.addParametersHorizontalFeetCommands(cmd)
        self.addParametersFilesAreFlightlinesCommands(cmd)
        grid_step = self.getParameterValue(lasoverage.CHECK_STEP)
        # 1.0 is the tool's built-in default, so only pass -step when the
        # user picked something else.
        if grid_step != 1.0:
            cmd.append("-step")
            cmd.append(unicode(grid_step))
        # Selection index 0 ("classify as overlap") is lasoverage's default
        # behaviour and needs no extra switch.
        mode = self.getParameterValue(lasoverage.OPERATION)
        flag = {1: "-flag_as_withheld", 2: "-remove_overage"}.get(mode)
        if flag is not None:
            cmd.append(flag)
        self.addParametersPointOutputCommands(cmd)
        self.addParametersAdditionalCommands(cmd)
        LAStoolsUtils.runLAStools(cmd, progress)
| gpl-2.0 |
mokshaproject/moksha | moksha.common/moksha/common/lib/cache.py | 2 | 1568 | # This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger(__name__)
from moksha.exc import CacheBackendException
class Cache(object):
    """A memcached-specific caching interface.

    When a *prefix* is configured it is transparently prepended to every
    key on both reads and writes, so callers always work with unprefixed
    keys.
    """

    def __init__(self, url, timeout=None, prefix=None):
        """Connect to the memcached server at *url*.

        :param url: ``host:port`` address of the memcached server.
        :param timeout: default expiration in seconds used by :meth:`set`
            when no per-call timeout is given (falls back to 300).
        :param prefix: optional namespace prepended to every key.
        :raises CacheBackendException: if the server cannot be reached.
        """
        # Keep the attribute defined even when the client library is
        # missing, so later attribute access fails predictably.
        self.mc = None
        try:
            import memcache
        except ImportError:
            log.warning('Cannot import the `memcache` module. Install the '
                        'python-memcached package to enable Mokshas memcached '
                        'integration.')
            return
        self.mc = memcache.Client([url])
        self.timeout = timeout or 300
        self.prefix = prefix
        # memcache clients fail silently, so probe with a short-lived write
        # to surface a misconfigured server immediately.
        if not self.mc.set('x', 'x', 1):
            raise CacheBackendException("Cannot connect to Memcached")

    def _namespaced(self, key):
        # Apply the configured namespace prefix, if any.
        if self.prefix:
            return self.prefix + key
        return key

    def get(self, key):
        """Return the cached value for *key*, or None when missing.

        Bug fix: the prefix used to be applied only by :meth:`set`, so any
        value written under a prefix could never be read back through
        :meth:`get`.  Both methods now share the same key namespacing.
        """
        return self.mc.get(self._namespaced(key))

    def set(self, key, value, timeout=None):
        """Store *value* under *key*, expiring after *timeout* seconds."""
        self.mc.set(self._namespaced(key), value, timeout or self.timeout)
| apache-2.0 |
tjanez/ansible | lib/ansible/modules/network/eos/eos_system.py | 8 | 11370 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
DOCUMENTATION = """
---
module: eos_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Arista EOS devices
description:
- This module provides declarative management of node system attributes
on Arista EOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- The C(hostname) argument will configure the device hostname
parameter on Arista EOS devices. The C(hostname) value is an
ASCII string value.
required: false
default: null
domain_name:
description:
- The C(description) argument will configure the IP domain name
on the remote device to the provided value. The C(domain_name)
argument should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name
required: false
default: null
domain_list:
description:
- The C(domain_list) provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
required: false
default: null
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) can only exist in a single VRF. This
argument accepts either a list of interface names or a list of
hashes that configure the interface name and VRF name. See
examples.
required: false
default: null
name_servers:
description:
- The C(name_serves) argument accepts a list of DNS name servers by
way of either FQDN or IP address to use to perform name resolution
lookups. This argument accepts wither a list of DNS servers or
a list of hashes that configure the name server and VRF name. See
examples.
required: false
default: null
state:
description:
- The C(state) argument configures the state of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
eos_system:
hostname: eos01
domain_name: eng.ansible.com
- name: remove configuration
eos_system:
state: absent
- name: configure DNS lookup sources
eos_system:
lookup_source: Management1
- name: configure DNS lookup sources with VRF support
eos_system:
lookup_source:
- interface: Management1
vrf: mgmt
- interface: Ethernet1
vrf: myvrf
- name: configure name servers
eos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure name servers with VRF support
eos_system:
name_servers:
- { server: 8.8.8.8, vrf: mgmt }
- { server: 8.8.4.4, vrf: mgmt }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname eos01
- ip domain-name eng.ansible.com
session_name:
description: The EOS config session name used to load the configuration
returned: when changed is True
type: str
sample: ansible_1479315771
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.eos import load_config, get_config
from ansible.module_utils.eos import eos_argument_spec
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
    """Return True when *vrf* exists on the device.

    The device's VRF list is harvested from the running config on the
    first call and memoized in the module-level ``_CONFIGURED_VRFS`` cache
    for the rest of the run.
    """
    global _CONFIGURED_VRFS
    if _CONFIGURED_VRFS is None:
        running = get_config(module)
        _CONFIGURED_VRFS = re.findall(r'vrf definition (\S+)', running)
        # The implicit default VRF never appears as a definition line.
        _CONFIGURED_VRFS.append('default')
    return vrf in _CONFIGURED_VRFS
def map_obj_to_commands(want, have, module):
    """Diff the desired state against the device state into CLI commands.

    :param want: dict of desired values from the playbook parameters.
    :param have: dict of current values parsed from the running config.
    :param module: AnsibleModule, used for ``state`` and for failing fast
        when a referenced VRF does not exist on the device.
    :returns: ordered list of configuration-mode command strings.
    """
    commands = list()
    state = module.params['state']
    # A key needs a command only when the playbook set it AND it differs
    # from what the device currently has.
    needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
    if state == 'absent':
        if have['domain_name']:
            commands.append('no ip domain-name')
        if have['hostname'] != 'localhost':
            commands.append('no hostname')
    if state == 'present':
        if needs_update('hostname'):
            commands.append('hostname %s' % want['hostname'])
        if needs_update('domain_name'):
            commands.append('ip domain-name %s' % want['domain_name'])
        if want['domain_list']:
            # handle domain_list items to be removed
            for item in set(have['domain_list']).difference(want['domain_list']):
                commands.append('no ip domain-list %s' % item)
            # handle domain_list items to be added
            for item in set(want['domain_list']).difference(have['domain_list']):
                commands.append('ip domain-list %s' % item)
        if want['lookup_source']:
            # handle lookup_source items to be removed
            for item in have['lookup_source']:
                if item not in want['lookup_source']:
                    # item['vrf'] is None for the global (non-VRF) form.
                    if item['vrf']:
                        if not has_vrf(module, item['vrf']):
                            module.fail_json(msg='vrf %s is not configured' % item['vrf'])
                        values = (item['vrf'], item['interface'])
                        commands.append('no ip domain lookup vrf %s source-interface %s' % values)
                    else:
                        commands.append('no ip domain lookup source-interface %s' % item['interface'])
            # handle lookup_source items to be added
            for item in want['lookup_source']:
                if item not in have['lookup_source']:
                    if item['vrf']:
                        if not has_vrf(module, item['vrf']):
                            module.fail_json(msg='vrf %s is not configured' % item['vrf'])
                        values = (item['vrf'], item['interface'])
                        commands.append('ip domain lookup vrf %s source-interface %s' % values)
                    else:
                        commands.append('ip domain lookup source-interface %s' % item['interface'])
        if want['name_servers']:
            # handle name_servers items to be removed. Order does matter here
            # since name servers can only be in one vrf at a time
            for item in have['name_servers']:
                if item not in want['name_servers']:
                    if not has_vrf(module, item['vrf']):
                        module.fail_json(msg='vrf %s is not configured' % item['vrf'])
                    values = (item['vrf'], item['server'])
                    commands.append('no ip name-server vrf %s %s' % values)
            # handle name_servers items to be added
            for item in want['name_servers']:
                if item not in have['name_servers']:
                    if not has_vrf(module, item['vrf']):
                        module.fail_json(msg='vrf %s is not configured' % item['vrf'])
                    values = (item['vrf'], item['server'])
                    commands.append('ip name-server vrf %s %s' % values)
    return commands
def parse_hostname(config):
    """Extract the configured hostname from the running config.

    Returns None when no ``hostname`` line is present (e.g. a
    factory-default device), instead of raising AttributeError as the
    previous implementation did when the regex found no match.  This
    mirrors the behaviour of the sibling ``parse_domain_name``.
    """
    match = re.search(r'^hostname (\S+)', config, re.M)
    if match:
        return match.group(1)
    return None
def parse_domain_name(config):
    """Return the configured ``ip domain-name`` value, or None."""
    match = re.search(r'^ip domain-name (\S+)', config, re.M)
    return match.group(1) if match else None
def parse_lookup_source(config):
    """Collect DNS lookup source interfaces as interface/vrf dicts.

    An empty vrf capture corresponds to the global (non-VRF) form of the
    command and is normalized to None.
    """
    regex = r'ip domain lookup (?:vrf (\S+) )*source-interface (\S+)'
    return [{'interface': intf, 'vrf': vrf if vrf else None}
            for vrf, intf in re.findall(regex, config, re.M)]
def parse_name_servers(config):
    """Collect VRF-scoped name-server entries as server/vrf dicts."""
    matches = re.findall(r'ip name-server vrf (\S+) (\S+)', config, re.M)
    return [{'server': addr, 'vrf': vrf} for vrf, addr in matches]
def map_config_to_obj(module):
    """Parse the device running-config into the comparison dict."""
    running = get_config(module)
    return {
        'hostname': parse_hostname(running),
        'domain_name': parse_domain_name(running),
        'domain_list': re.findall(r'^ip domain-list (\S+)', running, re.M),
        'lookup_source': parse_lookup_source(running),
        'name_servers': parse_name_servers(running),
    }
def map_params_to_obj(module):
    """Normalize the module parameters into the desired-state dict."""
    params = module.params
    obj = {
        'hostname': params['hostname'],
        'domain_name': params['domain_name'],
        'domain_list': params['domain_list'],
    }
    # Both arguments accept plain strings or dicts; ComplexList coerces
    # them into a uniform list-of-dicts shape.
    casts = {
        'lookup_source': ComplexList(dict(interface=dict(key=True),
                                          vrf=dict()), module),
        'name_servers': ComplexList(dict(server=dict(key=True),
                                         vrf=dict(default='default')), module),
    }
    for name, cast in casts.items():
        value = params[name]
        obj[name] = cast(value) if value is not None else None
    return obj
def main():
    """Main entry point for module execution.

    Computes the command diff between desired and current state, loads it
    onto the device (committing only outside check mode), and exits with
    the standard Ansible result dict.
    """
    argument_spec = dict(
        hostname=dict(),
        domain_name=dict(),
        domain_list=dict(type='list'),
        # { interface: <str>, vrf: <str> }
        lookup_source=dict(type='list'),
        # { server: <str>; vrf: <str> }
        name_servers=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(eos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    result = {'changed': False}
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands
    if commands:
        # In check mode the config session is still loaded so a diff can
        # be produced, but it is not committed.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
msingh172/youtube-dl | youtube_dl/extractor/aljazeera.py | 95 | 1253 | from __future__ import unicode_literals
from .common import InfoExtractor
class AlJazeeraIE(InfoExtractor):
    """Extractor for aljazeera.com programme pages (Brightcove-hosted)."""

    _VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'

    _TEST = {
        'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
        'info_dict': {
            'id': '3792260579001',
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
            'uploader': 'Al Jazeera English',
        },
        'add_ie': ['Brightcove'],
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)
        # The page embeds the Brightcove video id in a JS player call.
        brightcove_id = self._search_regex(
            r'RenderPagesVideo\(\'(.+?)\'', page, 'brightcove id')
        player_url = (
            'brightcove:'
            'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
            '&%40videoPlayer={0}'.format(brightcove_id))
        # Hand the resolved reference off to BrightcoveIE.
        return {
            '_type': 'url',
            'url': player_url,
            'ie_key': 'Brightcove',
        }
| unlicense |
luiseduardohdbackup/odoo | addons/l10n_uy/__init__.py | 438 | 1070 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <openerp.uy@lists.launchpad.net>
# Proyecto de Localización de OperERP para Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
pilou-/ansible | lib/ansible/modules/network/ftd/ftd_configuration.py | 45 | 5075 | #!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ftd_configuration
short_description: Manages configuration on Cisco FTD devices over REST API
description:
- Manages configuration on Cisco FTD devices including creating, updating, removing configuration objects,
scheduling and staring jobs, deploying pending changes, etc. All operations are performed over REST API.
version_added: "2.7"
author: "Cisco Systems, Inc. (@annikulin)"
options:
operation:
description:
- The name of the operation to execute. Commonly, the operation starts with 'add', 'edit', 'get', 'upsert'
or 'delete' verbs, but can have an arbitrary name too.
required: true
type: str
data:
description:
- Key-value pairs that should be sent as body parameters in a REST API call
type: dict
query_params:
description:
- Key-value pairs that should be sent as query parameters in a REST API call.
type: dict
path_params:
description:
- Key-value pairs that should be sent as path parameters in a REST API call.
type: dict
register_as:
description:
- Specifies Ansible fact name that is used to register received response from the FTD device.
type: str
filters:
description:
- Key-value dict that represents equality filters. Every key is a property name and value is its desired value.
If multiple filters are present, they are combined with logical operator AND.
type: dict
"""
EXAMPLES = """
- name: Create a network object
ftd_configuration:
operation: "addNetworkObject"
data:
name: "Ansible-network-host"
description: "From Ansible with love"
subType: "HOST"
value: "192.168.2.0"
dnsResolution: "IPV4_AND_IPV6"
type: "networkobject"
isSystemDefined: false
register_as: "hostNetwork"
- name: Delete the network object
ftd_configuration:
operation: "deleteNetworkObject"
path_params:
objId: "{{ hostNetwork['id'] }}"
"""
RETURN = """
response:
description: HTTP response returned from the API call.
returned: success
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.configuration import BaseConfigurationResource, CheckModeException, \
FtdInvalidOperationNameError
from ansible.module_utils.network.ftd.fdm_swagger_client import ValidationError
from ansible.module_utils.network.ftd.common import construct_ansible_facts, FtdConfigurationError, \
FtdServerError, FtdUnexpectedResponse
def main():
    """Module entry point: run one FTD REST operation over the connection.

    Translates every domain exception raised by the configuration layer
    into the appropriate Ansible exit path (fail_json with a descriptive
    message, or a no-change exit for check mode).
    """
    fields = dict(
        operation=dict(type='str', required=True),
        data=dict(type='dict'),
        query_params=dict(type='dict'),
        path_params=dict(type='dict'),
        register_as=dict(type='str'),
        filters=dict(type='dict')
    )
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)
    params = module.params

    # The persistent connection plugin proxies all REST calls to the device.
    connection = Connection(module._socket_path)
    resource = BaseConfigurationResource(connection, module.check_mode)
    op_name = params['operation']
    try:
        resp = resource.execute_operation(op_name, params)
        module.exit_json(changed=resource.config_changed, response=resp,
                         ansible_facts=construct_ansible_facts(resp, module.params))
    except FtdInvalidOperationNameError as e:
        module.fail_json(msg='Invalid operation name provided: %s' % e.operation_name)
    except FtdConfigurationError as e:
        module.fail_json(msg='Failed to execute %s operation because of the configuration error: %s' % (op_name, e.msg))
    except FtdServerError as e:
        module.fail_json(msg='Server returned an error trying to execute %s operation. Status code: %s. '
                             'Server response: %s' % (op_name, e.code, e.response))
    except FtdUnexpectedResponse as e:
        module.fail_json(msg=e.args[0])
    except ValidationError as e:
        module.fail_json(msg=e.args[0])
    except CheckModeException:
        # Raised by the resource layer when check mode short-circuits the
        # operation; report "no change" rather than failing.
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
| gpl-3.0 |
marcli/sos | sos/plugins/radius.py | 12 | 1735 | # Copyright (C) 2007 Navid Sheikhol-Eslami <navid@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Radius(Plugin):
    """RADIUS service information
    """
    plugin_name = "radius"
    profiles = ('network', 'identity')
    # The plugin activates only when the freeradius package is installed.
    packages = ('freeradius',)
class RedHatRadius(Radius, RedHatPlugin):
    """Red Hat variant: freeradius config lives under /etc/raddb."""

    files = ('/etc/raddb',)

    def setup(self):
        """Collect the raddb configuration, PAM stack and radius logs."""
        super(RedHatRadius, self).setup()
        paths = [
            "/etc/raddb",
            "/etc/pam.d/radiusd",
            "/var/log/radius",
        ]
        self.add_copy_spec(paths)

    def postproc(self):
        """Mask the SQL backend password before it leaves the host."""
        self.do_file_sub(
            "/etc/raddb/sql.conf", r"(\s*password\s*=\s*)\S+", r"\1***")
class DebianRadius(Radius, DebianPlugin, UbuntuPlugin):
    """Debian/Ubuntu variant: freeradius config lives under /etc/freeradius."""

    files = ('/etc/freeradius',)

    def setup(self):
        """Collect freeradius configuration, defaults and logs."""
        super(DebianRadius, self).setup()
        paths = [
            "/etc/freeradius",
            "/etc/pam.d/radiusd",
            "/etc/default/freeradius",
            "/var/log/freeradius",
        ]
        self.add_copy_spec(paths)
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
wwj718/murp-edx | common/djangoapps/django_comment_common/migrations/0001_initial.py | 188 | 6980 | # -*- coding: utf-8 -*-
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
#
# cdodge: This is basically an empty migration since everything has - up to now - managed in the django_comment_client app
# But going forward we should be using this migration
#
    def forwards(self, orm):
        # Intentionally a no-op: per the class comment above, the tables
        # were historically managed by the django_comment_client app, so
        # this initial migration has nothing to create.
        pass
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_comment_common.permission': {
'Meta': {'object_name': 'Permission'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'permissions'", 'symmetrical': 'False', 'to': "orm['django_comment_common.Role']"})
},
'django_comment_common.role': {
'Meta': {'object_name': 'Role'},
'course_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'roles'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
}
}
complete_apps = ['django_comment_common']
| agpl-3.0 |
j-carpentier/nova | nova/tests/unit/virt/libvirt/test_imagebackend.py | 13 | 65039 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import contextlib
import inspect
import os
import shutil
import tempfile
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import keymgr
from nova import objects
from nova.openstack.common import imageutils
from nova import test
from nova.tests.unit import fake_processutils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import rbd_utils
# Module-level handle on the global oslo.config registry.
CONF = cfg.CONF
# Make the keymgr 'fixed_key' option available; EncryptedLvmTestCase
# sets it via self.flags(..., group='keymgr').
CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
class FakeSecret(object):
    # Stand-in for a libvirt secret object used by the get_model() tests.
    def value(self):
        # "MTIzNDU2Cg==" is the base64 encoding of b"123456\n".
        encoded = "MTIzNDU2Cg=="
        return base64.b64decode(encoded)
class FakeConn(object):
    # Minimal fake libvirt connection exposing only secret lookup.
    def secretLookupByUUIDString(self, uuid):
        # Always hand back the canned secret, regardless of the UUID asked
        # for; the tests only care that *a* secret object is returned.
        return FakeSecret()
class _ImageTestCase(object):
    # Mixin shared by the per-backend image test cases below.  Subclasses
    # assign self.image_class in their setUp() before calling super().

    # Replace image.create_image with a shim that invokes the fetch
    # callable directly against the cache path, bypassing backend work.
    def mock_create_image(self, image):
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image
    # Build a throwaway instances directory, a fake Instance object and
    # all of the path constants the individual tests reference.
    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instances_path=self.INSTANCES_PATH)
        self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
        self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
                                           self.INSTANCE['uuid'], 'disk.info')
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.CONTEXT = context.get_admin_context()
        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    # Remove the temporary instances directory created in setUp().
    def tearDown(self):
        super(_ImageTestCase, self).tearDown()
        shutil.rmtree(self.INSTANCES_PATH)
    # Preallocation should fallocate the image exactly once even when
    # cache() is invoked twice (the probe command runs only once).
    def test_prealloc_image(self):
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)
        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
    # With no write access to the path, fallocate must not be attempted.
    def test_prealloc_image_without_write_access(self):
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: False)
        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    # libvirt_fs_info() should build a filesystem config whose source
    # type matches whether the backend is block- or file-based.
    def test_libvirt_fs_info(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        fs = image.libvirt_fs_info("/mnt")
        # check that exception hasn't been raised and the method
        # returned correct object
        self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
        self.assertEqual(fs.target_dir, "/mnt")
        if image.is_block_dev:
            self.assertEqual(fs.source_type, "block")
            self.assertEqual(fs.source_dev, image.path)
        else:
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.source_file, image.path)
    # libvirt_info() should propagate the quota extra specs into the
    # generated guest disk config.
    def test_libvirt_info(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        extra_specs = {
            'quota:disk_read_bytes_sec': 10 * units.Mi,
            'quota:disk_read_iops_sec': 1 * units.Ki,
            'quota:disk_write_bytes_sec': 20 * units.Mi,
            'quota:disk_write_iops_sec': 2 * units.Ki,
            'quota:disk_total_bytes_sec': 30 * units.Mi,
            'quota:disk_total_iops_sec': 3 * units.Ki,
        }
        disk = image.libvirt_info(disk_bus="virtio",
                                  disk_dev="/dev/vda",
                                  device_type="cdrom",
                                  cache_mode="none",
                                  extra_specs=extra_specs,
                                  hypervisor_version=4004001)
        self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
        self.assertEqual("/dev/vda", disk.target_dev)
        self.assertEqual("virtio", disk.target_bus)
        self.assertEqual("none", disk.driver_cache)
        self.assertEqual("cdrom", disk.source_device)
        self.assertEqual(10 * units.Mi, disk.disk_read_bytes_sec)
        self.assertEqual(1 * units.Ki, disk.disk_read_iops_sec)
        self.assertEqual(20 * units.Mi, disk.disk_write_bytes_sec)
        self.assertEqual(2 * units.Ki, disk.disk_write_iops_sec)
        self.assertEqual(30 * units.Mi, disk.disk_total_bytes_sec)
        self.assertEqual(3 * units.Ki, disk.disk_total_iops_sec)
    # get_disk_size() should delegate to nova.virt.disk.api.get_disk_size.
    @mock.patch('nova.virt.disk.api.get_disk_size')
    def test_get_disk_size(self, get_disk_size):
        get_disk_size.return_value = 2361393152
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertEqual(2361393152, image.get_disk_size(image.path))
        get_disk_size.assert_called_once_with(image.path)
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
    # Tests for the flat-file ("raw") image backend.  The mox record /
    # replay sequences below are order-sensitive: each expectation must
    # appear in the exact order the code under test makes the calls.
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Raw
        super(RawTestCase, self).setUp()
        self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
    # Common mox setup: stub the lock decorator, copy_image and extend,
    # and return a mock fetch callable for the individual tests.
    def prepare_mocks(self):
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    # cache() with nothing on disk: base dir is created and fetch runs.
    def test_cache(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # cache() is a no-op when the target image already exists.
    def test_cache_image_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    # An existing base dir must not be re-created by cache().
    def test_cache_base_dir_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # With the template already cached, fetch must not be invoked.
    def test_cache_template_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    # create_image() fetches the template then copies it into place.
    def test_create_image(self):
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
        self.mox.VerifyAll()
    # A generated (non-template) image is created directly at self.PATH.
    def test_create_image_generated(self):
        fn = self.prepare_mocks()
        fn(target=self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    # When a size is given the copied image must then be extended.
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    def test_create_image_extend(self, fake_qemu_img_info):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        image = imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW)
        imagebackend.disk.extend(image, self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
        self.mox.VerifyAll()
    # driver_format should be resolved from qemu-img when no disk.info
    # file is present.
    def test_correct_format(self):
        self.stubs.UnsetAll()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        info = self.mox.CreateMockAnything()
        info.file_format = 'foo'
        imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
        os.path.exists(CONF.instances_path).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')
        self.mox.VerifyAll()
    # If qemu-img probing fails, the format falls back to 'raw'.
    @mock.patch.object(images, 'qemu_img_info',
                       side_effect=exception.InvalidDiskInfo(
                           reason='invalid path'))
    def test_resolve_driver_format(self, fake_qemu_img_info):
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'raw')
    # get_model() should describe the backend as a raw local file.
    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_RAW),
                         model)
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    # Tests for the qcow2 (copy-on-write) image backend.  As with the
    # raw tests, the mox expectations are strictly ordered.
    SIZE = units.Gi
    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Resized backing files are named "<template>_<size-in-GiB>".
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / units.Gi))
    # Common mox setup for the create_image tests below.
    def prepare_mocks(self):
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'create_cow_image')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    # cache() with nothing on disk triggers a fetch of the template.
    def test_cache(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # cache() is a no-op when the target image already exists.
    def test_cache_image_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    # An existing base dir is reused; the template is still fetched.
    def test_cache_base_dir_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # With the template already cached, fetch must not be invoked.
    def test_cache_template_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    # create_image() makes a COW overlay on top of the cached template.
    def test_create_image(self):
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    # When a size is given, the base size is verified and the overlay
    # is extended to the requested size.
    def test_create_image_with_size(self):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.Image,
                                 'verify_base_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        image = imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2)
        imagebackend.disk.extend(image, self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    # Requesting a flavor smaller than the base image must raise.
    def test_create_image_too_small(self):
        fn = self.prepare_mocks()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
                                         ).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.FlavorDiskSmallerThanImage,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        self.mox.VerifyAll()
    # A missing resized backing file is regenerated by copying the
    # template and extending the copy to the requested size.
    def test_generate_resized_backing_files(self):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        self.mox.StubOutWithMock(imagebackend.Image,
                                 'verify_base_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(self.QCOW2_BASE)
        os.path.exists(self.QCOW2_BASE).AndReturn(False)
        imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
                                              self.QCOW2_BASE)
        image = imgmodel.LocalFileImage(self.QCOW2_BASE,
                                        imgmodel.FORMAT_QCOW2)
        imagebackend.disk.extend(image, self.SIZE)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    # An existing qcow2 without a backing file needs no regeneration.
    def test_qcow2_exists_and_has_no_backing_file(self):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        self.mox.StubOutWithMock(imagebackend.Image,
                                 'verify_base_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(None)
        imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    # Driver format for this backend is always 'qcow2'.
    def test_resolve_driver_format(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'qcow2')
    # get_model() should describe the backend as a qcow2 local file.
    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_QCOW2),
                         model)
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    # Tests for the (unencrypted) LVM image backend: images live as
    # logical volumes in a configured volume group.
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(images_volume_group=self.VG, group='libvirt')
        # Encryption is exercised separately in EncryptedLvmTestCase.
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.OLD_STYLE_INSTANCE_PATH = None
        self.PATH = os.path.join('/dev', self.VG, self.LV)
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.lvm = imagebackend.lvm
    # Common mox setup for the _create_image* helpers below.
    def prepare_mocks(self):
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(self.disk, 'resize2fs')
        self.mox.StubOutWithMock(self.lvm, 'create_volume')
        self.mox.StubOutWithMock(self.disk, 'get_disk_size')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn
    # Template-based create: make an LV and qemu-img convert into it.
    def _create_image(self, sparse):
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.TEMPLATE_SIZE,
                               sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    # Generated create: the fetch function writes straight into the LV.
    def _create_image_generated(self, sparse):
        fn = self.prepare_mocks()
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        fn(target=self.PATH, ephemeral_size=None)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)
        self.mox.VerifyAll()
    # Resize create: convert then grow the filesystem with resize2fs.
    def _create_image_resize(self, sparse):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.disk.resize2fs(self.PATH, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    # cache() with nothing on disk triggers template fetch.
    def test_cache(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # cache() is a no-op when the LV already exists.
    def test_cache_image_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    # An existing base dir must not be re-created by cache().
    def test_cache_base_dir_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    # The following pairs exercise each helper with sparse LVs both
    # disabled (default) and enabled via sparse_logical_volumes.
    def test_create_image(self):
        self._create_image(False)
    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)
    def test_create_image_generated(self):
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)
    def test_create_image_resize(self):
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)
    # On failure during create, the partially-created LV is removed
    # and the error propagates to the caller.
    def test_create_image_negative(self):
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False
                               ).AndRaise(RuntimeError())
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    # Same cleanup guarantee for the generated-image path.
    def test_create_image_generated_negative(self):
        fn = self.prepare_mocks()
        fn(target=self.PATH,
           ephemeral_size=None).AndRaise(RuntimeError())
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        self.mox.VerifyAll()
    # Preallocation is meaningless for LVs: no fallocate commands run.
    def test_prealloc_image(self):
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the LVM image backend with ephemeral storage encryption.

    Every LVM / dm-crypt / libvirt helper is replaced by a mock; the tests
    only verify which calls the backend makes and in what form.
    """
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        """Enable ephemeral encryption and wire fake LVM/dm-crypt names."""
        super(EncryptedLvmTestCase, self).setUp()
        self.image_class = imagebackend.Lvm
        self.flags(enabled=True, group='ephemeral_storage_encryption')
        self.flags(cipher='aes-xts-plain64',
                   group='ephemeral_storage_encryption')
        self.flags(key_size=512, group='ephemeral_storage_encryption')
        self.flags(fixed_key='00000000000000000000000000000000'
                             '00000000000000000000000000000000',
                   group='keymgr')
        self.flags(images_volume_group=self.VG, group='libvirt')
        # Logical volume name and the dm-crypt mapper path derived from it.
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.OLD_STYLE_INSTANCE_PATH = None
        self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
        self.PATH = os.path.join('/dev/mapper',
                                 imagebackend.dmcrypt.volume_name(self.LV))
        self.key_manager = keymgr.API()
        self.INSTANCE['ephemeral_key_uuid'] =\
            self.key_manager.create_key(self.CONTEXT)
        self.KEY = self.key_manager.get_key(self.CONTEXT,
            self.INSTANCE['ephemeral_key_uuid']).get_encoded()
        # Short-hands for the modules the backend delegates to.
        self.lvm = imagebackend.lvm
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.libvirt_utils = imagebackend.libvirt_utils
        self.dmcrypt = imagebackend.dmcrypt
    def _create_image(self, sparse):
        """Create from an existing template; expect LV + dm-crypt + convert."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
                               context=self.CONTEXT)
            fn.assert_called_with(context=self.CONTEXT,
                                  max_size=self.TEMPLATE_SIZE,
                                  target=self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(self.VG,
                                                      self.LV,
                                                      self.TEMPLATE_SIZE,
                                                      sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            # The template must be copied onto the encrypted device as raw.
            cmd = ('qemu-img',
                   'convert',
                   '-O',
                   'raw',
                   self.TEMPLATE_PATH,
                   self.PATH)
            self.utils.execute.assert_called_with(*cmd, run_as_root=True)
    def _create_image_generated(self, sparse):
        """Generate content in place; fn must target the mapper device."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH,
                               self.SIZE,
                               ephemeral_size=None,
                               context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(target=self.PATH,
                                  ephemeral_size=None, context=self.CONTEXT)
    def _create_image_resize(self, sparse):
        """Create larger than the template; expect convert + resize2fs."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
                               context=self.CONTEXT)
            fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
                                  target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            cmd = ('qemu-img',
                   'convert',
                   '-O',
                   'raw',
                   self.TEMPLATE_PATH,
                   self.PATH)
            self.utils.execute.assert_called_with(*cmd, run_as_root=True)
            self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
    def test_create_image(self):
        """Non-sparse creation from a template."""
        self._create_image(False)
    def test_create_image_sparsed(self):
        """Sparse creation from a template."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)
    def test_create_image_generated(self):
        """Non-sparse creation with generated content."""
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        """Sparse creation with generated content."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)
    def test_create_image_resize(self):
        """Non-sparse creation with a resize step."""
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        """Sparse creation with a resize step."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)
    def test_create_image_negative(self):
        """LV creation failure must clean up the dm-crypt mapping and LV."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.lvm.create_volume.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)
            fn.assert_called_with(
                context=self.CONTEXT,
                max_size=self.SIZE,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(
                self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
    def test_create_image_encrypt_negative(self):
        """dm-crypt failure after LV creation must also clean up both."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.dmcrypt.create_volume.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)
            fn.assert_called_with(
                context=self.CONTEXT,
                max_size=self.SIZE,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.dmcrypt.volume_name(self.LV),
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
    def test_create_image_generated_negative(self):
        """A failing generator fn must trigger full cleanup."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(RuntimeError,
                              image.create_image,
                              fn,
                              self.TEMPLATE_PATH,
                              self.SIZE,
                              ephemeral_size=None,
                              context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(
                target=self.PATH,
                ephemeral_size=None,
                context=self.CONTEXT)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
    def test_create_image_generated_encrypt_negative(self):
        """Generator failure with encryption enabled still cleans up."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                ephemeral_size=None,
                context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
    def test_prealloc_image(self):
        """Preallocation must not spawn external commands for this backend."""
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    def test_get_model(self):
        """The image model is a local block device at the mapper path."""
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalBlockImage(self.PATH),
                         model)
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the RBD (Ceph) image backend.

    The rbd/rados libraries are stubbed out via mox, so no cluster is
    needed; tests verify call recording and command construction only.
    """
    POOL = "FakePool"
    USER = "FakeUser"
    # NOTE(review): this shadows the module-level oslo CONF name at class
    # scope only; method bodies still resolve CONF to the module global.
    CONF = "FakeConf"
    SIZE = 1024
    def setUp(self):
        """Point the backend at fake pool/user/conf and stub rbd/rados."""
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(images_rbd_pool=self.POOL,
                   rbd_user=self.USER,
                   images_rbd_ceph_conf=self.CONF,
                   group='libvirt')
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')
    def test_cache(self):
        """Missing template dir and image: fetch and create the tree."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """Existing template dir: no ensure_tree call is recorded."""
        # NOTE(review): the first fn assignment below is immediately
        # overwritten a few lines later; it looks redundant.
        fn = self.mox.CreateMockAnything()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """Image and template both present: cache() is a no-op."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """Template present but image missing: create from template."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        """Creating with no size runs a single 'rbd import' command."""
        fn = self.mox.CreateMockAnything()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        # Recorded twice: the backend checks before and after the import.
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        self.mox.ReplayAll()
        image.create_image(fn, self.TEMPLATE_PATH, None)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--image-format=2', '--id', self.USER,
               '--conf', self.CONF)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()
    def test_create_image_resize(self):
        """Requesting a larger size resizes the volume after import."""
        fn = self.mox.CreateMockAnything()
        full_size = self.SIZE * 2
        fn(max_size=full_size, target=self.TEMPLATE_PATH)
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--image-format=2', '--id', self.USER,
               '--conf', self.CONF)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)
        self.mox.StubOutWithMock(image.driver, 'resize')
        image.driver.resize(rbd_name, full_size)
        self.mox.StubOutWithMock(image, 'verify_base_size')
        image.verify_base_size(self.TEMPLATE_PATH, full_size)
        self.mox.ReplayAll()
        image.create_image(fn, self.TEMPLATE_PATH, full_size)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()
    def test_create_image_already_exists(self):
        """An existing volume of the right size is left untouched."""
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(True)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
        image.check_image_exists().AndReturn(True)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        fn = self.mox.CreateMockAnything()
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """Preallocation must not spawn external commands for RBD."""
        # NOTE(review): sibling tests use self.flags(), which is reverted
        # automatically; this raw override may leak into later tests.
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the base class signature."""
        self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
                         inspect.getargspec(self.image_class.libvirt_info))
    def test_image_path(self):
        """path is the rbd:<pool>/<name>:id=...:conf=... URI."""
        conf = "FakeConf"
        pool = "FakePool"
        user = "FakeUser"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
                                                user, conf)
        self.assertEqual(image.path, rbd_path)
    def test_get_disk_size(self):
        """get_disk_size delegates to the driver keyed by rbd_name."""
        image = self.image_class(self.INSTANCE, self.NAME)
        with mock.patch.object(image.driver, 'size') as size_mock:
            size_mock.return_value = 2361393152
            self.assertEqual(2361393152, image.get_disk_size(image.path))
            size_mock.assert_called_once_with(image.rbd_name)
    def test_create_image_too_small(self):
        """A flavor disk smaller than the existing image must raise."""
        image = self.image_class(self.INSTANCE, self.NAME)
        with mock.patch.object(image, 'driver') as driver_mock:
            driver_mock.exists.return_value = True
            driver_mock.size.return_value = 2
            self.assertRaises(exception.FlavorDiskSmallerThanImage,
                              image.create_image, mock.MagicMock(),
                              self.TEMPLATE_PATH, 1)
            driver_mock.size.assert_called_once_with(image.rbd_name)
    @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
    def test_libvirt_info(self, mock_mon_addrs):
        """Run the shared libvirt_info test with fake monitor addresses."""
        def get_mon_addrs():
            hosts = ["server1", "server2"]
            ports = ["1899", "1920"]
            return hosts, ports
        mock_mon_addrs.side_effect = get_mon_addrs
        super(RbdTestCase, self).test_libvirt_info()
    @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
    def test_get_model(self, mock_mon_addrs):
        """get_model returns an RBDImage with servers, user and secret."""
        pool = "FakePool"
        user = "FakeUser"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        self.flags(rbd_secret_uuid="3306a5c4-8378-4b3c-aa1f-7b48d3a26172",
                   group='libvirt')
        def get_mon_addrs():
            hosts = ["server1", "server2"]
            ports = ["1899", "1920"]
            return hosts, ports
        mock_mon_addrs.side_effect = get_mon_addrs
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.RBDImage(
            self.INSTANCE["uuid"] + "_fake.vm",
            "FakePool",
            "FakeUser",
            "MTIzNDU2Cg==",
            ["server1:1899", "server2:1920"]),
            model)
    def test_import_file(self):
        """Importing over an existing remote image removes it first."""
        image = self.image_class(self.INSTANCE, self.NAME)
        @mock.patch.object(image, 'check_image_exists')
        @mock.patch.object(image.driver, 'remove_image')
        @mock.patch.object(image.driver, 'import_image')
        def _test(mock_import, mock_remove, mock_exists):
            mock_exists.return_value = True
            image.import_file(self.INSTANCE, mock.sentinel.file,
                              mock.sentinel.remote_name)
            name = '%s_%s' % (self.INSTANCE.uuid,
                              mock.sentinel.remote_name)
            mock_exists.assert_called_once_with()
            mock_remove.assert_called_once_with(name)
            mock_import.assert_called_once_with(mock.sentinel.file, name)
        _test()
    def test_import_file_not_found(self):
        """Importing a new remote image skips the remove step."""
        image = self.image_class(self.INSTANCE, self.NAME)
        @mock.patch.object(image, 'check_image_exists')
        @mock.patch.object(image.driver, 'remove_image')
        @mock.patch.object(image.driver, 'import_image')
        def _test(mock_import, mock_remove, mock_exists):
            mock_exists.return_value = False
            image.import_file(self.INSTANCE, mock.sentinel.file,
                              mock.sentinel.remote_name)
            name = '%s_%s' % (self.INSTANCE.uuid,
                              mock.sentinel.remote_name)
            mock_exists.assert_called_once_with()
            self.assertFalse(mock_remove.called)
            mock_import.assert_called_once_with(mock.sentinel.file, name)
        _test()
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Parallels ploop image backend (mox record/replay)."""
    SIZE = 1024
    def setUp(self):
        """Select the Ploop backend and grab the utils module."""
        self.image_class = imagebackend.Ploop
        super(PloopTestCase, self).setUp()
        self.utils = imagebackend.utils
    def prepare_mocks(self):
        """Stub copy_image/execute/synchronized and return a mock fetch fn."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn
    def test_cache(self):
        """Nothing cached yet: fetch template and create the tree."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_create_image(self):
        """Creation copies the template, restores the descriptor and grows."""
        self.stubs.Set(imagebackend.Ploop, 'get_disk_size', lambda a, b: 2048)
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=2048, image_id=None)
        img_path = os.path.join(self.PATH, "root.hds")
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, img_path)
        self.utils.execute("ploop", "restore-descriptor", "-f", "raw",
                           self.PATH, img_path)
        self.utils.execute("ploop", "grow", '-s', "2K",
                           os.path.join(self.PATH, "DiskDescriptor.xml"),
                           run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """Preallocation path runs without spawning external commands."""
        # NOTE(review): unlike the sibling test cases, this variant does
        # not assert that the fake execute log is empty afterwards.
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
class BackendTestCase(test.NoDBTestCase):
    """Tests for Backend.image() backend selection and preallocation."""
    INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
    NAME = 'fake-name.suffix'
    def setUp(self):
        """Disable ephemeral encryption so plain backends are selected."""
        super(BackendTestCase, self).setUp()
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None
    def get_image(self, use_cow, image_type):
        """Return the image object Backend picks for the given settings."""
        return imagebackend.Backend(use_cow).image(self.INSTANCE,
                                                   self.NAME,
                                                   image_type)
    def _test_image(self, image_type, image_not_cow, image_cow):
        """Assert the backend classes chosen with and without use_cow."""
        image1 = self.get_image(False, image_type)
        image2 = self.get_image(True, image_type)
        def assertIsInstance(instance, class_object):
            # Local helper adds a descriptive message to the assertion.
            failure = ('Expected %s,' +
                       ' but got %s.') % (class_object.__name__,
                                          instance.__class__.__name__)
            self.assertIsInstance(instance, class_object, msg=failure)
        assertIsInstance(image1, image_not_cow)
        assertIsInstance(image2, image_cow)
    def test_image_raw(self):
        """'raw' always selects the Raw backend regardless of use_cow."""
        self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
    def test_image_raw_preallocate_images(self):
        """preallocate_images matching is case-insensitive for Raw."""
        flags = ('space', 'Space', 'SPACE')
        for f in flags:
            self.flags(preallocate_images=f)
            raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
            self.assertTrue(raw.preallocate)
    def test_image_raw_preallocate_images_bad_conf(self):
        """Unknown preallocate_images values disable preallocation."""
        self.flags(preallocate_images='space1')
        raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
        self.assertFalse(raw.preallocate)
    def test_image_qcow2(self):
        """'qcow2' always selects the Qcow2 backend."""
        self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
    def test_image_qcow2_preallocate_images(self):
        """preallocate_images matching is case-insensitive for Qcow2."""
        flags = ('space', 'Space', 'SPACE')
        for f in flags:
            self.flags(preallocate_images=f)
            qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
            self.assertTrue(qcow.preallocate)
    def test_image_qcow2_preallocate_images_bad_conf(self):
        """Unknown preallocate_images values disable preallocation."""
        self.flags(preallocate_images='space1')
        qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
        self.assertFalse(qcow.preallocate)
    def test_image_lvm(self):
        """'lvm' selects the Lvm backend when a volume group is set."""
        self.flags(images_volume_group='FakeVG', group='libvirt')
        self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
    def test_image_rbd(self):
        """'rbd' selects the Rbd backend (rbd/rados stubbed out)."""
        conf = "FakeConf"
        pool = "FakePool"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')
        self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
    def test_image_default(self):
        """'default' maps to Raw, or Qcow2 when use_cow is requested."""
        self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
| apache-2.0 |
kervi/kervi | kervi-core/kervi/core/utility/kervi_logging.py | 1 | 2921 | #Copyright 2016 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
"""
Module that holds log functionality.
In general you don't need to include this module in your Kervi code, as the
spine class holds a reference to a fully configured log.
"""
import logging
import logging.handlers
import os
# Custom level between DEBUG (10) and INFO (20); registered so
# logging.getLevelName() round-trips it.
VERBOSE = 15
logging.addLevelName(VERBOSE, "VERBOSE")
class BraceMessage(object):
    """Lazy log message supporting both ``{}``-style and ``%``-style formats.

    Formatting is deferred until ``__str__`` is called, so no string work
    happens unless the record is actually emitted.
    """
    def __init__(self, fmt, *args):
        self.fmt = fmt
        self.args = args
    def __str__(self):
        try:
            # Heuristic: a brace anywhere selects str.format, else %-style.
            if '{' in self.fmt:
                return self.fmt.format(*self.args)
            else:
                return self.fmt % self.args
        except Exception:
            # Malformed template/arguments: fall back to the raw template
            # instead of raising from inside a logging call.
            return self.fmt
class KerviLog(object):
    """Thin wrapper around :class:`logging.Logger` with brace formatting."""
    def __init__(self, name):
        self.logger = logging.getLogger(name)
    def info(self, message, *args):
        self.logger.info(BraceMessage(message, *args))
    def verbose(self, message, *args, **kwargs):
        """Log at the custom VERBOSE level. Extra kwargs are ignored."""
        if args:
            self.logger._log(VERBOSE, str(BraceMessage(message, *args)), None)
        else:
            # Bug fix: Logger._log() requires a positional ``args``
            # argument; the old code unpacked the empty ``*args`` here,
            # raising TypeError on every no-argument verbose() call.
            self.logger._log(VERBOSE, message, None)
    def warn(self, message, *args):
        self.logger.warning(BraceMessage(message, *args))
    def warning(self, message, *args):
        self.logger.warning(BraceMessage(message, *args))
    def debug(self, message, *args):
        if args:
            self.logger.debug(BraceMessage(message, *args))
        else:
            self.logger.debug(message)
    def error(self, message, *args):
        self.logger.error(BraceMessage(message, *args))
    def exception(self, message, *args):
        self.logger.exception(BraceMessage(message, *args))
    def fatal(self, message, *args):
        self.logger.fatal(BraceMessage(message, *args))
def init_process_logging(process_name, config, log_queue=None):
    """Configure root logging for a (sub)process.

    :param process_name: name of the process; currently informational only.
    :param config: configuration object with ``level``, ``file`` and
        ``resetLog`` attributes.
    :param log_queue: if given, records are forwarded to this queue via a
        QueueHandler instead of installing file/console handlers
        (presumably so a single parent process owns the log file --
        confirm against the callers).
    """
    logger = logging.getLogger()
    # Map the configured textual level onto the root logger.
    if config.level == "verbose":
        logger.setLevel(VERBOSE)
    elif config.level == "info":
        logger.setLevel(logging.INFO)
    elif config.level == "warning":
        logger.setLevel(logging.WARNING)
    elif config.level == "debug":
        logger.setLevel(logging.DEBUG)
    if log_queue:
        queue_handler = logging.handlers.QueueHandler(log_queue)
        logger.addHandler(queue_handler)
    else:
        if config.resetLog:
            # Start with a fresh log file; ignore a missing/inaccessible
            # file instead of swallowing every exception type.
            try:
                os.remove(config.file)
            except OSError:
                pass
        # File gets everything down to DEBUG; console only ERROR and up.
        file_handler = logging.FileHandler(config.file)
        file_handler.setLevel(logging.DEBUG)
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.ERROR)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        stream_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(stream_handler)
| mit |
tquizzle/Sick-Beard | lib/hachoir_parser/archive/tar.py | 90 | 4459 | """
Tar archive parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
Enum, UInt8, SubFile, String, NullBytes)
from lib.hachoir_core.tools import humanFilesize, paddingSize, timestampUNIX
from lib.hachoir_core.endian import BIG_ENDIAN
import re
class FileEntry(FieldSet):
    """One tar archive member: the 512-byte ustar header plus its content."""
    type_name = {
        # 48 is "0", 49 is "1", ...
        0: u"Normal disk file (old format)",
        48: u"Normal disk file",
        49: u"Link to previously dumped file",
        50: u"Symbolic link",
        51: u"Character special file",
        52: u"Block special file",
        53: u"Directory",
        54: u"FIFO special file",
        55: u"Contiguous file"
    }
    def getOctal(self, name):
        # Numeric header fields (size, mtime, ...) are octal ASCII text.
        return self.octal2int(self[name].value)
    def getDatetime(self):
        """
        Create modification date as Unicode string, may raise ValueError.
        """
        timestamp = self.getOctal("mtime")
        return timestampUNIX(timestamp)
    def createFields(self):
        """Yield the ustar header fields, content and 512-byte padding."""
        yield String(self, "name", 100, "Name", strip="\0", charset="ISO-8859-1")
        yield String(self, "mode", 8, "Mode", strip=" \0", charset="ASCII")
        yield String(self, "uid", 8, "User ID", strip=" \0", charset="ASCII")
        yield String(self, "gid", 8, "Group ID", strip=" \0", charset="ASCII")
        yield String(self, "size", 12, "Size", strip=" \0", charset="ASCII")
        yield String(self, "mtime", 12, "Modification time", strip=" \0", charset="ASCII")
        yield String(self, "check_sum", 8, "Check sum", strip=" \0", charset="ASCII")
        yield Enum(UInt8(self, "type", "Type"), self.type_name)
        yield String(self, "lname", 100, "Link name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "magic", 8, "Magic", strip=" \0", charset="ASCII")
        yield String(self, "uname", 32, "User name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "gname", 32, "Group name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "devmajor", 8, "Dev major", strip=" \0", charset="ASCII")
        yield String(self, "devminor", 8, "Dev minor", strip=" \0", charset="ASCII")
        yield NullBytes(self, "padding", 167, "Padding (zero)")
        filesize = self.getOctal("size")
        if filesize:
            yield SubFile(self, "content", filesize, filename=self["name"].value)
        # Tar members are aligned on 512-byte block boundaries.
        size = paddingSize(self.current_size//8, 512)
        if size:
            yield NullBytes(self, "padding_end", size, "Padding (512 align)")
    def convertOctal(self, chunk):
        # Convert a chunk's octal-text value to an integer.
        return self.octal2int(chunk.value)
    def isEmpty(self):
        # An all-empty name marks the archive terminator block.
        return self["name"].value == ""
    def octal2int(self, text):
        """Parse octal ASCII text; malformed input yields 0."""
        try:
            return int(text, 8)
        except ValueError:
            return 0
    def createDescription(self):
        """Human-readable summary: name, type and size of the member."""
        if self.isEmpty():
            desc = "(terminator, empty header)"
        else:
            filename = self["name"].value
            filesize = humanFilesize(self.getOctal("size"))
            desc = "(%s: %s, %s)" % \
                (filename, self["type"].display, filesize)
        return "Tar File " + desc
class TarFile(Parser):
    """Parser for TAR archives: a sequence of FileEntry blocks plus a
    zero-filled terminator block."""
    endian = BIG_ENDIAN
    PARSER_TAGS = {
        "id": "tar",
        "category": "archive",
        "file_ext": ("tar",),
        "mime": (u"application/x-tar", u"application/x-gtar"),
        "min_size": 512*8,
        "magic": (("ustar \0", 257*8),),
        "subfile": "skip",
        "description": "TAR archive",
    }
    # Matches the ustar magic at byte offset 257, or the space/NUL filler
    # found there in old-format archives.
    _sign = re.compile("ustar *\0|[ \0]*$")
    def validate(self):
        """Check the magic and that the first header parses as numbers."""
        if not self._sign.match(self.stream.readBytes(257*8, 8)):
            return "Invalid magic number"
        if self[0].name == "terminator":
            return "Don't contain any file"
        try:
            # uid/gid/size must all be valid octal text.
            int(self["file[0]/uid"].value, 8)
            int(self["file[0]/gid"].value, 8)
            int(self["file[0]/size"].value, 8)
        except ValueError:
            return "Invalid file size"
        return True
    def createFields(self):
        """Yield file entries until the empty terminator header."""
        while not self.eof:
            field = FileEntry(self, "file[]")
            if field.isEmpty():
                yield NullBytes(self, "terminator", 512)
                break
            yield field
        if self.current_size < self._size:
            yield self.seekBit(self._size, "end")
    def createContentSize(self):
        # Content ends right after the terminator block.
        return self["terminator"].address + self["terminator"].size
| gpl-3.0 |
canaltinova/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags HTML allows to be omitted.

    For each token the filter inspects the neighbouring tokens and removes
    the tag when the HTML "optional tags" rules say a parser would infer it;
    every other token passes through unchanged.
    """

    def slider(self):
        """Yield (previous, current, next) triples over the source tokens.

        ``previous`` is None for the first token and ``next`` is None for
        the last one.
        """
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        # Emit the final token, which has no successor.
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            token_type = token["type"]
            if token_type == "StartTag":
                # A start tag carrying attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif token_type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if the start tag ``tagname`` may be omitted here."""
        next_type = next and next["type"] or None
        # FIX: this used to read ``tagname in 'html'``, a substring test that
        # also matched '', 'h', 'ht' and 'htm'; equality is what is intended.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return next_type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if next_type in ("StartTag", "EmptyTag"):
                return True
            elif next_type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if next_type in ("Comment", "SpaceCharacters"):
                return False
            elif next_type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if next_type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if next_type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if the end tag ``tagname`` may be omitted here."""
        next_type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return next_type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li/optgroup/tr element's end tag may be omitted if it is
            # immediately followed by another element of the same name, or
            # if there is no more content in the parent element.
            if next_type == "StartTag":
                return next["name"] == tagname
            else:
                return next_type == "EndTag" or next_type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if next_type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return next_type == "EndTag" or next_type is None
            else:
                # dt may not be omitted at the end of its parent.
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by one of a fixed set of block-level
            # elements, or if there is no more content in the parent element.
            if next_type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return next_type == "EndTag" or next_type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element, or
            # by an optgroup element, or if there is no more content in the
            # parent element.
            if next_type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return next_type == "EndTag" or next_type is None
        elif tagname in ('rt', 'rp'):
            # An rt/rp element's end tag may be omitted if it is immediately
            # followed by an rt or rp element, or if there is no more content
            # in the parent element.
            if next_type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return next_type == "EndTag" or next_type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if next_type in ("Comment", "SpaceCharacters"):
                return False
            elif next_type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if next_type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return next_type == "EndTag" or next_type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if next_type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return next_type == "EndTag" or next_type is None
        elif tagname in ('td', 'th'):
            # A td/th element's end tag may be omitted if it is immediately
            # followed by a td or th element, or if there is no more content
            # in the parent element.
            if next_type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return next_type == "EndTag" or next_type is None
        return False
| mpl-2.0 |
j00bar/ansible | lib/ansible/modules/web_infrastructure/jenkins_script.py | 15 | 5365 | #!/usr/bin/python
# encoding: utf-8
# (c) 2016, James Hogarth <james.hogarth@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: James Hogarth
module: jenkins_script
short_description: Executes a groovy script in the jenkins instance
version_added: '2.3'
description:
- The C(jenkins_script) module takes a script plus a dict of values
to use within the script and returns the result of the script being run.
options:
script:
description:
- The groovy script to be executed.
This gets passed as a string Template if args is defined.
required: true
default: null
url:
description:
- The jenkins server to execute the script against. The default is a local
jenkins instance that is not being proxied through a webserver.
required: false
default: http://localhost:8080
validate_certs:
description:
- If set to C(no), the SSL certificates will not be validated.
This should only set to C(no) used on personally controlled sites
using self-signed certificates as it avoids verifying the source site.
required: false
default: True
user:
description:
- The username to connect to the jenkins server with.
required: false
default: null
password:
description:
- The password to connect to the jenkins server with.
required: false
default: null
args:
description:
- A dict of key-value pairs used in formatting the script.
required: false
default: null
notes:
- Since the script can do anything this does not report on changes.
Knowing the script is being run it's important to set changed_when
for the ansible output to be clear on any alterations made.
'''
EXAMPLES = '''
- name: Obtaining a list of plugins
jenkins_script:
script: 'println(Jenkins.instance.pluginManager.plugins)'
user: admin
password: admin
- name: Setting master using a variable to hold a more complicate script
vars:
setmaster_mode: |
import jenkins.model.*
instance = Jenkins.getInstance()
instance.setMode(${jenkins_mode})
instance.save()
- name: use the variable as the script
jenkins_script:
script: "{{ setmaster_mode }}"
args:
jenkins_mode: Node.Mode.EXCLUSIVE
- name: interacting with an untrusted HTTPS connection
jenkins_script:
script: "println(Jenkins.instance.pluginManager.plugins)"
user: admin
password: admin
url: https://localhost
validate_certs: no
'''
RETURN = '''
output:
description: Result of script
returned: success
type: string
sample: 'Result: true'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
try:
# python2
from urllib import urlencode
except ImportError:
# python3
from urllib.parse import urlencode
def main():
    """Run a user-supplied Groovy script against Jenkins via /scriptText.

    Fails the module on non-200 responses or when the returned text looks
    like a Java stack trace; otherwise exits with the script output.
    """
    arg_spec = dict(
        script=dict(required=True, type="str"),
        url=dict(required=False, type="str", default="http://localhost:8080"),
        validate_certs=dict(required=False, type="bool", default=True),
        user=dict(required=False, no_log=True, type="str", default=None),
        password=dict(required=False, no_log=True, type="str", default=None),
        args=dict(required=False, type="dict", default=None),
    )
    module = AnsibleModule(argument_spec=arg_spec)
    params = module.params

    if params['user'] is not None:
        if params['password'] is None:
            module.fail_json(msg="password required when user provided")
        # fetch_url reads its credentials from module.params.
        params['url_username'] = params['user']
        params['url_password'] = params['password']
        params['force_basic_auth'] = True

    script_contents = params['script']
    if params['args'] is not None:
        # Treat the script as a string.Template and substitute the args.
        from string import Template
        script_contents = Template(script_contents).substitute(params['args'])

    resp, info = fetch_url(module,
                           params['url'] + "/scriptText",
                           data=urlencode({'script': script_contents}),
                           method="POST")

    if info["status"] != 200:
        module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"])

    result = resp.read()

    # Groovy errors come back as HTTP 200 with a stack trace in the body.
    if 'Exception:' in result and 'at java.lang.Thread' in result:
        module.fail_json(msg="script failed with stacktrace:\n " + result)

    module.exit_json(output=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
combust-ml/mleap | python/tests/pyspark/feature/math_binary_test.py | 2 | 6892 | import math
import os
import shutil
import tempfile
import unittest
import mleap.pyspark # noqa
from mleap.pyspark.spark_support import SimpleSparkSerializer # noqa
import pandas as pd
from pandas.testing import assert_frame_equal
from pyspark.ml import Pipeline
from pyspark.sql.types import FloatType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from mleap.pyspark.feature.math_binary import MathBinary
from mleap.pyspark.feature.math_binary import BinaryOperation
from tests.pyspark.lib.spark_session import spark_session
# Two-column schema shared by every test below: f1 and f2 are nullable floats.
INPUT_SCHEMA = StructType([
    StructField('f1', FloatType()),
    StructField('f2', FloatType()),
])
class MathBinaryTest(unittest.TestCase):
    """Tests for the MathBinary Spark transformer.

    Covers: plain transform, pipeline composition, construction of every
    BinaryOperation, MLeap bundle (de)serialization, and the defaultA/defaultB
    fallbacks used when an input column is missing or null.
    """

    @classmethod
    def setUpClass(cls):
        # One Spark session for the whole class; it is expensive to start.
        cls.spark = spark_session()

    @classmethod
    def tearDownClass(cls):
        cls.spark.stop()

    def setUp(self):
        """Create the shared input frame (f1=i, f2=2i, i in 1..9), the
        expected add(f1, f2) result, and a scratch dir for bundles."""
        self.input = self.spark.createDataFrame([
            (
                float(i),
                float(i * 2),
            )
            for i in range(1, 10)
        ], INPUT_SCHEMA)

        self.expected_add = pd.DataFrame(
            [(
                float(i + i * 2)
            )
            for i in range(1, 10)],
            columns=['add(f1, f2)'],
        )
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def _new_add_math_binary(self):
        # Helper shared by several tests: f1 + f2 -> "add(f1, f2)".
        return MathBinary(
            operation=BinaryOperation.Add,
            inputA="f1",
            inputB="f2",
            outputCol="add(f1, f2)",
        )

    def test_add_math_binary(self):
        """Transform alone produces the expected sum column."""
        add_transformer = self._new_add_math_binary()
        result = add_transformer.transform(self.input).toPandas()[['add(f1, f2)']]
        assert_frame_equal(self.expected_add, result)

    def test_math_binary_pipeline(self):
        """Two chained MathBinary stages compose inside a Pipeline."""
        add_transformer = self._new_add_math_binary()
        mul_transformer = MathBinary(
            operation=BinaryOperation.Multiply,
            inputA="f1",
            inputB="add(f1, f2)",
            outputCol="mul(f1, add(f1, f2))",
        )
        expected = pd.DataFrame(
            [(
                float(i * (i + i * 2))
            )
            for i in range(1, 10)],
            columns=['mul(f1, add(f1, f2))'],
        )
        pipeline = Pipeline(
            stages=[add_transformer, mul_transformer]
        )
        pipeline_model = pipeline.fit(self.input)
        result = pipeline_model.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
        assert_frame_equal(expected, result)

    def test_can_instantiate_all_math_binary(self):
        """Every BinaryOperation can be constructed; construction itself is
        the assertion (no transform is run)."""
        for binary_operation in BinaryOperation:
            transformer = MathBinary(
                operation=binary_operation,
                inputA="f1",
                inputB="f2",
                outputCol="operation",
            )

    def test_serialize_deserialize_math_binary(self):
        """A single transformer round-trips through an MLeap bundle."""
        add_transformer = self._new_add_math_binary()
        # MLeap bundles are addressed with a jar:file: URI.
        file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary.zip'))
        add_transformer.serializeToBundle(file_path, self.input)
        deserialized_math_binary = SimpleSparkSerializer().deserializeFromBundle(file_path)
        result = deserialized_math_binary.transform(self.input).toPandas()[['add(f1, f2)']]
        assert_frame_equal(self.expected_add, result)

    def test_serialize_deserialize_pipeline(self):
        """A fitted two-stage pipeline round-trips through an MLeap bundle.

        NOTE(review): ``result`` is computed from ``pipeline_model``, not from
        ``deserialized_pipeline`` — the deserialized model's output is never
        asserted; confirm whether that is intentional.
        """
        add_transformer = self._new_add_math_binary()
        mul_transformer = MathBinary(
            operation=BinaryOperation.Multiply,
            inputA="f1",
            inputB="add(f1, f2)",
            outputCol="mul(f1, add(f1, f2))",
        )
        expected = pd.DataFrame(
            [(
                float(i * (i + i * 2))
            )
            for i in range(1, 10)],
            columns=['mul(f1, add(f1, f2))'],
        )
        pipeline = Pipeline(
            stages=[add_transformer, mul_transformer]
        )
        pipeline_model = pipeline.fit(self.input)
        file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary_pipeline.zip'))
        pipeline_model.serializeToBundle(file_path, self.input)
        deserialized_pipeline = SimpleSparkSerializer().deserializeFromBundle(file_path)
        result = pipeline_model.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
        assert_frame_equal(expected, result)

    def test_add_math_binary_defaults_none(self):
        """With no defaults configured, a null input yields a null output."""
        add_transformer = self._new_add_math_binary()
        none_df = self.spark.createDataFrame([
            (None, float(i * 2))
            for i in range(1, 3)
        ], INPUT_SCHEMA)
        # Summing None + int yields Nones
        expected_df = pd.DataFrame([
            (None,)
            for i in range(1, 3)
        ], columns=['add(f1, f2)'])
        result = add_transformer.transform(none_df).toPandas()[['add(f1, f2)']]
        assert_frame_equal(expected_df, result)

    def test_mult_math_binary_default_inputA(self):
        """defaultA substitutes for a missing/null A input."""
        mult_transformer = MathBinary(
            operation=BinaryOperation.Multiply,
            inputB="f2",
            outputCol="mult(1, f2)",
            defaultA=1.0,
        )
        none_df = self.spark.createDataFrame([
            (None, float(i * 1234))
            for i in range(1, 3)
        ], INPUT_SCHEMA)
        expected_df = pd.DataFrame([
            (float(i * 1234), )
            for i in range(1, 3)
        ], columns=['mult(1, f2)'])
        result = mult_transformer.transform(none_df).toPandas()[['mult(1, f2)']]
        assert_frame_equal(expected_df, result)

    def test_mult_math_binary_default_inputB(self):
        """defaultB substitutes for a missing/null B input."""
        mult_transformer = MathBinary(
            operation=BinaryOperation.Multiply,
            inputA="f1",
            outputCol="mult(f1, 2)",
            defaultB=2.0,
        )
        none_df = self.spark.createDataFrame([
            (float(i * 1234), None)
            for i in range(1, 3)
        ], INPUT_SCHEMA)
        expected_df = pd.DataFrame([
            (float(i * 1234 * 2), )
            for i in range(1, 3)
        ], columns=['mult(f1, 2)'])
        result = mult_transformer.transform(none_df).toPandas()[['mult(f1, 2)']]
        assert_frame_equal(expected_df, result)

    def test_mult_math_binary_default_both(self):
        """Both defaults used when both inputs are null."""
        mult_transformer = MathBinary(
            operation=BinaryOperation.Multiply,
            outputCol="mult(7, 8)",
            defaultA=7.0,
            defaultB=8.0,
        )
        none_df = self.spark.createDataFrame([
            (None, None)
            for i in range(1, 3)
        ], INPUT_SCHEMA)
        expected_df = pd.DataFrame([
            (float(7 * 8), )
            for i in range(1, 3)
        ], columns=['mult(7, 8)'])
        result = mult_transformer.transform(none_df).toPandas()[['mult(7, 8)']]
        assert_frame_equal(expected_df, result)
| apache-2.0 |
nvie/python-mode | pymode/libs3/rope/base/oi/runmod.py | 28 | 7804 |
def __rope_start_everything():
    """Run a target script under a sys.settrace hook that reports call data.

    Invoked as: runmod.py <send_info> <project_root> <file_to_run> [args...]
    where ``send_info`` is a TCP port number (digits), a file name, or '-'
    to disable reporting.  For every function return inside ``project_root``
    a (code, args, returned) triple of "persisted forms" is sent back over
    the chosen channel.
    """
    import os
    import sys
    import socket
    import pickle
    import marshal
    import inspect
    import types
    import threading

    class _MessageSender(object):
        # Base sender: silently discards data.

        def send_data(self, data):
            pass

    class _SocketSender(_MessageSender):
        # Sends pickled records to a local TCP port.

        def __init__(self, port):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', port))
            self.my_file = s.makefile('wb')

        def send_data(self, data):
            if not self.my_file.closed:
                pickle.dump(data, self.my_file)

        def close(self):
            self.my_file.close()

    class _FileSender(_MessageSender):
        # Writes marshalled records to a file.

        def __init__(self, file_name):
            self.my_file = open(file_name, 'wb')

        def send_data(self, data):
            if not self.my_file.closed:
                marshal.dump(data, self.my_file)

        def close(self):
            self.my_file.close()

    def _cached(func):
        # Memoize one-argument methods; the cache is shared across instances
        # because it is closed over here, not stored on self.
        cache = {}

        def newfunc(self, arg):
            if arg in cache:
                return cache[arg]
            result = func(self, arg)
            cache[arg] = result
            return result
        return newfunc

    class _FunctionCallDataSender(object):
        # Installs the trace hook and streams call records out.

        def __init__(self, send_info, project_root):
            self.project_root = project_root
            if send_info.isdigit():
                self.sender = _SocketSender(int(send_info))
            else:
                self.sender = _FileSender(send_info)

            def global_trace(frame, event, arg):
                # HACK: Ignoring out->in calls
                # This might lose some information
                if self._is_an_interesting_call(frame):
                    return self.on_function_call
            sys.settrace(global_trace)
            # Also trace threads started after this point.
            threading.settrace(global_trace)

        def on_function_call(self, frame, event, arg):
            # Local trace function: only 'return' events carry the result.
            if event != 'return':
                return
            args = []
            returned = ('unknown',)
            code = frame.f_code
            for argname in code.co_varnames[:code.co_argcount]:
                try:
                    args.append(self._object_to_persisted_form(frame.f_locals[argname]))
                except (TypeError, AttributeError):
                    args.append(('unknown',))
            try:
                returned = self._object_to_persisted_form(arg)
            except (TypeError, AttributeError):
                pass
            try:
                data = (self._object_to_persisted_form(frame.f_code),
                        tuple(args), returned)
                self.sender.send_data(data)
            except (TypeError):
                pass
            # Returning self keeps tracing nested calls in this frame.
            return self.on_function_call

        def _is_an_interesting_call(self, frame):
            #if frame.f_code.co_name in ['?', '<module>']:
            #    return False
            #return not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)

            # A call is interesting if either the callee or its caller is
            # inside the project.
            if not self._is_code_inside_project(frame.f_code) and \
               (not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)):
                return False
            return True

        def _is_code_inside_project(self, code):
            source = self._path(code.co_filename)
            return source is not None and os.path.exists(source) and \
                _realpath(source).startswith(self.project_root)

        @_cached
        def _get_persisted_code(self, object_):
            source = self._path(object_.co_filename)
            if not os.path.exists(source):
                raise TypeError('no source')
            return ('defined', _realpath(source), str(object_.co_firstlineno))

        @_cached
        def _get_persisted_class(self, object_):
            try:
                return ('defined', _realpath(inspect.getsourcefile(object_)),
                        object_.__name__)
            except (TypeError, AttributeError):
                return ('unknown',)

        def _get_persisted_builtin(self, object_):
            # Describe builtin containers by type plus a sample element.
            if isinstance(object_, str):
                return ('builtin', 'str')
            if isinstance(object_, list):
                holding = None
                if len(object_) > 0:
                    holding = object_[0]
                return ('builtin', 'list', self._object_to_persisted_form(holding))
            if isinstance(object_, dict):
                keys = None
                values = None
                if len(object_) > 0:
                    keys = list(object_.keys())[0]
                    values = object_[keys]
                    # Avoid picking an entry whose value is the dict itself.
                    if values == object_ and len(object_) > 1:
                        keys = list(object_.keys())[1]
                        values = object_[keys]
                return ('builtin', 'dict',
                        self._object_to_persisted_form(keys),
                        self._object_to_persisted_form(values))
            if isinstance(object_, tuple):
                objects = []
                # Small tuples are described element-wise; long ones by the
                # first element only.
                if len(object_) < 3:
                    for holding in object_:
                        objects.append(self._object_to_persisted_form(holding))
                else:
                    objects.append(self._object_to_persisted_form(object_[0]))
                return tuple(['builtin', 'tuple'] + objects)
            if isinstance(object_, set):
                holding = None
                if len(object_) > 0:
                    for o in object_:
                        holding = o
                        break
                return ('builtin', 'set', self._object_to_persisted_form(holding))
            return ('unknown',)

        def _object_to_persisted_form(self, object_):
            # Dispatch on the runtime type to one of the persisters above.
            if object_ is None:
                return ('none',)
            if isinstance(object_, types.CodeType):
                return self._get_persisted_code(object_)
            if isinstance(object_, types.FunctionType):
                return self._get_persisted_code(object_.__code__)
            if isinstance(object_, types.MethodType):
                return self._get_persisted_code(object_.__func__.__code__)
            if isinstance(object_, types.ModuleType):
                return self._get_persisted_module(object_)
            if isinstance(object_, (str, list, dict, tuple, set)):
                return self._get_persisted_builtin(object_)
            if isinstance(object_, type):
                return self._get_persisted_class(object_)
            return ('instance', self._get_persisted_class(type(object_)))

        @_cached
        def _get_persisted_module(self, object_):
            path = self._path(object_.__file__)
            if path and os.path.exists(path):
                return ('defined', _realpath(path))
            return ('unknown',)

        def _path(self, path):
            # Map a .pyc filename back to its .py source; returns None
            # (implicitly) for anything that is not Python source.
            if path.endswith('.pyc'):
                path = path[:-1]
            if path.endswith('.py'):
                return path

        def close(self):
            self.sender.close()
            sys.settrace(None)

    def _realpath(path):
        return os.path.realpath(os.path.abspath(os.path.expanduser(path)))

    send_info = sys.argv[1]
    project_root = sys.argv[2]
    file_to_run = sys.argv[3]

    # Make the target script believe it is being run directly.
    run_globals = globals()
    run_globals.update({'__name__': '__main__',
                        'builtins': __builtins__,
                        '__file__': file_to_run})
    if send_info != '-':
        data_sender = _FunctionCallDataSender(send_info, project_root)
    # Strip our own arguments so the target sees its expected argv.
    del sys.argv[1:4]
    with open(file_to_run) as file:
        exec(compile(file.read(), file_to_run, 'exec'), run_globals)
    if send_info != '-':
        data_sender.close()


if __name__ == '__main__':
    __rope_start_everything()
| lgpl-3.0 |
jcpowermac/ansible | lib/ansible/modules/network/avi/avi_sslprofile.py | 15 | 8059 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
- Units(SEC).
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
"""
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: create/update/delete an Avi SSLProfile object."""
    # Module-specific options; the shared Avi connection options are merged in
    # below via avi_common_argument_spec().
    spec = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'accepted_ciphers': dict(type='str'),
        'accepted_versions': dict(type='list'),
        'cipher_enums': dict(type='list'),
        'description': dict(type='str'),
        'dhparam': dict(type='str'),
        'enable_ssl_session_reuse': dict(type='bool'),
        'name': dict(type='str', required=True),
        'prefer_client_cipher_ordering': dict(type='bool'),
        'send_close_notify': dict(type='bool'),
        'ssl_rating': dict(type='dict'),
        'ssl_session_timeout': dict(type='int'),
        'tags': dict(type='list'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    if not HAS_AVI:
        # Fail fast when the SDK required by avi_ansible_api is missing.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Empty set: no sensitive fields need special handling for this object.
    return avi_ansible_api(module, 'sslprofile', set())


if __name__ == '__main__':
    main()
| gpl-3.0 |
jasonabele/gnuradio | gr-trellis/doc/make_numbered_listing.py | 32 | 1196 | #!/usr/bin/env python
import sys
import os, os.path
from optparse import OptionParser
def quote_line (line):
    """Escape XML special characters in *line* and return the result.

    NOTE: the replacement strings had been corrupted into no-ops (each
    character "replaced" by itself, e.g. '&' -> '&'); this restores the
    intended XML entity escaping.  '&' must be replaced first so the
    entities introduced by later replacements are not double-escaped.
    """
    line = line.replace ('&', '&amp;')
    line = line.replace ('<', '&lt;')
    line = line.replace ('>', '&gt;')
    line = line.replace ("'", '&#39;')
    line = line.replace ('"', '&quot;')
    return line
def generate_listing (input_filename, title=None):
    """Write a line-numbered DocBook <programlisting> for *input_filename*.

    The output file is named <basename of input>.xml and is created in the
    current working directory.  ``title`` is accepted for backward
    compatibility but is currently unused.
    """
    output_filename = os.path.basename (input_filename) + '.xml'
    # Context managers close both handles even on error; the original
    # version leaked both file objects.
    with open (input_filename, "r") as inf:
        with open (output_filename, "w") as outf:
            outf.write ('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
            outf.write ('<programlisting>\n')
            lineno = 0
            for line in inf:
                # Expand tabs so the 3-column line numbers stay aligned,
                # then escape XML special characters.
                line = quote_line (line.expandtabs (8))
                lineno = lineno + 1
                outf.write ('%3d %s' % (lineno, line))
            outf.write ('</programlisting>\n')
def main ():
    """Emit one numbered XML listing per file named on the command line."""
    # 'path' avoids shadowing the (Python 2) builtin 'file'.
    for path in sys.argv[1:]:
        generate_listing (path)


if __name__ == '__main__':
    main ()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.