| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
damdam-s/partner-contact | portal_partner_merge/__openerp__.py | 15 | 1376 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Portal Partner Merge',
'version': '1.0',
'category': 'Hidden',
'description': """
Link module for base_partner_merge which extracts the portal dependency
""",
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'website': 'http://www.camptocamp.com/',
'depends': ['portal', 'base_partner_merge'],
'data': [],
'test': [],
'installable': True,
'auto_install': True,
'application': False,
}
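# Note (added for illustration, not part of the original manifest):
# 'auto_install': True marks this as a glue module, so OpenERP installs it
# automatically as soon as both of its dependencies ('portal' and
# 'base_partner_merge') are present in the database.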
| agpl-3.0 |
mgraupe/acq4 | acq4/devices/DAQGeneric/DOChannelTemplate.py | 3 | 5756 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DOChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:09 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 300)
self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.groupBox = GroupBox(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(5, 0, 0, 0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.preSetCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.preSetCheck.setFont(font)
self.preSetCheck.setObjectName(_fromUtf8("preSetCheck"))
self.gridLayout.addWidget(self.preSetCheck, 0, 0, 1, 1)
self.holdingCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.holdingCheck.setFont(font)
self.holdingCheck.setObjectName(_fromUtf8("holdingCheck"))
self.gridLayout.addWidget(self.holdingCheck, 1, 0, 1, 1)
self.preSetSpin = QtGui.QSpinBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.preSetSpin.setFont(font)
self.preSetSpin.setMaximum(1)
self.preSetSpin.setObjectName(_fromUtf8("preSetSpin"))
self.gridLayout.addWidget(self.preSetSpin, 0, 1, 1, 1)
self.holdingSpin = QtGui.QSpinBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.holdingSpin.setFont(font)
self.holdingSpin.setMaximum(1)
self.holdingSpin.setObjectName(_fromUtf8("holdingSpin"))
self.gridLayout.addWidget(self.holdingSpin, 1, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
self.frame = QtGui.QFrame(self.groupBox)
self.frame.setFrameShape(QtGui.QFrame.Box)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout = QtGui.QVBoxLayout(self.frame)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.functionCheck = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.functionCheck.setFont(font)
self.functionCheck.setObjectName(_fromUtf8("functionCheck"))
self.horizontalLayout.addWidget(self.functionCheck)
self.displayCheck = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.displayCheck.setFont(font)
self.displayCheck.setChecked(True)
self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
self.horizontalLayout.addWidget(self.displayCheck)
self.verticalLayout.addLayout(self.horizontalLayout)
self.waveGeneratorWidget = StimGenerator(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.waveGeneratorWidget.sizePolicy().hasHeightForWidth())
self.waveGeneratorWidget.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.waveGeneratorWidget.setFont(font)
self.waveGeneratorWidget.setObjectName(_fromUtf8("waveGeneratorWidget"))
self.verticalLayout.addWidget(self.waveGeneratorWidget)
self.verticalLayout_2.addWidget(self.frame)
self.verticalLayout_3.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "GroupBox", None))
self.preSetCheck.setText(_translate("Form", "Pre-set", None))
self.holdingCheck.setText(_translate("Form", "Holding", None))
self.functionCheck.setText(_translate("Form", "Enable Function", None))
self.displayCheck.setText(_translate("Form", "Display", None))
from acq4.pyqtgraph import GroupBox
from acq4.util.generator.StimGenerator import StimGenerator
| mit |
azureplus/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/admindocs/tests/test_fields.py | 111 | 1192 | from __future__ import absolute_import, unicode_literals
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils import unittest
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
| apache-2.0 |
todaychi/hue | desktop/core/ext-py/Django-1.6.10/django/utils/translation/trans_null.py | 119 | 1990 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
if number == 1: return singular
return plural
ngettext_lazy = ngettext
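# Illustrative example (not in the original module): ngettext picks the
# singular form exactly when number == 1, otherwise the plural.
#     >>> ngettext('apple', 'apples', 1)
#     'apple'
#     >>> ngettext('apple', 'apples', 2)
#     'apples'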
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# date formats shouldn't be looked up using gettext anymore. This
# is kept for backward compatibility
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
result = TECHNICAL_ID_MAP.get(message, message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
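# Illustrative example (not in the original module): technical ids resolve
# to the configured format string, anything else passes through unchanged.
#     >>> gettext('DATE_FORMAT')   # -> settings.DATE_FORMAT, e.g. 'N j, Y'
#     >>> gettext('Hello')
#     'Hello'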
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
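# Illustrative example (not in the original module):
#     >>> to_locale('en-us')
#     'en_US'
#     >>> to_locale('de')
#     'de'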
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request, supported=None):
return None
| apache-2.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/setuptools/monkey.py | 39 | 5789 | """
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the base classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
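# Illustrative sketch (not in the original module, module prefixes elided),
# assuming CPython:
#     >>> class Base(object): pass
#     >>> class Child(Base): pass
#     >>> _get_mro(Child)
#     (<class 'Child'>, <class 'Base'>, <type 'object'>)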
def get_unpatched(item):
lookup = (
get_unpatched_class if isinstance(item, six.class_types) else
get_unpatched_function if isinstance(item, types.FunctionType) else
lambda item: None
)
return lookup(item)
def get_unpatched_class(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
external_bases = (
cls
for cls in _get_mro(cls)
if not cls.__module__.startswith('setuptools')
)
base = next(external_bases)
if not base.__module__.startswith('distutils'):
msg = "distutils has already been patched by %r" % cls
raise AssertionError(msg)
return base
def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
has_issue_12885 = sys.version_info <= (3, 5, 3)
if has_issue_12885:
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = setuptools.findall
needs_warehouse = (
sys.version_info < (2, 7, 13)
or
(3, 0) < sys.version_info < (3, 3, 7)
or
(3, 4) < sys.version_info < (3, 4, 6)
or
(3, 5) < sys.version_info <= (3, 5, 3)
)
if needs_warehouse:
warehouse = 'https://upload.pypi.org/legacy/'
distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
_patch_distribution_metadata_write_pkg_file()
_patch_distribution_metadata_write_pkg_info()
# Install Distribution throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = setuptools.dist.Distribution
# Install the patched Extension
distutils.core.Extension = setuptools.extension.Extension
distutils.extension.Extension = setuptools.extension.Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = (
setuptools.extension.Extension
)
patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata_write_pkg_file():
"""Patch write_pkg_file to also write Requires-Python/Requires-External"""
distutils.dist.DistributionMetadata.write_pkg_file = (
setuptools.dist.write_pkg_file
)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
distutils.dist.DistributionMetadata.write_pkg_info = (
setuptools.dist.write_pkg_info
)
def patch_func(replacement, target_mod, func_name):
"""
Patch func_name in target_mod with replacement
Important - original must be resolved by name to avoid
patching an already patched function.
"""
original = getattr(target_mod, func_name)
# set the 'unpatched' attribute on the replacement to
# point to the original.
vars(replacement).setdefault('unpatched', original)
# replace the function in the original module
setattr(target_mod, func_name, replacement)
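# Hypothetical usage sketch for patch_func; 'some_mod' and 'greet' are
# made-up names for illustration only:
#     >>> import types; some_mod = types.ModuleType('some_mod')
#     >>> some_mod.greet = lambda: 'hi'
#     >>> def greet(): return 'patched'
#     >>> patch_func(greet, some_mod, 'greet')
#     >>> some_mod.greet(), greet.unpatched()
#     ('patched', 'hi')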
def get_unpatched_function(candidate):
return getattr(candidate, 'unpatched')
def patch_for_msvc_specialized_compiler():
"""
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
# import late to avoid circular imports on Python < 3.5
msvc = import_module('setuptools.msvc')
if platform.system() != 'Windows':
# Compilers are only available on Microsoft Windows
return
def patch_params(mod_name, func_name):
"""
Prepare the parameters for patch_func to patch indicated function.
"""
repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
repl_name = repl_prefix + func_name.lstrip('_')
repl = getattr(msvc, repl_name)
mod = import_module(mod_name)
if not hasattr(mod, func_name):
raise ImportError(func_name)
return repl, mod, func_name
# Python 2.7 to 3.4
msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
# Python 3.5+
msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
try:
# Patch distutils.msvc9compiler
patch_func(*msvc9('find_vcvarsall'))
patch_func(*msvc9('query_vcvarsall'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler._get_vc_env
patch_func(*msvc14('_get_vc_env'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler.gen_lib_options for Numpy
patch_func(*msvc14('gen_lib_options'))
except ImportError:
pass
| apache-2.0 |
imsplitbit/nova | nova/tests/integrated/api_samples_test_base.py | 10 | 13015 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from lxml import etree
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.integrated import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
def _pretty_data(self, data):
if self.ctype == 'json':
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
else:
if data is None:
# Likely from missing XML file.
return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
if self.ctype == 'json':
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
else:
def to_dict(node):
ret = {}
if node.items():
ret.update(dict(node.items()))
if node.text:
ret['__content__'] = node.text
if node.tag:
ret['__tag__'] = node.tag
if node.nsmap:
ret['__nsmap__'] = node.nsmap
for element in node:
ret.setdefault(node.tag, [])
ret[node.tag].append(to_dict(element))
return ret
return to_dict(etree.fromstring(data))
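# Illustrative example of the XML branch above (not in the original module):
# to_dict(etree.fromstring('<a x="1"><b/></a>')) yields roughly
#     {'x': '1', '__tag__': 'a', 'a': [{'__tag__': 'b'}]}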
@classmethod
def _get_sample_path(cls, name, dirname, suffix=''):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
if cls.extension_name:
alias = importutils.import_class(cls.extension_name).alias
parts.append(alias)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname, "../../../doc"))
return cls._get_sample_path(name, dirname)
@classmethod
def _get_template(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
template = self._get_template(name)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(name), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
_('Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
_('%(result_str)s: %(result)s is not a list.') %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append(_('Extra list items in template:'))
error.extend([repr(o) for o in expected])
if extra:
error.append(_('Extra list items in %(result_str)s:') %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, basestring) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s') %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, basestring):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
result = result.strip()
if expected != result:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s') % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
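# Illustrative note (not in the original module) on the 'int:' handling
# above: an unquoted template value like %(flavor_id)s is rewritten by
# _objectify to "%(int:flavor_id)s" so the template stays valid JSON; here
# the prefix is stripped again and the pattern is matched against
# str(result), since the live response carries a real integer.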
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status, exp_code)
response_data = response.read()
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(name))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
return {
# NOTE(treinish): Could result in a false positive, but it
# shouldn't be an issue for this case.
'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]'
'\d{2}:\d{2}:\d{2}'
'(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '-----BEGIN RSA PRIVATE KEY-----'
'[a-zA-Z0-9\n/+=]*'
'-----END RSA PRIVATE KEY-----',
'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
'Generated by Nova',
'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
}
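# Illustrative note (not in the original module): these patterns pair with
# %-style placeholders in the template files, e.g. a template line
#     "id": "%(id)s"
# is substituted with this dict before being compiled as a regex in
# _compare_result.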
def _get_response(self, url, method, body=None, strip_version=False):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False):
return self._get_response(url, 'GET', strip_version=strip_version)
def _do_post(self, url, name, subs, method='POST'):
body = self._read_template(name) % subs
sample = self._get_sample(name)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body)
def _do_put(self, url, name, subs):
return self._do_post(url, name, subs, method='PUT')
def _do_delete(self, url):
return self._get_response(url, 'DELETE')
| apache-2.0 |
melviso/phycpp | beatle/activity/models/ui/pane/PyModulePane.py | 2 | 12276 | # -*- coding: utf-8 -*-
"""Subclass of PyModulePane, which is generated by wxFormBuilder."""
#import wx
import wx.stc
from beatle import tran, localpath
from beatle.ctx import THE_CONTEXT as context
from beatle.app.ui import pane, dlg
from beatle.lib.handlers import Identifiers
from beatle.activity.models.ui import ui as ui
from beatle.activity.models.handlers.py import EditorHandler
# Implementing PyModulePane
class PyModulePane(ui.PyModulePane):
"""Implements method pane editor"""
#command ids: run
_closePane = Identifiers.register('close-pane')
_saveCode = Identifiers.new()
_undoCode = Identifiers.register('undo')
_redoCode = Identifiers.register('redo')
_copyCode = Identifiers.register('copy')
_cutCode = Identifiers.register('cut')
_pasteCode = Identifiers.register('paste')
_deleteCode = Identifiers.register('delete')
_toggleFull = Identifiers.register('fullscreen')
_navigator = Identifiers.new()
def _create_toolbars(self):
"""Create custom toolbar"""
self.m_localToolbar = wx.aui.AuiToolBar(self, wx.ID_ANY, wx.DefaultPosition,
wx.DefaultSize, wx.aui.AUI_TB_HORZ_LAYOUT)
self.m_save = self.m_localToolbar.AddTool(self._saveCode,
u"save project", wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR),
wx.NullBitmap, wx.ITEM_NORMAL, u"Save", wx.EmptyString, None)
self.m_localToolbar.AddSeparator()
self.m_undo = self.m_localToolbar.AddTool(self._undoCode, u"tool",
wx.ArtProvider.GetBitmap(wx.ART_UNDO, wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL,
u"Undo", wx.EmptyString, None)
self.m_redo = self.m_localToolbar.AddTool(self._redoCode, u"tool",
wx.ArtProvider.GetBitmap(wx.ART_REDO, wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL,
u"Redo", wx.EmptyString, None)
self.m_localToolbar.AddSeparator()
self.m_copy = self.m_localToolbar.AddTool(self._copyCode, u"copy",
wx.ArtProvider.GetBitmap(wx.ART_COPY, wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL,
u"Copy", wx.EmptyString, None)
self.m_cut = self.m_localToolbar.AddTool(self._cutCode, u"cut", wx.ArtProvider.GetBitmap(
wx.ART_CUT, wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL, u"Cut", wx.EmptyString, None)
self.m_paste = self.m_localToolbar.AddTool(self._pasteCode, u"paste",
wx.ArtProvider.GetBitmap(wx.ART_PASTE, wx.ART_TOOLBAR), wx.NullBitmap,
wx.ITEM_NORMAL, u"Paste", wx.EmptyString, None)
self.m_localToolbar.AddSeparator()
self.m_delete = self.m_localToolbar.AddTool(self._deleteCode, u"delete",
wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL,
u"Delete", wx.EmptyString, None)
self.m_localToolbar.AddSeparator()
self.m_fullScreen = self.m_localToolbar.AddTool(self._toggleFull, u"tool",
wx.ArtProvider.GetBitmap(u"gtk-fullscreen", wx.ART_TOOLBAR), wx.NullBitmap, wx.ITEM_NORMAL,
u"Toggle fullscreen", wx.EmptyString, None)
self.m_localToolbar.AddSeparator()
self.m_navigator = self.m_localToolbar.AddTool(self._navigator, u"tool", wx.Bitmap(localpath("app/res/method.xpm"),
wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, u"insert call", wx.EmptyString, None)
self.m_localToolbar.Realize()
self.m_toolbarSizer.Add(self.m_localToolbar, 0, wx.ALL, 5)
self.Layout()
# Connect Events
self.m_editor.Bind(wx.EVT_KILL_FOCUS, self.OnLeaveFocus)
self.m_editor.Bind(wx.EVT_SET_FOCUS, self.OnGetFocus)
def __init__(self, parent, mainframe, method):
"""Intialization of method editor"""
self._editorArgs = {
'language': 'python',
'handler': EditorHandler(text=method._content,
use_bookmarks=False,
use_breakpoints=False,
read_only=method._readOnly)
}
super(PyModulePane, self).__init__(parent)
self._mainframe = mainframe
self._object = method
self._notebook = parent
self._create_toolbars()
"""
if method.note:
self.m_editor.AnnotationSetVisible(wx.stc.STC_ANNOTATION_BOXED)
self.m_editor.AnnotationSetText(0, method.note)"""
#self.m_editor.SendMsg(2540, wp="Prueba de anotacion", lp=long(0))
#self.m_editor.SendMsg(2548, wp=1, lp=long(0))
self.Layout()
wx.stc.EVT_STC_CHANGE(self.m_editor,
self.m_editor.GetId(), self.OnEditorChange)
#self.Bind(EVT_STC_STYLENEEDED, self.handleStyleNeeded,
#id=self.m_editor.GetId())
frame = context.frame
self.Bind(wx.EVT_MENU, frame.CloseCurrentDocPane, id=self._closePane)
self.Bind(wx.EVT_MENU, self.OnDeleteCode, id=self._deleteCode)
self.Bind(wx.EVT_MENU, self.OnUndoCode, id=self._undoCode)
self.Bind(wx.EVT_MENU, self.OnRedoCode, id=self._redoCode)
self.Bind(wx.EVT_MENU, self.OnCopyCode, id=self._copyCode)
self.Bind(wx.EVT_MENU, self.OnPasteCode, id=self._pasteCode)
self.Bind(wx.EVT_MENU, self.OnCutCode, id=self._cutCode)
self.Bind(wx.EVT_MENU, self.OnSaveCode, id=self._saveCode)
self.Bind(wx.EVT_MENU, self.OnFullScreen, id=self._toggleFull)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateDeleteCode, id=self._deleteCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUndoCode, id=self._undoCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateRedoCode, id=self._redoCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateCopyCode, id=self._copyCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdatePasteCode, id=self._pasteCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateCutCode, id=self._cutCode)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateSaveCode, id=self._saveCode)
self.m_editor.popup_handler = self
self._fullScreen = None
aTable = wx.AcceleratorTable([
wx.AcceleratorEntry(wx.ACCEL_ALT | wx.ACCEL_CTRL, ord('F'), self._toggleFull),
wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('W'), self._closePane),
wx.AcceleratorEntry(wx.ACCEL_CTRL, ord('S'), self._saveCode),
wx.AcceleratorEntry(wx.ACCEL_NORMAL, wx.WXK_DELETE, self._deleteCode)
])
self.SetAcceleratorTable(aTable)
def Refresh(self):
"""Update editor from external changes (like undo/redo)"""
if not self.m_editor.IsModified():
if self.m_editor.GetText() != self._object._content:
line = self.m_editor.GetFirstVisibleLine()
pos = self.m_editor.GetCurrentPos()
self.m_editor.Freeze()
self.m_editor.SetText(self._object._content)
try:
self.m_editor.SetFirstVisibleLine(line)
self.m_editor.SetCurrentPos(pos)
except:
pass
self.m_editor.Thaw()
wx.YieldIfNeeded()
self.m_editor.SetModified(False)
def PreDelete(self):
"""Remove toolbar first in order to avoid gtk-collision at close"""
#self.m_editor.Destroy()
self.m_localToolbar.Destroy()
def Popup(self, editor, text=None):
"""Shows a custom popup"""
menu = wx.Menu()
if text:
# in the future, we need to show a complete floating dialog here
# we receive "this->" or "object." here ..
pass
else:
menu.Append(self._mainframe.editProperties.GetId(),
"test menu", "Edit relation properties")
pos = editor.GetCurrentPos()
p = editor.PointFromPosition(pos)
editor.PopupMenu(menu, p)
menu.Destroy()
def OnNavigator(self, event):
"""Navigate throug the class structure"""
pos = self.m_editor.GetCurrentPos()
p = self.m_editor.PointFromPosition(pos)
p = self.m_editor.ClientToScreen(p)
dialog = dlg.CodeNavigator(self, self._object)
dialog.Move(p)
if dialog.ShowModal() != wx.ID_OK:
return
pass
def OnUndoCode(self, event):
"""Edit undo"""
self.m_editor.Undo()
def OnRedoCode(self, event):
"""Edit redo"""
self.m_editor.Redo()
def OnUpdatePasteCode(self, event):
"""Edit paste?"""
event.Enable(self.m_editor.CanPaste())
def OnUpdateCopyCode(self, event):
"""Edit copy?"""
(b, e) = self.m_editor.GetSelection()
event.Enable(b != e)
def OnUpdateCutCode(self, event):
"""Edit cut?"""
if self._object._readOnly:
event.Enable(False)
else:
(b, e) = self.m_editor.GetSelection()
event.Enable(b != e)
def OnUpdateRedoCode(self, event):
"""Edit redo?"""
if self.m_editor.CanRedo():
event.SetText("Redo edit")
event.Enable(True)
else:
event.SetText("Can't redo")
event.Enable(False)
def OnUpdateUndoCode(self, event):
"""Edit undo?"""
if self.m_editor.CanUndo():
event.SetText("Undo edit")
event.Enable(True)
else:
event.SetText("Can't undo")
event.Enable(False)
def OnUpdateDeleteCode(self, event):
"""Edit delete?"""
event.Enable(not self._object._readOnly)
def Enabled(self, event):
"""Joker event"""
event.Enable(True)
def Disabled(self, event):
"""Joker event"""
event.Enable(False)
def UpdateModifiedTitle(self):
"""Update the tile after modifications"""
if not self._fullScreen:
i = self._notebook.GetPageIndex(self)
s = self._notebook.GetPageText(i)
m = '[modified] '
if self.m_editor.IsModified():
if s.find(m) == wx.NOT_FOUND:
self._notebook.SetPageText(i, m + s)
else:
if s.find(m) != wx.NOT_FOUND:
s = s.replace(m, '')
self._notebook.SetPageText(i, s)
def OnEditorChange(self, event):
"""Called when editor status change"""
if not self._fullScreen:
self.UpdateModifiedTitle()
@tran.TransactionalMethod('edit method')
def Commit(self):
"""Update document"""
text = self.m_editor.GetText()
o = self._object
if text == o._content:
return False
o.SaveState()
o._content = text
o.project.SetModified(True)
o.ExportPythonCodeFiles()
return True
def OnDeleteCode(self, event):
"""Handle delete key"""
(b, e) = self.m_editor.GetSelection()
if b == e:
self.m_editor.CharRight()
self.m_editor.DeleteBack()
def OnCopyCode(self, event):
"""Handle a copy event"""
self.m_editor.Copy()
def OnPasteCode(self, event):
"""Handle a paste event"""
self.m_editor.Paste()
def OnCutCode(self, event):
"""Handle a paste event"""
self.m_editor.Cut()
def OnUpdateSaveCode(self, event):
"""Handles update event"""
event.Enable(self.m_editor.IsModified())
def OnSaveCode(self, event):
"""Handles save event"""
self.Commit()
self.m_editor.ResetModified()
self.UpdateModifiedTitle()
def OnFullScreen(self, event):
"""ShowFullscreen"""
if self._fullScreen:
self._fullScreen.LeaveFullScreen()
wx.YieldIfNeeded()
self._fullScreen.Close(True)
self._fullScreen = None
self.UpdateModifiedTitle()
else:
self._fullScreen = pane.FullScreen(self)
self.m_editor.SetFocus()
def handleStyleNeeded(self, event):
"""Handle scintylla events"""
wnd = self.m_editor
pos = wnd.GetCurrentPos()
if pos == 0:
return
if wnd.GetCharAt(pos - 1) == ord(':'):  # GetCharAt returns an int character code
lnno = wnd.GetCurrentLine()
iden = wnd.GetLineIndentation(lnno - 1)
wnd.SetLineIndentation(lnno, iden)
| gpl-2.0 |
hyperized/ansible | lib/ansible/module_utils/docker/common.py | 13 | 41043 | #
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
import sys
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker (Docker SDK for Python >= 2.0.0)
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py (Docker SDK for Python < 2.0.0)
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
try:
from requests.exceptions import RequestException
except ImportError:
# Either docker-py is no longer using requests, or docker-py isn't around either,
# or docker-py's dependency requests is missing. In any case, define an exception
# class RequestException so that our code doesn't break.
class RequestException(Exception):
pass
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = []
DOCKER_REQUIRED_TOGETHER = [
['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
# No Docker SDK for Python. Create a placeholder client to allow
# instantiation of AnsibleModule and proper error handing
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Check whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
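# Illustrative example (not in the original module):
#     >>> is_image_name_id('sha256:' + 64 * '0')
#     True
#     >>> is_image_name_id('ubuntu:latest')
#     False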
def is_valid_tag(tag, allow_empty=False):
"""Check whether the given string is a valid docker tag name."""
if not tag:
return allow_empty
# See here ("Extended description") for a definition what tags can be:
# https://docs.docker.com/engine/reference/commandline/tag/
return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
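# Illustrative example (not in the original module):
#     >>> is_valid_tag('v1.0-rc_2')
#     True
#     >>> is_valid_tag('')
#     False
#     >>> is_valid_tag('', allow_empty=True)
#     True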
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
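# Illustrative example (not in the original module): tuples become lists
# and dict subclasses collapse to plain dicts.
#     >>> sanitize_result({'a': (1, 2), 'b': [{'c': 3}]})
#     {'a': [1, 2], 'b': [{'c': 3}]}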
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def update_tls_hostname(result):
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
result['tls_hostname'] = parsed_url.netloc
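# Illustrative example (not in the original function): for a docker_host of
# 'tcp://192.0.2.10:2376' the netloc is '192.0.2.10:2376', so the derived
# tls_hostname becomes '192.0.2.10'.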
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def get_connect_params(auth, fail_function):
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
else:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = _get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
# TLS with no certs and no host verification
tls_config = _get_tls_config(verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
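# Illustrative sketch (values made up): with tls_verify plus client cert and
# key set, get_connect_params returns roughly
#     {'base_url': 'https://192.0.2.10:2376', 'tls': <TLSConfig>,
#      'version': 'auto', 'timeout': 60}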
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
"Hint: if you do not need Python 2.6 support, try "
"`pip uninstall docker-py` instead, followed by `pip install docker`.")
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module (for %s's Python %s). It is recommended to install the docker module if no "
"support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
"can leave the other module in a broken state." % (platform.node(), sys.executable))
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = missing_required_lib("Docker SDK for Python: docker")
msg = msg + ", for example via `pip install docker`. The error was: %s"
else:
msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py) for non-Python-2.6 users.
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif docker_version < LooseVersion('2.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
self.docker_api_version_str = self.version()['ApiVersion']
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.docker_api_version = LooseVersion(self.docker_api_version_str)
if min_docker_api_version is not None:
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
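# Illustrative example (not in the original class); note that values taken
# from the environment stay strings:
#     >>> os.environ['DOCKER_TIMEOUT'] = '90'
#     >>> AnsibleDockerClient._get_value('timeout', None, 'DOCKER_TIMEOUT', 60)
#     '90'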
@property
def auth_params(self):
# Get authentication credentials.
# Precedence: module parameters -> environment variables -> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
# support use_tls option in docker_image.py. This will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['validate_certs'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
try:
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
def get_network(self, name=None, network_id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
result = self.inspect_network(network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20, images pulled from Docker Hub may be reported under the name 'docker.io/<name>'
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case: if docker.io wasn't there, it can be that
# the image wasn't found either (#15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, image_id):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
try:
inspection = self.inspect_image(image_id)
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))
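# Worked example (result payload invented for illustration only):
# report_warnings({'a': {'b': ['w1', 'w2']}}, warnings_key=['a', 'b'])
# crawls to result['a']['b'] and emits two warnings, "Docker warning: w1"
# and "Docker warning: w2"; a non-empty string at that key would instead be
# reported as a single warning.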
def inspect_distribution(self, image, **kwargs):
'''
Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
since prior versions did not support accessing private repositories.
'''
if self.docker_py_version < LooseVersion('4.0.0'):
registry = auth.resolve_repository_name(image)[0]
header = auth.get_config_header(self, registry)
if header:
return self._result(self._get(
self._url('/distribution/{0}/json', image),
headers={'X-Registry-Auth': header}
), json=True)
return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
def compare_generic(a, b, method, datatype):
'''
Compare values a and b as described by method and datatype.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``datatype == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``datatype`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if datatype == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if datatype == 'value':
return a == b
elif datatype == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif datatype == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif datatype == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif datatype == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
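# A few illustrative calls (values invented for demonstration):
# compare_generic(None, [], 'strict', 'list')                      -> True
# compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list') -> True
#     ([1, 2] occurs as an ordered subsequence of [1, 3, 2])
# compare_generic([1, 2], [2, 1], 'strict', 'set')                 -> True
# compare_generic({'a': 1}, {'a': 1, 'b': 2},
#                 'allow_more_present', 'dict')                    -> True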
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def has_difference_for(self, name):
'''
Return True if a difference exists for ``name``, else False.
'''
return any(diff for diff in self._diff if diff['name'] == name)
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
Return the names of the differing options, in the legacy docker_* format.
'''
result = [entry['name'] for entry in self._diff]
return result
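# Illustrative usage sketch (names invented for demonstration):
# tracker = DifferenceTracker()
# tracker.add('image', parameter='busybox:latest', active='busybox:1.31')
# tracker.has_difference_for('image')  -> True
# tracker.get_before_after()           -> ({'image': 'busybox:1.31'},
#                                          {'image': 'busybox:latest'})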
def clean_dict_booleans_for_docker_api(data):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters).
'''
result = dict()
if data is not None:
for k, v in data.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
else:
v = str(v)
result[str(k)] = v
return result
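# e.g. clean_dict_booleans_for_docker_api({'enable_icc': True, 'mtu': 1500})
# returns {'enable_icc': 'true', 'mtu': '1500'} (keys invented for
# illustration).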
def convert_duration_to_nanosecond(time_str):
"""
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
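# e.g. convert_duration_to_nanosecond('1m30s') returns 90 * 10 ** 9,
# and convert_duration_to_nanosecond('500ms') returns 5 * 10 ** 8.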
def parse_healthcheck(healthcheck):
"""
Return a dictionary of healthcheck parameters, plus a boolean indicating
whether the healthcheck defined in the image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = dict()
# All supported healthcheck parameters
options = dict(
test='test',
interval='interval',
timeout='timeout',
start_period='start_period',
retries='retries'
)
duration_options = ['interval', 'timeout', 'start_period']
for (key, value) in options.items():
if value in healthcheck:
if healthcheck.get(value) is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value in duration_options:
time = convert_duration_to_nanosecond(healthcheck.get(value))
if time:
result[key] = time
elif healthcheck.get(value):
result[key] = healthcheck.get(value)
if key == 'test':
if isinstance(result[key], (tuple, list)):
result[key] = [str(e) for e in result[key]]
else:
result[key] = ['CMD-SHELL', str(result[key])]
elif key == 'retries':
try:
result[key] = int(result[key])
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(result[key])
)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
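# Illustrative behaviour (input values invented for demonstration):
# parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s',
#                    'retries': '3'})
# -> ({'test': ['CMD-SHELL', 'curl -f http://localhost/'],
#      'interval': 30000000000, 'retries': 3}, False)
# parse_healthcheck({'test': ['NONE']}) -> (None, True)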
def omit_none_from_dict(d):
"""
Return a copy of the dictionary with all keys with value None omitted.
"""
return dict((k, v) for (k, v) in d.items() if v is not None)
| gpl-3.0 |
fayf/pyload | module/plugins/hoster/MegaCoNz.py | 14 | 8273 | # -*- coding: utf-8 -*-
import array
import os
# import pycurl
import random
import re
from base64 import standard_b64decode
from Crypto.Cipher import AES
from Crypto.Util import Counter
from module.common.json_layer import json_loads, json_dumps
from module.plugins.internal.Hoster import Hoster
from module.utils import decode, fs_decode, fs_encode
############################ General errors ###################################
# EINTERNAL (-1): An internal error has occurred. Please submit a bug report, detailing the exact circumstances in which this error occurred
# EARGS (-2): You have passed invalid arguments to this command
# EAGAIN (-3): (always at the request level) A temporary congestion or server malfunction prevented your request from being processed. No data was altered. Retry. Retries must be spaced with exponential backoff
# ERATELIMIT (-4): You have exceeded your command weight per time quota. Please wait a few seconds, then try again (this should never happen in sane real-life applications)
#
############################ Upload errors ####################################
# EFAILED (-5): The upload failed. Please restart it from scratch
# ETOOMANY (-6): Too many concurrent IP addresses are accessing this upload target URL
# ERANGE (-7): The upload file packet is out of range or not starting and ending on a chunk boundary
# EEXPIRED (-8): The upload target URL you are trying to access has expired. Please request a fresh one
#
############################ Stream/System errors #############################
# ENOENT (-9): Object (typically, node or user) not found
# ECIRCULAR (-10): Circular linkage attempted
# EACCESS (-11): Access violation (e.g., trying to write to a read-only share)
# EEXIST (-12): Trying to create an object that already exists
# EINCOMPLETE (-13): Trying to access an incomplete resource
# EKEY (-14): A decryption operation failed (never returned by the API)
# ESID (-15): Invalid or expired user session, please relogin
# EBLOCKED (-16): User blocked
# EOVERQUOTA (-17): Request over quota
# ETEMPUNAVAIL (-18): Resource temporarily not available, please try again later
# ETOOMANYCONNECTIONS (-19): Too many connections on this resource
# EWRITE (-20): Write failed
# EREAD (-21): Read failed
# EAPPKEY (-22): Invalid application key; request not processed
class MegaCoNz(Hoster):
__name__ = "MegaCoNz"
__type__ = "hoster"
__version__ = "0.31"
__status__ = "testing"
__pattern__ = r'(https?://(?:www\.)?mega(\.co)?\.nz/|mega:|chrome:.+?)#(?P<TYPE>N|)!(?P<ID>[\w^_]+)!(?P<KEY>[\w,-]+)'
__description__ = """Mega.co.nz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "ranan@pyload.org"),
("Walter Purcaro", "vuolter@gmail.com")]
API_URL = "https://eu.api.mega.co.nz/cs"
FILE_SUFFIX = ".crypted"
def b64_decode(self, data):
data = data.replace("-", "+").replace("_", "/")
return standard_b64decode(data + '=' * (-len(data) % 4))
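# e.g. b64_decode("aGVsbG8") == "hello": Mega uses the URL-safe alphabet
# ('-' and '_') and strips padding, so both are undone before decoding.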
def get_cipher_key(self, key):
"""
Construct the cipher key from the given data
"""
a = array.array("I", self.b64_decode(key))
k = array.array("I", (a[0] ^ a[4], a[1] ^ a[5], a[2] ^ a[6], a[3] ^ a[7]))
iv = a[4:6] + array.array("I", (0, 0))
meta_mac = a[6:8]
return k, iv, meta_mac
def api_response(self, **kwargs):
"""
Dispatch a call to the api, see https://mega.co.nz/#developers
"""
#: Generate a session id, no idea where to obtain elsewhere
uid = random.randint(10 << 9, 10 ** 10)
res = self.load(self.API_URL, get={'id': uid}, post=json_dumps([kwargs]))
self.log_debug("Api Response: " + res)
return json_loads(res)
def decrypt_attr(self, data, key):
k, iv, meta_mac = self.get_cipher_key(key)
cbc = AES.new(k, AES.MODE_CBC, "\0" * 16)
attr = decode(cbc.decrypt(self.b64_decode(data)))
self.log_debug("Decrypted Attr: %s" % attr)
if not attr.startswith("MEGA"):
self.fail(_("Decryption failed"))
#: Data is padded, 0-bytes must be stripped
return json_loads(re.search(r'{.+?}', attr).group(0))
def decrypt_file(self, key):
"""
Decrypts the file at ``last_download``
"""
#: Upper 64 bit of counter start
n = self.b64_decode(key)[16:24]
#: Convert counter to long and shift bytes
k, iv, meta_mac = self.get_cipher_key(key)
ctr = Counter.new(128, initial_value=long(n.encode("hex"), 16) << 64)
cipher = AES.new(k, AES.MODE_CTR, counter=ctr)
self.pyfile.setStatus("decrypting")
self.pyfile.setProgress(0)
file_crypted = fs_encode(self.last_download)
file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]
try:
f = open(file_crypted, "rb")
df = open(file_decrypted, "wb")
except IOError, e:
self.fail(e)
chunk_size = 2 ** 15 #: Buffer size, 32k
# file_mac = [0, 0, 0, 0] # calculate CBC-MAC for checksum
chunks = os.path.getsize(file_crypted) / chunk_size + 1
for i in xrange(chunks):
buf = f.read(chunk_size)
if not buf:
break
chunk = cipher.decrypt(buf)
df.write(chunk)
self.pyfile.setProgress(int((100.0 / chunks) * i))
# chunk_mac = [iv[0], iv[1], iv[0], iv[1]]
# for i in xrange(0, chunk_size, 16):
# block = chunk[i:i+16]
# if len(block) % 16:
# block += '=' * (16 - (len(block) % 16))
# block = array.array("I", block)
# chunk_mac = [chunk_mac[0] ^ block[0], chunk_mac[1] ^ block[1], chunk_mac[2] ^ block[2], chunk_mac[3] ^ block[3]]
# chunk_mac = aes_cbc_encrypt_a32(chunk_mac, k)
# file_mac = [file_mac[0] ^ chunk_mac[0], file_mac[1] ^ chunk_mac[1], file_mac[2] ^ chunk_mac[2], file_mac[3] ^ chunk_mac[3]]
# file_mac = aes_cbc_encrypt_a32(file_mac, k)
self.pyfile.setProgress(100)
f.close()
df.close()
# if file_mac[0] ^ file_mac[1], file_mac[2] ^ file_mac[3] is not meta_mac:
# os.remove(file_decrypted)
# self.fail(_("Checksum mismatch"))
os.remove(file_crypted)
self.last_download = fs_decode(file_decrypted)
def check_error(self, code):
ecode = abs(code)
if ecode in (9, 16, 21):
self.offline()
elif ecode in (3, 13, 17, 18, 19):
self.temp_offline()
elif ecode in (1, 4, 6, 10, 15): #: 21 is already handled by offline() above
self.retry(5, 30, _("Error code: [%s]") % -ecode)
else:
self.fail(_("Error code: [%s]") % -ecode)
def process(self, pyfile):
pattern = re.match(self.__pattern__, pyfile.url).groupdict()
id = pattern['ID']
key = pattern['KEY']
public = pattern['TYPE'] == ""
self.log_debug("ID: %s" % id, "Key: %s" % key, "Type: %s" % ("public" if public else "node"))
#: G is for requesting a download url
#: This is similar to the calls in the mega js app, documentation is very bad
if public:
mega = self.api_response(a="g", g=1, p=id, ssl=1)[0]
else:
mega = self.api_response(a="g", g=1, n=id, ssl=1)[0]
if isinstance(mega, int):
self.check_error(mega)
elif "e" in mega:
self.check_error(mega['e'])
attr = self.decrypt_attr(mega['at'], key)
pyfile.name = attr['n'] + self.FILE_SUFFIX
pyfile.size = mega['s']
# self.req.http.c.setopt(pycurl.SSL_CIPHER_LIST, "RC4-MD5:DEFAULT")
self.download(mega['g'])
self.decrypt_file(key)
#: Everything is finished and final name can be set
pyfile.name = attr['n']
| gpl-3.0 |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/archiveuploader/tests/test_nascentupload.py | 1 | 5083 | # Copyright 2010-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test NascentUpload functionality."""
__metaclass__ = type
from testtools import TestCase
from testtools.matchers import MatchesStructure
from lp.archiveuploader.changesfile import determine_file_class_and_name
from lp.archiveuploader.nascentupload import (
EarlyReturnUploadError,
NascentUpload,
)
from lp.archiveuploader.tests import (
datadir,
getPolicy,
)
from lp.archiveuploader.uploadpolicy import ArchiveUploadType
from lp.services.log.logger import DevNullLogger
from lp.testing.layers import (
LaunchpadZopelessLayer,
ZopelessDatabaseLayer,
)
class FakeChangesFile:
def __init__(self):
self.files = []
class TestMatchDDEBs(TestCase):
"""Tests that NascentUpload correctly links DEBs to their DDEBs.
Also verifies detection of DDEB-related error cases.
"""
layer = LaunchpadZopelessLayer
def setUp(self):
super(TestMatchDDEBs, self).setUp()
self.changes = FakeChangesFile()
self.upload = NascentUpload(self.changes, None, DevNullLogger())
def addFile(self, filename, comp_and_section='main/devel',
priority='extra'):
"""Add a file of the right type to the upload."""
package, cls = determine_file_class_and_name(filename)
file = cls(
filename, None, 100, comp_and_section, priority, package, '666',
self.changes, None, self.upload.logger)
self.changes.files.append(file)
return file
def assertMatchDDEBErrors(self, error_list):
self.assertEquals(
error_list, [str(e) for e in self.upload._matchDDEBs()])
def testNoLinksWithNoBinaries(self):
# No links will be made if there are no binaries whatsoever.
self.addFile('something_1.0.diff.gz')
self.assertMatchDDEBErrors([])
def testNoLinksWithJustDEBs(self):
# No links will be made if there are no DDEBs.
self.addFile('blah_1.0_all.deb')
self.addFile('libblah_1.0_i386.deb')
self.assertMatchDDEBErrors([])
for file in self.changes.files:
self.assertIs(None, file.ddeb_file)
def testLinksMatchingDDEBs(self):
# DDEBs will be linked to their matching DEBs.
self.addFile('blah_1.0_all.deb')
self.addFile('libblah_1.0_i386.deb')
self.addFile('libblah-dbgsym_1.0_i386.ddeb')
self.addFile('libfooble_1.0_i386.udeb')
self.addFile('libfooble-dbgsym_1.0_i386.ddeb')
self.assertMatchDDEBErrors([])
self.assertIs(None, self.changes.files[0].ddeb_file)
self.assertIs(self.changes.files[2], self.changes.files[1].ddeb_file)
self.assertIs(self.changes.files[1], self.changes.files[2].deb_file)
self.assertIs(None, self.changes.files[2].ddeb_file)
def testDuplicateDDEBsCauseErrors(self):
# An error will be raised if a DEB has more than one matching
# DDEB.
self.addFile('libblah_1.0_i386.deb')
self.addFile('libblah-dbgsym_1.0_i386.ddeb')
self.addFile('libblah-dbgsym_1.0_i386.ddeb')
self.assertMatchDDEBErrors(
['Duplicated debug packages: libblah-dbgsym 666 (i386)'])
def testMismatchedDDEBsCauseErrors(self):
# An error will be raised if a DDEB has no matching DEB.
self.addFile('libblah_1.0_i386.deb')
self.addFile('libblah-dbgsym_1.0_amd64.ddeb')
self.assertMatchDDEBErrors(
['Orphaned debug packages: libblah-dbgsym 666 (amd64)'])
class TestOverrideDDEBs(TestMatchDDEBs):
def test_DDEBsGetOverrideFromDEBs(self):
# Test the basic case ensuring that DDEB files always match the
# DEB's overrides.
deb = self.addFile("foo_1.0_i386.deb", "main/devel", "extra")
ddeb = self.addFile(
"foo-dbgsym_1.0_i386.ddeb", "universe/web", "low")
self.assertMatchDDEBErrors([])
self.upload._overrideDDEBSs()
self.assertThat(
ddeb,
MatchesStructure.fromExample(
deb, "component_name", "section_name", "priority_name"))
class TestNascentUpload(TestCase):
layer = ZopelessDatabaseLayer
def test_hash_mismatch_rejects(self):
# A hash mismatch for any uploaded file will cause the upload to
# be rejected.
policy = getPolicy(
name="sync", distro="ubuntu", distroseries="hoary")
policy.accepted_type = ArchiveUploadType.BINARY_ONLY
upload = NascentUpload.from_changesfile_path(
datadir("suite/badhash_1.0-1/badhash_1.0-1_i386.changes"),
policy, DevNullLogger())
upload.process()
self.assertTrue(upload.is_rejected)
self.assertEqual(
'File badhash_1.0-1_i386.deb mentioned in the changes has a SHA1 '
'mismatch. 2ca33cf32a45852c62b465aaf9063fb7deb31725 != '
'91556113ad38eb35d2fe03d27ae646e0ed487a3d',
upload.rejection_message)
| agpl-3.0 |
iambibhas/django | tests/admin_custom_urls/tests.py | 6 | 5517 | from __future__ import unicode_literals
from django.contrib.admin.utils import quote
from django.core.urlresolvers import reverse
from django.template.response import TemplateResponse
from django.test import TestCase, override_settings
from .models import Action, Person, Car
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_custom_urls.urls',)
class AdminCustomUrlsTest(TestCase):
"""
Remember that:
* The Action model has a CharField PK.
* The ModelAdmin for Action customizes the add_view URL; it's
'<app name>/<model name>/!add/'
"""
fixtures = ['users.json', 'actions.json']
def setUp(self):
self.client.login(username='super', password='secret')
def test_basic_add_GET(self):
"""
Ensure GET on the add_view works.
"""
response = self.client.get('/admin/admin_custom_urls/action/!add/')
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
"""
Ensure GET on the add_view plus specifying a field value in the query
string works.
"""
response = self.client.get('/admin/admin_custom_urls/action/!add/', {'name': 'My Action'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value="My Action"')
def test_basic_add_POST(self):
"""
Ensure POST on add_view works.
"""
post_data = {
'_popup': '1',
"name": 'Action added through a popup',
"description": "Description of added action",
}
response = self.client.post('/admin/admin_custom_urls/action/!add/', post_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddAnotherPopup')
self.assertContains(response, 'Action added through a popup')
def test_admin_URLs_no_clash(self):
"""
Test that some admin URLs work correctly.
"""
# Should get the change_view for model instance with PK 'add', not show
# the add_view
response = self.client.get('/admin/admin_custom_urls/action/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Change action')
# Ditto, but use reverse() to build the URL
url = reverse('admin:%s_action_change' % Action._meta.app_label,
args=(quote('add'),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Change action')
# Should correctly get the change_view for the model instance with the
# funny-looking PK (the one with a 'path/to/html/document.html' value)
url = reverse('admin:%s_action_change' % Action._meta.app_label,
args=(quote("path/to/html/document.html"),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Change action')
self.assertContains(response, 'value="path/to/html/document.html"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_custom_urls.urls',)
class CustomRedirects(TestCase):
fixtures = ['users.json', 'actions.json']
def setUp(self):
self.client.login(username='super', password='secret')
def test_post_save_add_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_add() controls the
redirection after the 'Save' button has been pressed when adding a
new object.
Refs 8001, 18310, 19505.
"""
post_data = {'name': 'John Doe'}
self.assertEqual(Person.objects.count(), 0)
response = self.client.post(
reverse('admin:admin_custom_urls_person_add'), post_data)
persons = Person.objects.all()
self.assertEqual(len(persons), 1)
self.assertRedirects(
response, reverse('admin:admin_custom_urls_person_history', args=[persons[0].pk]))
def test_post_save_change_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_change() controls the
redirection after the 'Save' button has been pressed when editing an
existing object.
Refs 8001, 18310, 19505.
"""
Person.objects.create(name='John Doe')
self.assertEqual(Person.objects.count(), 1)
person = Person.objects.all()[0]
post_data = {'name': 'Jack Doe'}
response = self.client.post(
reverse('admin:admin_custom_urls_person_change', args=[person.pk]), post_data)
self.assertRedirects(
response, reverse('admin:admin_custom_urls_person_delete', args=[person.pk]))
def test_post_url_continue(self):
"""
Ensures that the ModelAdmin.response_add()'s parameter `post_url_continue`
controls the redirection after an object has been created.
"""
post_data = {'name': 'SuperFast', '_continue': '1'}
self.assertEqual(Car.objects.count(), 0)
response = self.client.post(
reverse('admin:admin_custom_urls_car_add'), post_data)
cars = Car.objects.all()
self.assertEqual(len(cars), 1)
self.assertRedirects(
response, reverse('admin:admin_custom_urls_car_history', args=[cars[0].pk]))
| bsd-3-clause |
primiano/blink-gitcs | Source/bindings/scripts/compute_interfaces_info_individual.py | 5 | 13781 | #!/usr/bin/python
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compute global interface information for individual IDL files.
Auxiliary module for compute_interfaces_info_overall, which consolidates
this individual information, computing info that spans multiple files
(dependencies and ancestry).
This distinction is so that individual interface info can be computed
separately for each component (avoiding duplicated reading of individual
files), then consolidated using *only* the info visible to a given component.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
from collections import defaultdict
import optparse
import os
import posixpath
import sys
from idl_reader import IdlReader
from utilities import get_file_contents, read_file_to_list, idl_filename_to_interface_name, idl_filename_to_component, write_pickle_file, get_interface_extended_attributes_from_idl, is_callback_interface_from_idl
module_path = os.path.dirname(__file__)
source_path = os.path.normpath(os.path.join(module_path, os.pardir, os.pardir))
class IdlBadFilenameError(Exception):
"""Raised if an IDL filename disagrees with the interface name in the file."""
pass
def parse_options():
usage = 'Usage: %prog [options] [generated1.idl]...'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--cache-directory', help='cache directory')
parser.add_option('--idl-files-list', help='file listing IDL files')
parser.add_option('--interfaces-info-file', help='interface info pickle file')
parser.add_option('--component-info-file', help='component wide info pickle file')
parser.add_option('--write-file-only-if-changed', type='int', help='if true, do not write an output file if it would be identical to the existing one, which avoids unnecessary rebuilds in ninja')
options, args = parser.parse_args()
if options.interfaces_info_file is None:
parser.error('Must specify an output file using --interfaces-info-file.')
if options.idl_files_list is None:
parser.error('Must specify a file listing IDL files using --idl-files-list.')
if options.write_file_only_if_changed is None:
parser.error('Must specify whether file is only written if changed using --write-file-only-if-changed.')
options.write_file_only_if_changed = bool(options.write_file_only_if_changed)
return options, args
################################################################################
# Computations
################################################################################
def relative_dir_posix(idl_filename):
"""Returns relative path to the directory of idl_file in POSIX format."""
relative_path_local = os.path.relpath(idl_filename, source_path)
relative_dir_local = os.path.dirname(relative_path_local)
return relative_dir_local.replace(os.path.sep, posixpath.sep)
def include_path(idl_filename, implemented_as=None):
"""Returns relative path to header file in POSIX format; used in includes.
POSIX format is used for consistency of output, so reference tests are
platform-independent.
"""
relative_dir = relative_dir_posix(idl_filename)
# IDL file basename is used even if only a partial interface file
idl_file_basename, _ = os.path.splitext(os.path.basename(idl_filename))
cpp_class_name = implemented_as or idl_file_basename
return posixpath.join(relative_dir, cpp_class_name + '.h')
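# e.g. for an IDL file at Source/core/dom/Document.idl (path shown for
# illustration only), include_path() yields 'core/dom/Document.h', or
# 'core/dom/MyImpl.h' if [ImplementedAs=MyImpl] is set.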
def get_implements_from_definitions(definitions, definition_name):
left_interfaces = []
right_interfaces = []
for implement in definitions.implements:
if definition_name == implement.left_interface:
right_interfaces.append(implement.right_interface)
elif definition_name == implement.right_interface:
left_interfaces.append(implement.left_interface)
else:
raise IdlBadFilenameError(
'implements statement found in unrelated IDL file.\n'
'Statement is:\n'
' %s implements %s;\n'
'but filename is unrelated "%s.idl"' %
(implement.left_interface, implement.right_interface, definition_name))
return left_interfaces, right_interfaces
def get_put_forward_interfaces_from_definition(definition):
return sorted(set(attribute.idl_type.base_type
for attribute in definition.attributes
if 'PutForwards' in attribute.extended_attributes))
def collect_union_types_from_definitions(definitions):
"""Traverse definitions and collect all union types."""
def union_types_from(things):
return (thing.idl_type for thing in things
if thing.idl_type.is_union_type)
this_union_types = set()
for interface in definitions.interfaces.itervalues():
this_union_types.update(union_types_from(interface.attributes))
for operation in interface.operations:
this_union_types.update(union_types_from(operation.arguments))
if operation.idl_type.is_union_type:
this_union_types.add(operation.idl_type)
for constructor in interface.constructors:
this_union_types.update(union_types_from(constructor.arguments))
for constructor in interface.custom_constructors:
this_union_types.update(union_types_from(constructor.arguments))
for callback_function in definitions.callback_functions.itervalues():
this_union_types.update(union_types_from(callback_function.arguments))
if callback_function.idl_type.is_union_type:
this_union_types.add(callback_function.idl_type)
for dictionary in definitions.dictionaries.itervalues():
this_union_types.update(union_types_from(dictionary.members))
for idl_type in definitions.typedefs.itervalues():
if idl_type.is_union_type:
this_union_types.add(idl_type)
return this_union_types
class InterfaceInfoCollector(object):
"""A class that collects interface information from idl files."""
def __init__(self, cache_directory=None):
self.reader = IdlReader(interfaces_info=None, outputdir=cache_directory)
self.interfaces_info = {}
self.partial_interface_files = defaultdict(lambda: {
'full_paths': [],
'include_paths': [],
})
self.union_types = set()
self.typedefs = {}
def add_paths_to_partials_dict(self, partial_interface_name, full_path,
include_paths):
paths_dict = self.partial_interface_files[partial_interface_name]
paths_dict['full_paths'].append(full_path)
paths_dict['include_paths'].extend(include_paths)
def collect_info(self, idl_filename):
"""Reads an idl file and collects information which is required by the
binding code generation."""
definitions = self.reader.read_idl_file(idl_filename)
if definitions.interfaces:
definition = next(definitions.interfaces.itervalues())
interface_info = {
'is_callback_interface': definition.is_callback,
'is_dictionary': False,
# Interfaces that are referenced (used as types) and that we
# introspect during code generation (beyond interface-level
# data ([ImplementedAs], is_callback_interface, ancestors, and
# inherited extended attributes): deep dependencies.
# These cause rebuilds of referrers, due to the dependency,
# so these should be minimized; currently only targets of
# [PutForwards].
'referenced_interfaces': get_put_forward_interfaces_from_definition(definition),
}
elif definitions.dictionaries:
definition = next(definitions.dictionaries.itervalues())
interface_info = {
'is_callback_interface': False,
'is_dictionary': True,
'referenced_interfaces': None,
}
else:
raise Exception('IDL file must contain one interface or dictionary')
this_union_types = collect_union_types_from_definitions(definitions)
self.union_types.update(this_union_types)
self.typedefs.update(definitions.typedefs)
extended_attributes = definition.extended_attributes
implemented_as = extended_attributes.get('ImplementedAs')
full_path = os.path.realpath(idl_filename)
this_include_path = None if 'NoImplHeader' in extended_attributes else include_path(idl_filename, implemented_as)
if definition.is_partial:
# We don't create interface_info for partial interfaces, but
# adds paths to another dict.
partial_include_paths = []
if this_include_path:
partial_include_paths.append(this_include_path)
if this_union_types:
component = idl_filename_to_component(idl_filename)
partial_include_paths.append(
'bindings/%s/v8/UnionTypes%s.h' % (component, component.capitalize()))
self.add_paths_to_partials_dict(definition.name, full_path, partial_include_paths)
return
# 'implements' statements can be included in either the file for the
# implement*ing* interface (lhs of 'implements') or implement*ed* interface
# (rhs of 'implements'). Store both for now, then merge to implement*ing*
# interface later.
left_interfaces, right_interfaces = get_implements_from_definitions(
definitions, definition.name)
interface_info.update({
'extended_attributes': extended_attributes,
'full_path': full_path,
'has_union_types': bool(this_union_types),
'implemented_as': implemented_as,
'implemented_by_interfaces': left_interfaces,
'implements_interfaces': right_interfaces,
'include_path': this_include_path,
# FIXME: temporary private field, while removing old treatement of
# 'implements': http://crbug.com/360435
'is_legacy_treat_as_partial_interface': 'LegacyTreatAsPartialInterface' in extended_attributes,
'parent': definition.parent,
'relative_dir': relative_dir_posix(idl_filename),
})
self.interfaces_info[definition.name] = interface_info
def get_info_as_dict(self):
"""Returns info packaged as a dict."""
return {
'interfaces_info': self.interfaces_info,
# Can't pickle defaultdict, convert to dict
# FIXME: this should be included in get_component_info.
'partial_interface_files': dict(self.partial_interface_files),
}
def get_component_info_as_dict(self):
"""Returns component wide information as a dict."""
return {
'typedefs': self.typedefs,
'union_types': self.union_types,
}
################################################################################
def main():
options, args = parse_options()
# Static IDL files are passed in a file (generated at GYP time), due to OS
# command line length limits
idl_files = read_file_to_list(options.idl_files_list)
# Generated IDL files are passed at the command line, since these are in the
# build directory, which is determined at build time, not GYP time, so these
# cannot be included in the file listing static files
idl_files.extend(args)
# Compute information for individual files
# Information is stored in global variables interfaces_info and
# partial_interface_files.
info_collector = InterfaceInfoCollector(options.cache_directory)
for idl_filename in idl_files:
info_collector.collect_info(idl_filename)
write_pickle_file(options.interfaces_info_file,
info_collector.get_info_as_dict(),
options.write_file_only_if_changed)
write_pickle_file(options.component_info_file,
info_collector.get_component_info_as_dict(),
options.write_file_only_if_changed)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
pwong-mapr/private-hue | desktop/core/ext-py/elementtree/setup.py | 45 | 1306 | #!/usr/bin/env python
#
# Setup script for the elementtree library
# $Id: setup.py 2326 2005-03-17 07:45:21Z fredrik $
#
# Usage: python setup.py install
#
from distutils.core import setup
try:
# add download_url syntax to distutils
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
except:
pass
DESCRIPTION="ElementTree - a light-weight XML object model for Python."
LONG_DESCRIPTION="""\
The Element type is a flexible container object, designed to store
hierarchical data structures in memory. Element structures can be
converted to and from XML."""
setup(
name="elementtree",
version="1.2.6-20050316",
author="Fredrik Lundh",
author_email="fredrik@pythonware.com",
url="http://effbot.org/zone/element-index.htm",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url="http://effbot.org/downloads#elementtree",
license="Python (MIT style)",
packages=["elementtree"],
platforms="Python 1.5.2 and later.",
classifiers=[
"Development Status :: 6 - Mature",
"Operating System :: OS Independent",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML",
]
)
| apache-2.0 |
savoirfairelinux/OpenUpgrade | addons/website_mail/models/mail_message.py | 27 | 4601 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.osv import osv, fields, expression
class MailMessage(osv.Model):
_inherit = 'mail.message'
def _get_description_short(self, cr, uid, ids, name, arg, context=None):
res = dict.fromkeys(ids, False)
for message in self.browse(cr, uid, ids, context=context):
if message.subject:
res[message.id] = message.subject
else:
plaintext_ct = html2plaintext(message.body)
res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')
return res
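# e.g. a subject-less message whose body renders to at least 30 characters
# of plain text is shortened to its first 30 characters followed by
# ' [...]'; shorter bodies are kept as-is.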
_columns = {
'description': fields.function(
_get_description_short, type='char',
help='Message description: either the subject, or the beginning of the body'
),
'website_published': fields.boolean(
'Published', help="Visible on the website as a comment"
),
}
def default_get(self, cr, uid, fields_list, context=None):
defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context)
# Note: explicitly implemented in default_get() instead of _defaults,
# to avoid setting to True for all existing messages during upgrades.
# TODO: this default should probably be dynamic according to the model
# on which the messages are attached, thus moved to create().
if 'website_published' in fields_list:
defaults.setdefault('website_published', True)
return defaults
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False, access_rights_uid=None):
""" Override that adds specific access rights of mail.message, to restrict
messages to published messages for public users. """
if uid != SUPERUSER_ID:
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
args = expression.AND([[('website_published', '=', True)], list(args)])
return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
context=context, count=count, access_rights_uid=access_rights_uid)
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Add Access rules of mail.message for non-employee user:
- read:
- raise if the type is comment and subtype NULL (internal note)
"""
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,))
if cr.fetchall():
raise osv.except_osv(
_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation))
return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
| agpl-3.0 |
DURAARK/duraark-pointcloud-viewer | lastools/ArcGIS_toolbox/scripts_production/lasboundaryPro.py | 1 | 5636 | #
# lasboundaryPro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lasboundary.exe to compute a boundary polygon for the
# points. This is a concave hull of the points with a "concavity"
# defined by the user that is 50 m (or 150 ft) by default. It is
# usually a single polygon where "islands of points" are connected
# by edges that are traversed in each direction once, but the user
# can choose to generate a disjoint set of polygons as well.
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# vector output: SHP/WKT/KML/TXT
#
# for licensing see http://lastools.org/LICENSE.txt
#
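# An example of the kind of command line this script assembles (paths and
# values are illustrative only):
#
#   lasboundary.exe -i "C:\data\*.laz" -concavity 25 -disjoint -oshp -odir "C:\out"
#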
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lasboundary production ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lasboundary executable
lasboundary_path = lastools_path+"\\lasboundary.exe"
### check if executable exists
if os.path.exists(lasboundary_path) == False:
gp.AddMessage("Cannot find lasboundary.exe at " + lasboundary_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasboundary_path + " ...")
### create the command string for lasboundary.exe
command = ['"'+lasboundary_path+'"']
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### counting up the arguments
c = 1
### add input LiDAR
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
command.append("-i")
command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2
### maybe we should merge all files into one
if sys.argv[c] == "true":
command.append("-merged")
c = c + 1
### maybe use a user-specified concavity
if sys.argv[c] != "50":
command.append("-concavity")
command.append(sys.argv[c].replace(",","."))
### maybe thin with grid first
if sys.argv[c+1] == "true":
command.append("-thin_with_grid")
command.append(str(float(sys.argv[c].replace(",","."))/4))
c = c + 2
### what should we contour
if sys.argv[c] == "ground points only":
command.append("-keep_class")
command.append("2")
elif sys.argv[c] == "vegetation":
command.append("-keep_class")
command.append("3")
command.append("4")
command.append("5")
elif sys.argv[c] == "buildings":
command.append("-keep_class")
command.append("6")
elif sys.argv[c] == "keypoints":
command.append("-keep_class")
command.append("8")
elif sys.argv[c] == "water":
command.append("-keep_class")
command.append("9")
elif sys.argv[c] == "overlap points":
command.append("-keep_class")
command.append("12")
c = c + 1
### maybe discover disjoint polygons
if sys.argv[c] == "true":
command.append("-disjoint")
c = c + 1
### maybe report holes in the interior
if sys.argv[c] == "true":
command.append("-holes")
c = c + 1
### maybe an output format was selected
if sys.argv[c] != "#":
command.append("-o" + sys.argv[c])
c = c + 1
### maybe an output file name was selected
if sys.argv[c] != "#":
command.append("-o")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output directory was selected
if sys.argv[c] != "#":
command.append("-odir")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output appendix was selected
if sys.argv[c] != "#":
command.append("-odix")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe we should run on multiple cores
if sys.argv[c] != "1":
command.append("-cores")
command.append(sys.argv[c])
c = c + 1
### maybe there are additional input options
if sys.argv[c] != "#":
additional_options = sys.argv[c].split()
for option in additional_options:
command.append(option)
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasboundary
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lasboundary failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lasboundary done.")
| mit |
zhhf/charging | charging/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py | 5 | 2414 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2.drivers.brocade import (mechanism_brocade
as brocademechanism)
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)
MECHANISM_NAME = ('neutron.plugins.ml2.'
'drivers.brocade.mechanism_brocade.BrocadeMechanism')
class TestBrocadeMechDriverV2(test_db_plugin.NeutronDbPluginV2TestCase):
"""Test Brocade VCS/VDX mechanism driver.
"""
_mechanism_name = MECHANISM_NAME
def setUp(self):
_mechanism_name = MECHANISM_NAME
ml2_opts = {
'mechanism_drivers': ['brocade'],
'tenant_network_types': ['vlan']}
for opt, val in ml2_opts.items():
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
self.addCleanup(ml2_config.cfg.CONF.reset)
def mocked_brocade_init(self):
self._driver = mock.MagicMock()
with mock.patch.object(brocademechanism.BrocadeMechanism,
'brocade_init', new=mocked_brocade_init):
super(TestBrocadeMechDriverV2, self).setUp()
self.mechanism_driver = importutils.import_object(_mechanism_name)
class TestBrocadeMechDriverNetworksV2(test_db_plugin.TestNetworksV2,
TestBrocadeMechDriverV2):
pass
class TestBrocadeMechDriverPortsV2(test_db_plugin.TestPortsV2,
TestBrocadeMechDriverV2):
pass
class TestBrocadeMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2,
TestBrocadeMechDriverV2):
pass
| apache-2.0 |
letuananh/beautifulsoup | bs4-python3/tests/test_lxml.py | 16 | 2962 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError as e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilder()
def test_out_of_range_entity(self):
self.assertSoupEquals(
"<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo&#1000000000;bar</p>", "<p>foobar</p>")
# In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
# test if an old version of lxml is installed.
@skipIf(
not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
"Skipping doctype test for old version of lxml to avoid segfault.")
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_beautifulstonesoup_is_xml_parser(self):
# Make sure that the deprecated BSS class uses an xml builder
# if one is installed.
with warnings.catch_warnings(record=True) as w:
soup = BeautifulStoneSoup("<b />")
self.assertEqual("<b/>", str(soup.b))
self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
def test_real_xhtml_document(self):
"""lxml strips the XML definition from an XHTML doc, which is fine."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b''),
markup.replace(b'\n', b'').replace(
b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilderForXML()
| mit |
yongtang/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py | 14 | 12091 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_block_lower_triangular as block_lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _block_lower_triangular_dense(expected_shape, blocks):
"""Convert a list of blocks into a dense blockwise lower-triangular matrix."""
rows = []
num_cols = 0
for row_blocks in blocks:
# Get the batch shape for the block.
batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
num_cols += array_ops.shape(row_blocks[-1])[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
zeros_to_pad_after_shape, dtype=row_blocks[-1].dtype)
row_blocks.append(zeros_to_pad_after)
rows.append(array_ops.concat(row_blocks, axis=-1))
return array_ops.concat(rows, axis=-2)
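# Illustrative sketch: with expected_shape [5, 5] and blocks [[A], [B, C]]
# where A is 2x2, B is 3x2 and C is 3x3, each row is padded with zeros on
# the right, so the result is the dense matrix
#   [[A, 0],
#    [B, C]].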
@test_util.run_all_in_graph_and_eager_modes
class SquareLinearOperatorBlockLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-5
self._atol[dtypes.float32] = 1e-5
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
super(SquareLinearOperatorBlockLowerTriangularTest, self).setUp()
@staticmethod
def use_blockwise_arg():
return True
@staticmethod
def skip_these_tests():
# Skipping since `LinearOperatorBlockLowerTriangular` is in general not
# self-adjoint.
return ["cholesky", "eigvalsh"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((5, 5), blocks=[[(2, 2)], [(3, 2), (3, 3)]]),
shape_info((3, 7, 7),
blocks=[[(1, 2, 2)], [(1, 3, 2), (3, 3, 3)],
[(1, 2, 2), (1, 2, 3), (1, 2, 2)]]),
shape_info((2, 4, 6, 6),
blocks=[[(2, 1, 2, 2)], [(1, 4, 2), (4, 4, 4)]]),
]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
expected_blocks = (
shape_info.__dict__["blocks"] if "blocks" in shape_info.__dict__
else [[list(shape_info.shape)]])
matrices = []
for i, row_shapes in enumerate(expected_blocks):
row = []
for j, block_shape in enumerate(row_shapes):
if i == j: # operator is on the diagonal
row.append(
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True))
else:
row.append(
linear_operator_test_util.random_normal(block_shape, dtype=dtype))
matrices.append(row)
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [[
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in row] for row in matrices]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix( # pylint:disable=g-complex-comprehension
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in row] for row in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(shape_info.shape)
broadcasted_matrices = linear_operator_util.broadcast_matrix_batch_dims(
[op for row in matrices for op in row]) # pylint: disable=g-complex-comprehension
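    # Row i of the lower-triangular structure holds i + 1 blocks, so in the
    # flattened list it occupies the slice [i*(i+1)//2, (i+1)*(i+2)//2) -- the
    # triangular-number indexing used below.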
matrices = [broadcasted_matrices[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
for i in range(len(matrices))]
block_lower_triangular_dense = _block_lower_triangular_dense(
expected_shape, matrices)
if not use_placeholder:
block_lower_triangular_dense.set_shape(expected_shape)
return operator, block_lower_triangular_dense
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix)]],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_lower_triangular_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)],
[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)]],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
block_lower_triangular.LinearOperatorBlockLowerTriangular)
self.assertEqual(2, len(inverse.operators))
self.assertEqual(1, len(inverse.operators[0]))
self.assertEqual(2, len(inverse.operators[1]))
def test_tape_safe(self):
operator_1 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[1., 0.], [0., 1.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator_2 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[2., 0.], [1., 0.]]))
operator_3 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[3., 1.], [1., 3.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_self_adjoint=False,
is_positive_definite=True)
diagonal_grads_only = ["diag_part", "trace", "determinant",
"log_abs_determinant"]
self.check_tape_safe(operator, skip_options=diagonal_grads_only)
for y in diagonal_grads_only:
for diag_block in [operator_1, operator_3]:
with backprop.GradientTape() as tape:
grads = tape.gradient(getattr(operator, y)(), diag_block.variables)
for item in grads:
self.assertIsNotNone(item)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_3 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegex(ValueError, "always non-singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]], is_non_singular=False)
operator_4 = linalg.LinearOperatorFullMatrix(
[[1., 0.], [2., 0.]], is_non_singular=False)
# A singular operator off of the main diagonal shouldn't raise
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_4, operator_2]], is_non_singular=True)
with self.assertRaisesRegex(ValueError, "always singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_4]], is_non_singular=True)
def test_different_dtypes_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))]
]
with self.assertRaisesRegex(TypeError, "same dtype"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_non_square_operator_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False)],
[linalg.LinearOperatorFullMatrix(rng.rand(4, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(4, 4))]
]
with self.assertRaisesRegex(ValueError, "must be square"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegex(ValueError, "non-empty"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([])
def test_operators_wrong_length_raises(self):
with self.assertRaisesRegex(ValueError, "must contain `i` blocks"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))
for _ in range(3)]])
def test_operators_mismatched_dimension_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))]
]
with self.assertRaisesRegex(ValueError, "must be equal"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_incompatible_input_blocks_raises(self):
matrix_1 = array_ops.placeholder_with_default(rng.rand(4, 4), shape=None)
matrix_2 = array_ops.placeholder_with_default(rng.rand(3, 4), shape=None)
matrix_3 = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
operators = [
[linalg.LinearOperatorFullMatrix(matrix_1, is_square=True)],
[linalg.LinearOperatorFullMatrix(matrix_2),
linalg.LinearOperatorFullMatrix(matrix_3, is_square=True)]
]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
operators)
x = np.random.rand(2, 4, 5).tolist()
msg = ("dimension does not match" if context.executing_eagerly()
else "input structure is ambiguous")
with self.assertRaisesRegex(ValueError, msg):
operator.matmul(x)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
SquareLinearOperatorBlockLowerTriangularTest)
test.main()
| apache-2.0 |
anand-c-goog/tensorflow | tensorflow/python/kernel_tests/array_ops_test.py | 3 | 33485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
def testNonBatchMatrix(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
transposed = tf.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testBatchMatrix(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
transposed = tf.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
def testNonBatchMatrixDynamicallyDefined(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
with self.test_session():
matrix_ph = tf.placeholder(tf.int32)
transposed = tf.matrix_transpose(matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={matrix_ph: matrix}))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
with self.test_session():
batch_matrix_ph = tf.placeholder(tf.int32)
transposed = tf.matrix_transpose(batch_matrix_ph)
self.assertAllEqual(
expected_transposed,
transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
vector = [1, 2, 3]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "should be a "):
tf.matrix_transpose(vector)
class BooleanMaskTest(test_util.TensorFlowTestCase):
def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None):
"""Check equivalence between boolean_mask and numpy masking."""
if make_mask is None:
make_mask = lambda shape: np.random.randint(0, 2, size=shape).astype(bool)
arr = np.random.rand(*arr_shape)
mask = make_mask(arr_shape[: ndims_mask])
masked_arr = arr[mask]
with self.test_session():
masked_tensor = array_ops.boolean_mask(arr, mask)
np.testing.assert_allclose(
masked_arr,
masked_tensor.eval(),
err_msg="masked_arr:\n%s\n\nmasked_tensor:\n%s" % (
masked_arr, masked_tensor.eval()))
masked_tensor.get_shape().assert_is_compatible_with(masked_arr.shape)
self.assertSequenceEqual(
masked_tensor.get_shape()[1:].as_list(),
masked_arr.shape[1:],
msg="shape information lost %s -> %s" % (
masked_arr.shape, masked_tensor.get_shape()))
def testOneDimensionalMask(self):
# Do 1d separately because it's the only easy one to debug!
ndims_mask = 1
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testMultiDimensionalMask(self):
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape)
def testEmptyOutput(self):
make_mask = lambda shape: np.zeros(shape, dtype=bool)
for ndims_mask in range(1, 4):
for ndims_arr in range(ndims_mask, ndims_mask + 3):
for _ in range(3):
arr_shape = np.random.randint(1, 5, size=ndims_arr)
self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)
def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session() as sess:
ph_tensor = array_ops.placeholder(dtypes.int32, shape=None)
ph_mask = array_ops.placeholder(dtypes.bool, shape=[None])
arr = np.array([[1, 2], [3, 4]])
mask = np.array([False, True])
masked_tensor = sess.run(
array_ops.boolean_mask(ph_tensor, ph_mask),
feed_dict={ph_tensor: arr, ph_mask: mask})
np.testing.assert_allclose(masked_tensor, arr[mask])
def testMaskDimensionsSetToNoneRaises(self):
# The rank of the mask tensor must be specified. This is explained
# in the docstring as well.
with self.test_session():
tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
mask = array_ops.placeholder(dtypes.bool, shape=None)
with self.assertRaisesRegexp(ValueError, "dimensions must be specified"):
array_ops.boolean_mask(tensor, mask)
def testMaskHasMoreDimsThanTensorRaises(self):
mask = [[True, True], [False, False]]
tensor = [1, 2, 3, 4]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskIsScalarRaises(self):
mask = True
tensor = 1
with self.test_session():
with self.assertRaisesRegexp(ValueError, "mask.*scalar"):
array_ops.boolean_mask(tensor, mask).eval()
def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
mask = [True, True, True]
tensor = [[1, 2], [3, 4]]
with self.test_session():
with self.assertRaisesRegexp(ValueError, "incompatible"):
array_ops.boolean_mask(tensor, mask).eval()
class OperatorShapeTest(test_util.TensorFlowTestCase):
def testExpandScalar(self):
scalar = "hello"
scalar_expanded = array_ops.expand_dims(scalar, [0])
self.assertEqual(scalar_expanded.get_shape(), (1,))
def testSqueezeScalar(self):
scalar = "hello"
scalar_squeezed = array_ops.squeeze(scalar, ())
self.assertEqual(scalar_squeezed.get_shape(), ())
def testSqueezeMatrix(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, [0])
self.assertEqual(matrix_squeezed.get_shape(), (3))
with self.assertRaises(ValueError):
matrix_squeezed = array_ops.squeeze(matrix, [1])
def testSqueezeScalarDim(self):
matrix = [[1, 2, 3]]
matrix_squeezed = array_ops.squeeze(matrix, 0)
self.assertEqual(matrix_squeezed.get_shape(), (3))
class ReverseTest(test_util.TensorFlowTestCase):
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse(x_np, []).eval()
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 2, 3, 4, 5], dtype=np_dtype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse(x_np, [True]).eval()
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def testReverse1DimAuto(self):
for dtype in [np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16,
np.float32, np.float64, np.complex64, np.complex128]:
self._reverse1DimAuto(dtype)
def testUnknownDims(self):
data_t = tf.placeholder(tf.float32)
dims_known_t = tf.placeholder(tf.bool, shape=[3])
reverse_known_t = tf.reverse(data_t, dims_known_t)
self.assertEqual(3, reverse_known_t.get_shape().ndims)
dims_unknown_t = tf.placeholder(tf.bool)
reverse_unknown_t = tf.reverse(data_t, dims_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
data_2d_t = tf.placeholder(tf.float32, shape=[None, None])
dims_2d_t = tf.placeholder(tf.bool, shape=[2])
reverse_2d_t = tf.reverse(data_2d_t, dims_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
dims_3d_t = tf.placeholder(tf.bool, shape=[3])
with self.assertRaisesRegexp(ValueError, "must be rank 3"):
tf.reverse(data_2d_t, dims_3d_t)
class ReverseV2Test(test_util.TensorFlowTestCase):
def testReverse0DimAuto(self):
x_np = 4
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, []).eval()
self.assertAllEqual(x_tf, x_np)
def _reverse1DimAuto(self, np_dtype):
x_np = np.array([1, 2, 3, 4, 5], dtype=np_dtype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.reverse_v2(x_np, [0]).eval()
self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
def _reverse2DimAuto(self, np_dtype):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_dtype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf_1 = array_ops.reverse_v2(x_np, [0]).eval()
x_tf_2 = array_ops.reverse_v2(x_np, [-2]).eval()
x_tf_3 = array_ops.reverse_v2(x_np, [1]).eval()
x_tf_4 = array_ops.reverse_v2(x_np, [-1]).eval()
x_tf_5 = array_ops.reverse_v2(x_np, [1, 0]).eval()
self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])
# This is the version of reverse that uses axis indices rather than
# bool tensors
# TODO(b/32254538): Change this test to use array_ops.reverse
def testInvalid(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
with self.test_session():
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [-30]).eval()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"is out of valid range"):
array_ops.reverse_v2(x_np, [2]).eval()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"axis 0 specified more than once"):
array_ops.reverse_v2(x_np, [0, -2]).eval()
def testReverse1DimAuto(self):
for dtype in [
np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32,
np.float64, np.complex64, np.complex128
]:
self._reverse1DimAuto(dtype)
def testReverse2DimAuto(self):
for dtype in [
np.uint8, np.int8, np.int32, np.int64, np.bool, np.float16, np.float32,
np.float64, np.complex64, np.complex128
]:
self._reverse2DimAuto(dtype)
def testUnknownDims(self):
reverse_v2 = array_ops.reverse_v2
data_t = tf.placeholder(tf.float32)
axis_known_t = tf.placeholder(tf.int32, shape=[3])
reverse_known_t = reverse_v2(data_t, axis_known_t)
# Unlike V1 we cannot know this anymore
self.assertEqual(None, reverse_known_t.get_shape().ndims)
axis_unknown_t = tf.placeholder(tf.int32)
reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
self.assertIs(None, reverse_unknown_t.get_shape().ndims)
data_2d_t = tf.placeholder(tf.float32, shape=[None, None])
axis_2d_t = tf.placeholder(tf.int32, shape=[3])
reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
self.assertEqual(2, reverse_2d_t.get_shape().ndims)
class MeshgridTest(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
for index in ('ij', 'xy'):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.test_session(use_gpu=use_gpu):
for xx, yy in zip(numpy_out, tf_out):
self.assertAllEqual(xx, yy.eval())
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
for index in ('ij', 'xy'):
for i in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j
inputs.append(x)
numpy_out = np.meshgrid(*inputs, indexing=index)
with self.test_session(use_gpu=use_gpu):
tf_out = array_ops.meshgrid(*inputs, indexing=index)
for X, _X in zip(numpy_out, tf_out):
self.assertAllEqual(X, _X.eval())
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
x = [1, 2, 3]
y = [4, 5]
a = [[1, 1], [1, 1]]
self._compareDiff(x, y, False)
self._compareDiff(x, a, False)
class StridedSliceChecker(object):
"""Check a given tensor against the numpy result."""
REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)
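  # REF_TENSOR holds the values 1..18 reshaped to (3, 2, 3); REF_TENSOR_ALIGNED
  # holds 1..96 reshaped to (3, 4, 8), a shape presumably chosen so that its
  # strides hit the optimized slicing paths exercised in testOptimizedCases.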
def __init__(self, test, x, tensor_type=tf.int32, check_type_infer=True):
self.test = test
self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
self.x_np = np.array(x)
self.check_type_infer = check_type_infer
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
if not isinstance(spec, (list, tuple)):
spec = [spec]
tensor = op.eval()
# Make a numpy spec that pre-evals the tensors
np_specs = []
def eval_if_tensor(x):
try:
return x.eval()
except AttributeError:
return x
for s in spec:
if isinstance(s, slice):
start = eval_if_tensor(s.start)
stop = eval_if_tensor(s.stop)
step = eval_if_tensor(s.step)
np_specs.append(slice(start, stop, step))
else:
np_specs.append(eval_if_tensor(s))
self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
if self.check_type_infer:
self.test.assertAllEqual(tensor.shape, op.get_shape())
return tensor
class StridedSliceTest(test_util.TensorFlowTestCase):
"""Test the strided slice operation with variants of slices."""
def test_basic_slice(self):
for tensor_type in [tf.int32, tf.int64, tf.int16, tf.int8, tf.float32,
tf.float64]:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
_ = checker[:, :, :]
# Various ways of representing identity slice
_ = checker[:, :, :]
_ = checker[::, ::, ::]
_ = checker[::1, ::1, ::1]
# Not zero slice
_ = checker[::1, ::5, ::2]
# Reverse in each dimension independently
_ = checker[::-1, :, :]
_ = checker[:, ::-1, :]
_ = checker[:, :, ::-1]
## negative index tests i.e. n-2 in first component
_ = checker[-2::-1, :, ::1]
# negative index tests i.e. n-2 in first component, non-unit stride
_ = checker[-2::-1, :, ::2]
# Check rank-0 examples
checker2 = StridedSliceChecker(self, 5, tensor_type=tf.int32)
_ = checker2[None]
_ = checker2[...]
_ = checker2[tuple()]
def testDegenerateSlices(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
# degenerate by offering a forward interval with a negative stride
_ = checker[0:-1:-1, :, :]
# degenerate with a reverse interval with a positive stride
_ = checker[-1:0, :, :]
# empty interval in every dimension
_ = checker[-1:0, 2:2, 2:3:-1]
def testEllipsis(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[0:]
# implicit ellipsis
_ = checker[0:, ...]
# ellipsis alone
_ = checker[...]
# ellipsis at end
_ = checker[0:1, ...]
# ellipsis at begin
_ = checker[..., 0:1]
# ellipsis at middle
_ = checker[0:1, ..., 0:1]
# multiple ellipses not allowed
with self.assertRaisesRegexp(ValueError, "Multiple ellipses"):
_ = checker[..., :, ...].eval()
def testShrink(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
_ = checker[:, :, :, :, 3]
_ = checker[..., 3]
_ = checker[:, 0]
_ = checker[:, :, 0]
def testTensorIndexing(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)
bar = tf.constant(2)
bar2 = tf.constant(3)
_ = checker[..., bar:bar2]
_ = checker[..., bar]
with self.assertRaisesRegexp(TypeError,
"DataType float32 for attr 'Index'"):
_ = checker[..., 3.0]
_ = checker[..., 3]
def testExpand(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)
# new axis (followed by implicit ellipsis)
_ = checker[np.newaxis]
# newaxis after ellipsis
_ = checker[..., np.newaxis]
# newaxis in between ellipsis and explicit range
_ = checker[..., np.newaxis, :]
_ = checker[:, ..., np.newaxis, :, :]
# Reverse final dimension with new axis
_ = checker[:, :, np.newaxis, :, 2::-1]
# Ellipsis in middle of two newaxis
_ = checker[np.newaxis, ..., np.newaxis]
def testExpandVariable(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
x = tf.Variable(7, dtype=tf.int32)
x.initializer.run()
y = x[None].eval()
self.assertEqual(y.shape, (1,))
self.assertAllEqual(y, (7,))
def testOptimizedCases(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
# Identity
_ = checker[:]
# Identity
_ = checker[...]
# Identity
_ = checker[np.newaxis, ..., np.newaxis]
# First axis slice
_ = checker[1:]
# First axis slice
_ = checker[np.newaxis, 1:]
class StridedSliceShapeChecker(object):
def __init__(self, x):
self.x = x
def __getitem__(self, spec):
op = self.x.__getitem__(spec)
return op.get_shape()
class StridedSliceShapeTest(test_util.TensorFlowTestCase):
"""Test the shape inference of StridedSliceShapes."""
def testUnknown(self):
with self.test_session(use_gpu=False):
uncertain_tensor = tf.placeholder(tf.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]
self.assertAllEqual(a_slice_shape.ndims, None)
def tensorShapeEqual(self, x, y):
self.assertTrue(x is not None and y is not None or x is None and y is None)
self.assertEqual(x.as_list(), y.as_list())
def testTensorShapeUncertain(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
uncertain_tensor = tf.placeholder(tf.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)
self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, 3:4, 4],
tensor_shape.TensorShape([2, None]))
self.tensorShapeEqual(a[3:5, :, 5:10],
tensor_shape.TensorShape([2, None, 2]))
self.tensorShapeEqual(a[3:5, :, 50:3],
tensor_shape.TensorShape([2, None, 0]))
self.tensorShapeEqual(a[3:5, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[1:5:2, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:5:3, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([2, None, 1, 0]))
self.tensorShapeEqual(a[:2:3, :, tf.newaxis, 50:3,],
tensor_shape.TensorShape([1, None, 1, 0]))
self.tensorShapeEqual(a[::-1, :, tf.newaxis, ::-2],
tensor_shape.TensorShape([5, None, 1, 4]))
def testTensorValuedIndexShape(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
defined_shape_tensor = tf.placeholder(tf.float32, shape=(5, 3, 7))
index_value = tf.placeholder(tf.int32, shape=())
a = StridedSliceShapeChecker(defined_shape_tensor)
self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-1],
tensor_shape.TensorShape([3, 7]))
self.tensorShapeEqual(a[index_value, ::-2],
tensor_shape.TensorShape([2, 7]))
other_scalar = tf.placeholder(tf.int32, shape=())
self.tensorShapeEqual(a[index_value, other_scalar:2],
tensor_shape.TensorShape([None, 7]))
class GradSliceChecker(object):
"""Tests that we can compute a gradient for var^2."""
def __init__(self, test, sess, var, varnp):
self.test = test
self.sess = sess
self.val = var * var
self.var = var
self.varnp = varnp
def __getitem__(self, spec):
slice_var = self.var[spec]
slice_val = self.val[spec]
# compute analytic 2nd derivative
analytic_grad2 = 2 * slice_val
dy = tf.Variable(tf.ones(shape=slice_var.get_shape(), dtype=tf.int32))
assign = dy.assign(slice_var)
slice_val_grad, = tf.gradients(slice_val, self.var, grad_ys=dy)
slice_val_grad2, = tf.gradients(slice_val_grad, dy, grad_ys=self.var)
self.sess.run(assign)
slice_val_grad_evaled, slice_val_grad2_evaled = (
self.sess.run([slice_val_grad, slice_val_grad2]))
analytic_grad2_evaled = analytic_grad2.eval()
self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
# compute analytic gradient for slice
np_val_grad = (2 * self.varnp * self.varnp)
np_sliceval_grad = np.zeros(self.var.get_shape())
np_sliceval_grad[spec] = np_val_grad[spec]
# verify gradient
self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
class StridedSliceGradTest(test_util.TensorFlowTestCase):
"""Test that strided slice's custom gradient produces correct gradients."""
def testGradient(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
var = tf.Variable(tf.reshape(tf.range(1, 97, 1), shape=(6, 4, 4)))
init = tf.initialize_all_variables()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(range(1, 97, 1)).reshape((6, 4, 4)))
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
_ = grad[3:0:-2, tf.newaxis, 1:3, 2, tf.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, -200, :]
with self.assertRaisesRegexp(ValueError, "out of bounds"):
_ = grad[:, 200, :]
def testGradientZero(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
var = tf.Variable(8)
init = tf.initialize_all_variables()
sess.run(init)
grad = GradSliceChecker(self, sess, var,
np.array(8))
_ = grad[tuple()]
class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
"""Test varied index types and host located memory."""
def testHostVsDevice(self):
with self.test_session(use_gpu=True) as sess:
var2 = tf.Variable(
tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1)))
varshape = tf.Variable([6, 4, 4], dtype=tf.int32)
sess.run(tf.initialize_all_variables())
begin = tf.constant([0, 0, 0])
end = tf.constant([4, 1, 1])
strides = tf.constant([1, 1, 1])
foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
sess.run(foo)
def testInt64Shape(self):
with self.test_session(use_gpu=True) as sess:
original_dy = tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
sess.run(tf.initialize_all_variables())
begin = tf.constant([0, 0, 0], dtype=tf.int64)
end = tf.constant([4, 1, 1], dtype=tf.int64)
strides = tf.constant([1, 1, 1], dtype=tf.int64)
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
def testMixedIndexTypes(self):
with self.test_session(use_gpu=True) as sess:
original_dy = tf.reshape(
tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
sess.run(tf.initialize_all_variables())
begin = tf.constant([0, 0, 0], dtype=tf.int32)
end = tf.constant([4, 1, 1], dtype=tf.int64)
strides = tf.constant([1, 1, 1], dtype=tf.int64)
with self.assertRaisesRegexp(
TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
" that does not match type int64 of argument 'shape'"):
dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
original_dy)
sess.run(dx)
class BenchmarkSlice(object):
def __init__(self, tensor):
self.tensor = tensor
def __getitem__(self, x):
return self.tensor[x]
class StridedSliceBenchmark(tf.test.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
tf.initialize_all_variables().run()
for _ in range(10):
_ = slice_op.eval()
iters = 1000
t0 = time.time()
for _ in range(iters):
slice_op.eval()
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)
def make_variable(self):
n = 256
shape = (n, n, n)
items = n**3
var = tf.Variable(
tf.reshape(
tf.linspace(1., float(items), items), shape),
dtype=tf.float32)
return var
def benchmark_strided_slice_skip(self):
with tf.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
with tf.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
with tf.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
class StridedSliceAssignChecker(object):
def __init__(self, test, x, tensor_type=tf.int32):
self.tensor_type = tensor_type
self.test = test
self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
self.x_np = np.array(x)
def __setitem__(self, index, value):
with self.test.test_session() as sess:
var = tf.Variable(self.x)
sess.run(tf.initialize_variables([var]))
val = sess.run(var[index].assign(
tf.constant(
value, dtype=self.tensor_type)))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
class SliceAssignTest(test_util.TensorFlowTestCase):
def testInvalidSlice(self):
with self.test_session() as sess:
foo = tf.constant([1, 2, 3])
with self.assertRaisesRegexp(ValueError, "Sliced assignment"
" is only supported for variables"):
bar = foo[:2].assign(tf.constant([1, 2]))
sess.run(bar)
def testSliceAssign(self):
checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]])
# Check if equal
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Check trivial (1,1) shape tensor
checker[1:2, 1:2] = [[666]]
# shrinks shape changes
checker[1:2, 1] = [666]
checker[1, 1:2] = [666]
checker[1, 1] = 666
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[999]]]
# Non unit strides
checker[::1, ::-2] = [[33, 333], [44, 444]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 2225)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testUninitialized(self):
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.test_session() as sess:
v = tf.Variable([1, 2])
sess.run(v[:].assign([1, 2]))
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
def testDenseShape(self):
with self.test_session():
t_value = [[0, 42], [24, 0]]
self.assertAllEqual((2, 2), tf.shape(t_value).eval())
self.assertEqual(4, tf.size(t_value).eval())
self.assertEqual(2, tf.rank(t_value).eval())
t = tf.constant(t_value)
self.assertAllEqual((2, 2), tf.shape(t).eval())
self.assertEqual(4, tf.size(t).eval())
self.assertEqual(2, tf.rank(t).eval())
def testSparseShape(self):
with self.test_session():
sp_value = tf.SparseTensorValue(
indices=((0, 1), (1, 0)),
values=(42, 24),
shape=(2, 2))
self.assertAllEqual((2, 2), tf.shape(sp_value).eval())
self.assertEqual(4, tf.size(sp_value).eval())
self.assertEqual(2, tf.rank(sp_value).eval())
sp = tf.SparseTensor.from_value(sp_value)
self.assertAllEqual((2, 2), tf.shape(sp).eval())
self.assertEqual(4, tf.size(sp).eval())
self.assertEqual(2, tf.rank(sp).eval())
class SequenceMaskTest(test_util.TensorFlowTestCase):
def testExceptions(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "lengths must be 1D"):
tf.sequence_mask([[10, 20]], [10, 20])
with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
tf.sequence_mask([10, 20], [10, 20])
def testNormal(self):
with self.test_session():
res = tf.sequence_mask(tf.constant([1, 3, 2]), 5)
self.assertAllEqual(res.get_shape(), [3, 5])
self.assertAllEqual(res.eval(), [[True, False, False, False, False],
[True, True, True, False, False],
[True, True, False, False, False]])
# test dtype and default maxlen:
res = tf.sequence_mask(tf.constant([0, 1, 4]), dtype=tf.float32)
self.assertAllEqual(res.get_shape().as_list(), [3, None])
self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
CKehl/pylearn2 | pylearn2/sampling/remove_samples.py | 1 | 6030 | '''
Created on Apr 16, 2015
@author: christian
'''
from optparse import OptionParser
import numpy
import os
from os import listdir
from os.path import isfile, join
import cPickle
import glob
def unpickle(file):
fo = open(file, 'rb')
dictionary = cPickle.load(fo)
fo.close()
return dictionary
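# Note: the pickled batches handled below are assumed to follow the CIFAR
# layout: 'data' maps to a uint8 array of shape (N, 3072) (32x32 RGB images,
# flattened) and 'labels' / 'fine_labels' to a list of N integer class ids.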
if __name__ == '__main__':
optionParser = OptionParser("usage: %prog -i INPUT_FILE -m META_FILE -t ADDITIONAL_TAG -I NUM_IMAGES")
optionParser.add_option("-i", "--input", action="store", dest="input", type="string", metavar="FILE", help="pickled python dataset file")
optionParser.add_option("-m", "--meta", action="store", dest="meta", type="string", metavar="FILE", help="pickled python metadata file")
optionParser.add_option("-t", "--tag", action="store", dest="tag", type="int", help="selected tag to add the image", default = 0)
optionParser.add_option("-I", "--images", action="store", dest="nimages", type="int", help="number of images to add", default = 0)
(options, args) = optionParser.parse_args()
my_obj = dict()
meta_obj = dict()
label_str = ''
meta_label_str = ''
test_img_array = None
test_array = []
test_classes = []
test_obj = dict()
if options.input != None:
my_obj = unpickle(options.input)
in_dir = os.path.dirname(options.input)
test_obj = unpickle(os.path.join(in_dir, "test"))
else:
exit()
ds = 0
if("fine_labels" in my_obj.keys()):
ds = 1 #CIFAR-100
label_str = 'fine_labels'
meta_label_str = 'fine_label_names'
else:
ds = 0 #CIFAR-10 and combined
label_str = 'labels'
meta_label_str = 'label_names'
meta_inputs = []
if(options.meta == None) or (options.meta == ""):
meta_inputs = glob.glob(os.path.dirname(options.input)+os.path.sep+"*meta*")
else:
meta_inputs.append(options.meta)
meta_obj = unpickle(meta_inputs[0])
num_img_base_array = [0]*(len(my_obj[label_str]))
img_base_array = [[0]*3072]*(my_obj['data'].shape[0])
img_array = numpy.array(img_base_array, dtype=numpy.uint8)
class_array = [0]*(my_obj['data'].shape[0])
cursor_point = 0
for i in range(0, my_obj['data'].shape[0]):
data_entry = my_obj['data'][i]
tag_no = my_obj[label_str][i]
img_array[cursor_point] = data_entry
class_array[cursor_point] = tag_no
num_img_base_array[tag_no]+=1
cursor_point+=1
# Test array generation
tcursor_point = 0
#print "Test Data fieldsize: "+str(test_obj['data'].shape[0])
for i in range(0, test_obj['data'].shape[0]):
data_entry = test_obj['data'][i]
tag_no = test_obj['labels'][i]
#print "Test Image: "+str(i)+" => Tag: "+str(tag_no)
test_array.append(data_entry.tolist())
test_classes.append(tag_no)
tcursor_point+=1
test_img_array = numpy.array(test_array, dtype=numpy.uint8)
rm_images_only = False
img_to_remove = []
if(options.nimages > 0):
rm_images_only = True
print "just removing images, not the full tag."
else:
rm_images_only = False
tag_img_number = num_img_base_array[options.tag]
img_of_tag = []
for i in range(0, len(class_array)):
if(class_array[i] == options.tag):
img_of_tag.append(i)
print "Data with selected tag: "+str(img_of_tag)+" ("+str(tag_img_number)+")"
for j in range(0, len(img_of_tag)):
i = (len(img_of_tag)-1)-j
if(rm_images_only == False):
img_to_remove.append(img_of_tag[i])
else:
if((tag_img_number-i)<=options.nimages):
img_to_remove.append(img_of_tag[i])
print "Data to remove: "+str(img_to_remove)
#rm_img_array = None
#rm_class_array = []
print "Dataset size before deletion: "+str(len(class_array))+" | "+str(img_array.shape[0])
for i in range(0, len(img_to_remove)):
entry = img_to_remove[i]
#print "removing "+str(entry)+" ..."
del class_array[entry]
if(rm_images_only == True):
num_img_base_array[options.tag]-=len(img_to_remove)
img_array = numpy.delete(img_array, img_to_remove, 0)
print "Dataset size after deletion: "+str(len(class_array))+" | "+str(img_array.shape[0])
print "Label dictionary before deletion: "+str(len(meta_obj[meta_label_str]))
if(rm_images_only == False):
del num_img_base_array[options.tag]
del meta_obj[meta_label_str][options.tag]
# re-adapt mapping
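# (illustrative example: if tag 1 is removed, images formerly labelled
# 2, 3, ... are relabelled 1, 2, ... so the label space stays contiguous)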
for i in range(0, len(class_array)):
if(class_array[i] > options.tag):
class_array[i]-=1
#testing data
del img_of_tag[:]
for i in range(0, len(test_classes)):
if(test_classes[i] == options.tag):
img_of_tag.append(i)
for i in range(0, len(img_of_tag)):
j = (len(img_of_tag)-1)-i
entry = img_of_tag[j]
#print "removing "+str(entry)+" ..."
del test_classes[entry]
test_img_array = numpy.delete(test_img_array, img_of_tag, 0)
# re-mapping test data
for i in range(0, len(test_classes)):
if(test_classes[i] > options.tag):
test_classes[i]-=1
print "Label dictionary after deletion: "+str(len(meta_obj[meta_label_str]))
out_obj = dict()
out_obj['data']=img_array
out_obj['labels']=class_array
out_dir = os.path.dirname(options.input)
#fo = open(os.path.join(options.output,"experiment"), 'wb')
cPickle.dump(out_obj, open(os.path.join(out_dir,"experiment_rm"), "wb"), protocol=2)
test_n_obj = dict()
test_n_obj['data']=test_img_array
test_n_obj['labels']=test_classes
cPickle.dump(test_n_obj, open(os.path.join(out_dir,"test_rm"), "wb"), protocol=2)
meta_obj_out = dict()
meta_obj_out['label_names'] = meta_obj[meta_label_str]
cPickle.dump(meta_obj_out, open(os.path.join(out_dir,"meta_rm"), "wb"), protocol=2)
| bsd-3-clause |
jjscarafia/odoo | openerp/tools/mail.py | 1 | 29387 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
safe_attrs = clean.defs.safe_attrs | frozenset(
['style',
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
])
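# Both whitelists above feed the lxml.html.clean.Cleaner built in
# html_sanitize() below; with strict=True (and lxml >= 3.1.0) any attribute
# outside safe_attrs is stripped.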
def html_sanitize(src, silent=True, strict=False):
if not src:
return src
src = ustr(src, errors='replace')
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace('<%', cgi.escape('<%'))
src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'style': False, # do not remove style attributes
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
'comments': False,
'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if strict:
if etree.LXML_VERSION >= (3, 1, 0):
# lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': safe_attrs,
})
else:
kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
kwargs['frames'] = False  # do not remove frames (embedded video in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
cleaned = cleaned.replace('&lt;%', '<%')
cleaned = cleaned.replace('%&gt;', '%>')
except etree.ParserError, e:
if 'empty' in str(e):
return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring wants to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
cleaned = cleaned[5:-6]
return cleaned
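# A minimal usage sketch (input is illustrative, not taken from a test suite):
#   html_sanitize('<div onclick="evil()">hi<script>x()</script></div>')
# should yield roughly '<div>hi</div>': the script tag is in tags_to_kill and
# the javascript attribute is dropped by the cleaner defaults.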
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or having some client-
specific heuristics
- try to strip signatures
- shorten the html to a maximum number of characters if requested
Some specific use case:
- MsOffice: ``div.style = border-top:solid;`` delimits the beginning of
a quote; detected by finding WordSection1 or MsoNormal
- Hotmail: ``hr.stopSpelling`` delimits the beginning of a quote; detect
Hotmail by finding ``SkyDrivePlaceholder``
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore
takes as input html code coming from a sanitized source,
like fields.html.
:param boolean remove: remove the html code that is unwanted; otherwise it
is only flagged and tagged
:param boolean shorten: shorten the html; any content exceeding the
maximum length will be flagged for removal
:param int max_length: if shortening, maximum number of characters before
shortening
:param dict expand_options: options for the read more link when shortening
the content.The used keys are the following:
- oe_expand_container_tag: class applied to the
container of the whole read more link
- oe_expand_container_class: class applied to the
link container (default: oe_mail_expand)
- oe_expand_container_content: content of the
container (default: ...)
- oe_expand_separator_node: optional separator, like
adding ... <br /><br /> <a ...>read more</a> (default: void)
- oe_expand_a_href: href of the read more link itself
(default: #)
- oe_expand_a_class: class applied to the <a> containing
the link itself (default: oe_mail_expand)
- oe_expand_a_content: content of the <a> (default: read more)
The formatted read more link is the following:
<cont_tag class="oe_expand_container_class">
oe_expand_container_content
if expand_options.get('oe_expand_separator_node'):
<oe_expand_separator_node/>
<a href="oe_expand_a_href" class="oe_expand_a_class">
oe_expand_a_content
</a>
</cont_tag>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
if not source:
return source
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
def _create_node(tag, text, tail=None, attrs={}):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
for key, val in attrs.iteritems():
new_node.set(key, val)
return new_node
def _insert_new_node(node, index, new_node_tag, new_node_text, new_node_tail=None, new_node_attrs={}):
new_node = _create_node(new_node_tag, new_node_text, new_node_tail, new_node_attrs)
node.insert(index, new_node)
return new_node
def _tag_matching_regex_in_text(regex, node, new_node_tag='span', new_node_attrs={}):
text = node.text or ''
if not re.search(regex, text):
return
cur_node = node
node.text = ''
idx, iteration = 0, 0
for item in re.finditer(regex, text):
if iteration == 0:
cur_node.text = text[idx:item.start()]
else:
_insert_new_node(node, (iteration - 1) * 2 + 1, new_node_tag, text[idx:item.start()])
new_node = _insert_new_node(node, iteration * 2, new_node_tag, text[item.start():item.end()], None, new_node_attrs)
cur_node = new_node
idx = item.end()
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
def _truncate_node(node, position, simplify_whitespaces=True):
""" Truncate a node text at a given position. This algorithm will shorten
at the end of the word whose ending character exceeds position.
:param bool simplify_whitespaces: whether to try to count all successive
whitespaces as one character. This
option should not be True when trying
to keep 'pre' consistency.
"""
if node.text is None:
node.text = ''
truncate_idx = -1
if simplify_whitespaces:
cur_char_nbr = 0
word = None
node_words = node.text.strip(' \t\r\n').split()
for word in node_words:
cur_char_nbr += len(word)
if cur_char_nbr >= position:
break
if word:
truncate_idx = node.text.find(word) + len(word)
else:
truncate_idx = position
if truncate_idx == -1 or truncate_idx > len(node.text):
truncate_idx = len(node.text)
# compose new text bits
innertext = node.text[0:truncate_idx]
outertext = node.text[truncate_idx:]
node.text = innertext
# create <span> ... <a href="#">read more</a></span> node
read_more_node = _create_node(
expand_options.get('oe_expand_container_tag', 'span'),
expand_options.get('oe_expand_container_content', ' ... '),
None,
{'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
)
if expand_options.get('oe_expand_separator_node'):
read_more_separator_node = _create_node(
expand_options.get('oe_expand_separator_node'),
'',
None,
{}
)
read_more_node.append(read_more_separator_node)
read_more_link_node = _create_node(
'a',
expand_options.get('oe_expand_a_content', 'read more'),
None,
{
'href': expand_options.get('oe_expand_a_href', '#'),
'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
}
)
read_more_node.append(read_more_link_node)
# create outertext node
overtext_node = _create_node('span', outertext)
# tag node
overtext_node.set('in_overlength', '1')
# add newly created nodes in dom
node.append(read_more_node)
node.append(overtext_node)
if expand_options is None:
expand_options = {}
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# Pre processing
# ------------------------------------------------------------
# TDE TODO: --- MAIL ORIGINAL ---: '[\-]{4,}([^\-]*)[\-]{4,}'
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
# html: ClEditor seems to love using <div><br /><div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)', re.IGNORECASE)
html = _replace_matching_regex(br_div_tags, html, '<br />')
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
for node in root.iter():
# remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
# form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# Processing
# ------------------------------------------------------------
# tree: tag nodes
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
overlength_section_id = None
overlength_section_count = 0
cur_char_nbr = 0
for node in root.iter():
# comments do not need processing
# note: bug in node.get(value, default) for HtmlComments, default never returned
if node.tag == etree.Comment:
continue
# do not take into account multiple spaces that are displayed as max 1 space in html
node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
# protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
if node.tag == 'section':
overlength_section_count += 1
node.set('section_closure', str(overlength_section_count))
if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
node.set('section_inner', str(overlength_section_count))
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
# state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
node.set('in_overlength', '1')
node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if root.get('hotmail') and node.tag == 'hr' and ('stopSpelling' in node.get('class', '') or 'stopSpelling' in node.get('id', '')):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
# here no quote_begin because we want to be able to remove some quoted
# text without removing all the remaining context
node.set('in_quote', '1')
if node.getparent() is not None and node.getparent().get('in_quote'):
# inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
# if protect section:
# 1/ find the first parent not being inside a section
# 2/ add the read more link
# else:
# 1/ truncate the text at the next available space
# 2/ create a 'read more' node, next to current node
# 3/ add the truncated text in a new node, next to 'read more' node
node_text = (node.text or '').strip().strip('\n').strip()
if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
while node_to_truncate.getparent() is not None:
if node_to_truncate.get('in_quote'):
node_to_truncate = node_to_truncate.getparent()
elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
node_to_truncate = node_to_truncate.getparent()
overlength_section_id = node_to_truncate.get('section_closure')
else:
break
overlength = True
node_to_truncate.set('truncate', '1')
if node_to_truncate == node:
node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
else:
node_to_truncate.set('truncate_position', str(len(node.text or '')))
cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
_truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
parent = node.getparent()
parent.tail = node.tail + (parent.tail or '')
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
# clean node
for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
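# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module). A minimal sketch,
# assuming the enclosing helper whose body ends above is Odoo's
# html_email_clean(); the sample markup and keyword values are made up.
# ---------------------------------------------------------------------------
def _example_html_email_clean():
    sample = '<div>Hello world<blockquote>older quoted reply</blockquote></div>'
    # strip quoted replies entirely and shorten the body to 5 visible chars,
    # which inserts a 'read more' link at the truncation point
    return html_email_clean(sample, remove=True, shorten=True, max_length=5)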
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <peter@fry-it.com>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
        source = tree.xpath('//*[@id="%s"]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
    html = re.sub(r'<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
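# Illustrative usage (not part of the original module): link targets are
# replaced by numbered references in the text and listed at the end.
def _example_html2plaintext():
    sample = '<div><b>Hello</b> <a href="http://example.com">site</a></div>'
    # expected output (approximately): u'*Hello* site [1]\n\n[1] http://example.com\n'
    return html2plaintext(sample)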
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
text = text.replace('\r', '<br/>')
# 2-3: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return ustr(final)
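# Illustrative usage (not part of the original module): a double newline
# becomes a paragraph break and markup characters are escaped.
def _example_plaintext2html():
    text = 'line one\n\nline two & <tag>'
    # -> '<div><p>line one</p><p>line two &amp; &lt;tag&gt;</p></div>'
    return plaintext2html(text, container_tag='div')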
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
    :param bool plaintext: whether content is plaintext and should
        be converted to HTML (or wrapped into a <pre/> tag when
        ``preserve`` is set)
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % ustr(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?html.*>|</?body.*>|<!\W*DOCTYPE.*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return '%s%s' % (html, content)
return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
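# Illustrative usage (not part of the original module): the extra content is
# inserted just before </body> (or </html>) when the snippet has one,
# otherwise it is simply appended.
def _example_append_content_to_html():
    html = '<html><body><p>original</p></body></html>'
    # plaintext content goes through plaintext2html() before insertion
    return append_content_to_html(html, 'appended line', plaintext=True)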
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}$""", re.VERBOSE)
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile(r"<.*-open(?:object|erp)-(\d+)(?:-([\w.]+))?.*@(.*)>", re.UNICODE)
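# Illustrative match (not part of the original module): reference_re extracts
# the record id, optional model name and domain from a Message-ID of the form
# produced by generate_tracking_message_id() below.
def _example_reference_re():
    m = reference_re.search('<1380605623.123-openerp-42-res.partner@example.com>')
    # m.groups() == ('42', 'res.partner', 'example.com')
    return m and m.groups()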
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%.15f.%s-openerp-%s@%s>" % (time.time(), rndstr, res_id, socket.gethostname())
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
"""Low-level function for sending an email (deprecated).
    :deprecated: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
    :param email_from: A string used to fill the `From` header; if falsy,
config['email_from'] is used instead. Also used for
the `Reply-To` header if `reply_to` is not provided
:param email_to: a sequence of addresses to send the mail to.
"""
# If not cr, get cr from current thread database
local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
local_cr = cr = openerp.registry(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
# Send Email
try:
mail_server_pool = openerp.registry(cr.dbname)['ir.mail_server']
res = False
# Pack Message into MIME Object
email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
attachments, message_id, references, openobject_id, subtype, headers=headers)
res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
except Exception:
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
if local_cr:
cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [addr[1] for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]]
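# Illustrative usage (not part of the original module): display names are
# discarded and malformed entries without an '@' are filtered out.
def _example_email_split():
    # -> ['raoul@example.com', 'bob@example.com']
    return email_split('"Raoul" <raoul@example.com>, bob@example.com, bad-entry')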
| agpl-3.0 |
hsbhathiya/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/gen/ThriftSecureEventTransmissionService/ThriftSecureEventTransmissionService.py | 17 | 47704 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ttypes import *
from ...thrift.Thrift import TProcessor
from ...thrift.transport import TTransport
from ..Exception import ttypes
from .. import Data
try:
from ...thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def connect(self, userName, password):
"""
Parameters:
- userName
- password
"""
pass
def disconnect(self, sessionId):
"""
Parameters:
- sessionId
"""
pass
def defineStream(self, sessionId, streamDefinition):
"""
Parameters:
- sessionId
- streamDefinition
"""
pass
def findStreamId(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
pass
def publish(self, eventBundle):
"""
Parameters:
- eventBundle
"""
pass
def deleteStreamById(self, sessionId, streamId):
"""
Parameters:
- sessionId
- streamId
"""
pass
def deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def connect(self, userName, password):
"""
Parameters:
- userName
- password
"""
self.send_connect(userName, password)
return self.recv_connect()
def send_connect(self, userName, password):
self._oprot.writeMessageBegin('connect', TMessageType.CALL, self._seqid)
args = connect_args()
args.userName = userName
args.password = password
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_connect(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = connect_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ae is not None:
raise result.ae
raise TApplicationException(TApplicationException.MISSING_RESULT, "connect failed: unknown result");
def disconnect(self, sessionId):
"""
Parameters:
- sessionId
"""
self.send_disconnect(sessionId)
self.recv_disconnect()
def send_disconnect(self, sessionId):
self._oprot.writeMessageBegin('disconnect', TMessageType.CALL, self._seqid)
args = disconnect_args()
args.sessionId = sessionId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_disconnect(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = disconnect_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def defineStream(self, sessionId, streamDefinition):
"""
Parameters:
- sessionId
- streamDefinition
"""
self.send_defineStream(sessionId, streamDefinition)
return self.recv_defineStream()
def send_defineStream(self, sessionId, streamDefinition):
self._oprot.writeMessageBegin('defineStream', TMessageType.CALL, self._seqid)
args = defineStream_args()
args.sessionId = sessionId
args.streamDefinition = streamDefinition
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_defineStream(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = defineStream_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ade is not None:
raise result.ade
if result.mtd is not None:
raise result.mtd
if result.tde is not None:
raise result.tde
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "defineStream failed: unknown result");
def findStreamId(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
self.send_findStreamId(sessionId, streamName, streamVersion)
return self.recv_findStreamId()
def send_findStreamId(self, sessionId, streamName, streamVersion):
self._oprot.writeMessageBegin('findStreamId', TMessageType.CALL, self._seqid)
args = findStreamId_args()
args.sessionId = sessionId
args.streamName = streamName
args.streamVersion = streamVersion
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_findStreamId(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = findStreamId_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.tnde is not None:
raise result.tnde
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "findStreamId failed: unknown result");
def publish(self, eventBundle):
"""
Parameters:
- eventBundle
"""
self.send_publish(eventBundle)
self.recv_publish()
def send_publish(self, eventBundle):
self._oprot.writeMessageBegin('publish', TMessageType.CALL, self._seqid)
args = publish_args()
args.eventBundle = eventBundle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_publish(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = publish_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ue is not None:
raise result.ue
if result.se is not None:
raise result.se
return
def deleteStreamById(self, sessionId, streamId):
"""
Parameters:
- sessionId
- streamId
"""
self.send_deleteStreamById(sessionId, streamId)
return self.recv_deleteStreamById()
def send_deleteStreamById(self, sessionId, streamId):
self._oprot.writeMessageBegin('deleteStreamById', TMessageType.CALL, self._seqid)
args = deleteStreamById_args()
args.sessionId = sessionId
args.streamId = streamId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteStreamById(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteStreamById_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStreamById failed: unknown result");
def deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
"""
Parameters:
- sessionId
- streamName
- streamVersion
"""
self.send_deleteStreamByNameVersion(sessionId, streamName, streamVersion)
return self.recv_deleteStreamByNameVersion()
def send_deleteStreamByNameVersion(self, sessionId, streamName, streamVersion):
self._oprot.writeMessageBegin('deleteStreamByNameVersion', TMessageType.CALL, self._seqid)
args = deleteStreamByNameVersion_args()
args.sessionId = sessionId
args.streamName = streamName
args.streamVersion = streamVersion
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deleteStreamByNameVersion(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deleteStreamByNameVersion_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.se is not None:
raise result.se
raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteStreamByNameVersion failed: unknown result");
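# ---------------------------------------------------------------------------
# Illustrative client usage (not part of the generated code). A minimal
# sketch using the standard Thrift Python transport/protocol classes, which
# are assumed to be importable from this vendored package; host, port and
# credentials are made up (a real secure endpoint would typically use
# TSSLSocket instead of TSocket).
# ---------------------------------------------------------------------------
def _example_client_usage():
    from ...thrift.transport import TSocket
    from ...thrift.protocol import TBinaryProtocol
    sock = TSocket.TSocket('localhost', 7711)
    transport = TTransport.TBufferedTransport(sock)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    try:
        session_id = client.connect('admin', 'admin')
        stream_id = client.findStreamId(session_id, 'org.example.stream', '1.0.0')
        client.disconnect(session_id)
    finally:
        transport.close()
    return stream_id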
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["connect"] = Processor.process_connect
self._processMap["disconnect"] = Processor.process_disconnect
self._processMap["defineStream"] = Processor.process_defineStream
self._processMap["findStreamId"] = Processor.process_findStreamId
self._processMap["publish"] = Processor.process_publish
self._processMap["deleteStreamById"] = Processor.process_deleteStreamById
self._processMap["deleteStreamByNameVersion"] = Processor.process_deleteStreamByNameVersion
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_connect(self, seqid, iprot, oprot):
args = connect_args()
args.read(iprot)
iprot.readMessageEnd()
result = connect_result()
try:
result.success = self._handler.connect(args.userName, args.password)
except ttypes.ThriftAuthenticationException, ae:
result.ae = ae
oprot.writeMessageBegin("connect", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_disconnect(self, seqid, iprot, oprot):
args = disconnect_args()
args.read(iprot)
iprot.readMessageEnd()
result = disconnect_result()
self._handler.disconnect(args.sessionId)
oprot.writeMessageBegin("disconnect", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_defineStream(self, seqid, iprot, oprot):
args = defineStream_args()
args.read(iprot)
iprot.readMessageEnd()
result = defineStream_result()
try:
result.success = self._handler.defineStream(args.sessionId, args.streamDefinition)
except ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException, ade:
result.ade = ade
except ttypes.ThriftMalformedStreamDefinitionException, mtd:
result.mtd = mtd
except ttypes.ThriftStreamDefinitionException, tde:
result.tde = tde
except ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("defineStream", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_findStreamId(self, seqid, iprot, oprot):
args = findStreamId_args()
args.read(iprot)
iprot.readMessageEnd()
result = findStreamId_result()
try:
result.success = self._handler.findStreamId(args.sessionId, args.streamName, args.streamVersion)
except ttypes.ThriftNoStreamDefinitionExistException, tnde:
result.tnde = tnde
except ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("findStreamId", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_publish(self, seqid, iprot, oprot):
args = publish_args()
args.read(iprot)
iprot.readMessageEnd()
result = publish_result()
try:
self._handler.publish(args.eventBundle)
except ttypes.ThriftUndefinedEventTypeException, ue:
result.ue = ue
except ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("publish", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteStreamById(self, seqid, iprot, oprot):
args = deleteStreamById_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteStreamById_result()
try:
result.success = self._handler.deleteStreamById(args.sessionId, args.streamId)
except ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("deleteStreamById", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deleteStreamByNameVersion(self, seqid, iprot, oprot):
args = deleteStreamByNameVersion_args()
args.read(iprot)
iprot.readMessageEnd()
result = deleteStreamByNameVersion_result()
try:
result.success = self._handler.deleteStreamByNameVersion(args.sessionId, args.streamName, args.streamVersion)
except ttypes.ThriftSessionExpiredException, se:
result.se = se
oprot.writeMessageBegin("deleteStreamByNameVersion", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
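# Illustrative server wiring (not part of the generated code): a handler
# object implementing Iface is wrapped by Processor and served with the
# standard Thrift TSimpleServer; the server/transport classes named here are
# assumed to be importable from this vendored package.
def _example_server_wiring(handler):
    from ...thrift.transport import TSocket
    from ...thrift.protocol import TBinaryProtocol
    from ...thrift.server import TServer
    processor = Processor(handler)
    server_transport = TSocket.TServerSocket(port=7711)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    return TServer.TSimpleServer(processor, server_transport, tfactory, pfactory)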
# HELPER FUNCTIONS AND STRUCTURES
class connect_args:
"""
Attributes:
- userName
- password
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'userName', None, None, ), # 1
(2, TType.STRING, 'password', None, None, ), # 2
)
def __init__(self, userName=None, password=None,):
self.userName = userName
self.password = password
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.userName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('connect_args')
if self.userName is not None:
oprot.writeFieldBegin('userName', TType.STRING, 1)
oprot.writeString(self.userName)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class connect_result:
"""
Attributes:
- success
- ae
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ae', (ttypes.ThriftAuthenticationException, ttypes.ThriftAuthenticationException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ae=None,):
self.success = success
self.ae = ae
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ae = ttypes.ThriftAuthenticationException()
self.ae.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('connect_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.ae is not None:
oprot.writeFieldBegin('ae', TType.STRUCT, 1)
self.ae.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class disconnect_args:
"""
Attributes:
- sessionId
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
)
def __init__(self, sessionId=None,):
self.sessionId = sessionId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disconnect_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class disconnect_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('disconnect_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class defineStream_args:
"""
Attributes:
- sessionId
- streamDefinition
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamDefinition', None, None, ), # 2
)
def __init__(self, sessionId=None, streamDefinition=None,):
self.sessionId = sessionId
self.streamDefinition = streamDefinition
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamDefinition = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('defineStream_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamDefinition is not None:
oprot.writeFieldBegin('streamDefinition', TType.STRING, 2)
oprot.writeString(self.streamDefinition)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class defineStream_result:
"""
Attributes:
- success
- ade
- mtd
- tde
- se
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ade', (ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException, ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'mtd', (ttypes.ThriftMalformedStreamDefinitionException, ttypes.ThriftMalformedStreamDefinitionException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'tde', (ttypes.ThriftStreamDefinitionException, ttypes.ThriftStreamDefinitionException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'se', (ttypes.ThriftSessionExpiredException, ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ade=None, mtd=None, tde=None, se=None,):
self.success = success
self.ade = ade
self.mtd = mtd
self.tde = tde
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ade = ttypes.ThriftDifferentStreamDefinitionAlreadyDefinedException()
self.ade.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.mtd = ttypes.ThriftMalformedStreamDefinitionException()
self.mtd.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.tde = ttypes.ThriftStreamDefinitionException()
self.tde.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.se = ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('defineStream_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.ade is not None:
oprot.writeFieldBegin('ade', TType.STRUCT, 1)
self.ade.write(oprot)
oprot.writeFieldEnd()
if self.mtd is not None:
oprot.writeFieldBegin('mtd', TType.STRUCT, 2)
self.mtd.write(oprot)
oprot.writeFieldEnd()
if self.tde is not None:
oprot.writeFieldBegin('tde', TType.STRUCT, 3)
self.tde.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 4)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class findStreamId_args:
"""
Attributes:
- sessionId
- streamName
- streamVersion
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamName', None, None, ), # 2
(3, TType.STRING, 'streamVersion', None, None, ), # 3
)
def __init__(self, sessionId=None, streamName=None, streamVersion=None,):
self.sessionId = sessionId
self.streamName = streamName
self.streamVersion = streamVersion
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.streamVersion = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('findStreamId_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamName is not None:
oprot.writeFieldBegin('streamName', TType.STRING, 2)
oprot.writeString(self.streamName)
oprot.writeFieldEnd()
if self.streamVersion is not None:
oprot.writeFieldBegin('streamVersion', TType.STRING, 3)
oprot.writeString(self.streamVersion)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class findStreamId_result:
"""
Attributes:
- success
- tnde
- se
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'tnde', (ttypes.ThriftNoStreamDefinitionExistException, ttypes.ThriftNoStreamDefinitionExistException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'se', (ttypes.ThriftSessionExpiredException, ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, tnde=None, se=None,):
self.success = success
self.tnde = tnde
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.tnde = ttypes.ThriftNoStreamDefinitionExistException()
self.tnde.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.se = ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('findStreamId_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.tnde is not None:
oprot.writeFieldBegin('tnde', TType.STRUCT, 1)
self.tnde.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 2)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class publish_args:
"""
Attributes:
- eventBundle
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'eventBundle', (Data.ttypes.ThriftEventBundle, Data.ttypes.ThriftEventBundle.thrift_spec), None, ), # 1
)
def __init__(self, eventBundle=None,):
self.eventBundle = eventBundle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.eventBundle = Data.ttypes.ThriftEventBundle()
self.eventBundle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('publish_args')
if self.eventBundle is not None:
oprot.writeFieldBegin('eventBundle', TType.STRUCT, 1)
self.eventBundle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class publish_result:
"""
Attributes:
- ue
- se
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ue', (ttypes.ThriftUndefinedEventTypeException, ttypes.ThriftUndefinedEventTypeException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'se', (ttypes.ThriftSessionExpiredException, ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 2
)
def __init__(self, ue=None, se=None,):
self.ue = ue
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ue = ttypes.ThriftUndefinedEventTypeException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.se = ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('publish_result')
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 1)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 2)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamById_args:
"""
Attributes:
- sessionId
- streamId
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamId', None, None, ), # 2
)
def __init__(self, sessionId=None, streamId=None,):
self.sessionId = sessionId
self.streamId = streamId
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamId = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamById_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamId is not None:
oprot.writeFieldBegin('streamId', TType.STRING, 2)
oprot.writeString(self.streamId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamById_result:
"""
Attributes:
- success
- se
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'se', (ttypes.ThriftSessionExpiredException, ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, se=None,):
self.success = success
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.se = ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamById_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 1)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamByNameVersion_args:
"""
Attributes:
- sessionId
- streamName
- streamVersion
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'sessionId', None, None, ), # 1
(2, TType.STRING, 'streamName', None, None, ), # 2
(3, TType.STRING, 'streamVersion', None, None, ), # 3
)
def __init__(self, sessionId=None, streamName=None, streamVersion=None,):
self.sessionId = sessionId
self.streamName = streamName
self.streamVersion = streamVersion
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.sessionId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.streamName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.streamVersion = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamByNameVersion_args')
if self.sessionId is not None:
oprot.writeFieldBegin('sessionId', TType.STRING, 1)
oprot.writeString(self.sessionId)
oprot.writeFieldEnd()
if self.streamName is not None:
oprot.writeFieldBegin('streamName', TType.STRING, 2)
oprot.writeString(self.streamName)
oprot.writeFieldEnd()
if self.streamVersion is not None:
oprot.writeFieldBegin('streamVersion', TType.STRING, 3)
oprot.writeString(self.streamVersion)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deleteStreamByNameVersion_result:
"""
Attributes:
- success
- se
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'se', (ttypes.ThriftSessionExpiredException, ttypes.ThriftSessionExpiredException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, se=None,):
self.success = success
self.se = se
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.se = ttypes.ThriftSessionExpiredException()
self.se.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deleteStreamByNameVersion_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.se is not None:
oprot.writeFieldBegin('se', TType.STRUCT, 1)
self.se.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
newswangerd/ansible | lib/ansible/plugins/loader.py | 8 | 55324 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import os.path
import sys
import warnings
from collections import defaultdict, namedtuple
from ansible import constants as C
from ansible.errors import AnsibleError, AnsiblePluginCircularRedirect, AnsiblePluginRemovedError, AnsibleCollectionUnsupportedVersionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.compat.importlib import import_module
from ansible.module_utils.six import string_types
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder, _get_collection_metadata
from ansible.utils.display import Display
from ansible.utils.plugin_docs import add_fragments
from ansible import __version__ as ansible_version
# TODO: take the packaging dep, or vendor SpecifierSet?
try:
from packaging.specifiers import SpecifierSet
from packaging.version import Version
except ImportError:
SpecifierSet = None
Version = None
try:
import importlib.util
imp = None
except ImportError:
import imp
display = Display()
get_with_context_result = namedtuple('get_with_context_result', ['object', 'plugin_load_context'])
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
def add_all_plugin_dirs(path):
''' add any existing plugin dirs in the path provided '''
b_path = os.path.expanduser(to_bytes(path, errors='surrogate_or_strict'))
if os.path.isdir(b_path):
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
if os.path.isdir(plugin_path):
obj.add_directory(to_text(plugin_path))
else:
display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
def get_shell_plugin(shell_type=None, executable=None):
if not shell_type:
# default to sh
shell_type = 'sh'
# mostly for backwards compat
if executable:
if isinstance(executable, string_types):
shell_filename = os.path.basename(executable)
try:
shell = shell_loader.get(shell_filename)
except Exception:
shell = None
if shell is None:
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
shell_type = shell.SHELL_FAMILY
break
else:
            raise AnsibleError("Either a shell type or a shell executable must be provided")
shell = shell_loader.get(shell_type)
if not shell:
raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)
if executable:
setattr(shell, 'executable', executable)
return shell
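# Illustrative usage (not part of the original module): resolving by
# executable basename falls back to each plugin's COMPATIBLE_SHELLS list,
# so '/bin/bash' is expected to resolve to the 'sh' shell family.
def _example_get_shell_plugin():
    sh = get_shell_plugin(shell_type='sh')
    bash = get_shell_plugin(executable='/bin/bash')
    return sh, bash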
def add_dirs_to_loader(which_loader, paths):
loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
for path in paths:
loader.add_directory(path, with_subdir=True)
class PluginPathContext(object):
def __init__(self, path, internal):
self.path = path
self.internal = internal
class PluginLoadContext(object):
def __init__(self):
self.original_name = None
self.redirect_list = []
self.error_list = []
self.import_error_list = []
self.load_attempts = []
self.pending_redirect = None
self.exit_reason = None
self.plugin_resolved_path = None
self.plugin_resolved_name = None
self.plugin_resolved_collection = None # empty string for resolved plugins from user-supplied paths
self.deprecated = False
self.removal_date = None
self.removal_version = None
self.deprecation_warnings = []
self.resolved = False
def record_deprecation(self, name, deprecation, collection_name):
if not deprecation:
return self
warning_text = deprecation.get('warning_text', None)
removal_date = deprecation.get('removal_date', None)
removal_version = deprecation.get('removal_version', None)
# If both removal_date and removal_version are specified, use removal_date
if removal_date is not None:
removal_version = None
if not warning_text:
warning_text = '{0} has been deprecated'.format(name)
display.deprecated(warning_text, date=removal_date, version=removal_version, collection_name=collection_name)
self.deprecated = True
if removal_date:
self.removal_date = removal_date
if removal_version:
self.removal_version = removal_version
self.deprecation_warnings.append(warning_text)
return self
def resolve(self, resolved_name, resolved_path, resolved_collection, exit_reason):
self.pending_redirect = None
self.plugin_resolved_name = resolved_name
self.plugin_resolved_path = resolved_path
self.plugin_resolved_collection = resolved_collection
self.exit_reason = exit_reason
self.resolved = True
return self
def redirect(self, redirect_name):
self.pending_redirect = redirect_name
self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
self.resolved = False
return self
def nope(self, exit_reason):
self.pending_redirect = None
self.exit_reason = exit_reason
self.resolved = False
return self
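# Illustrative lifecycle (not part of the original module): a load context
# accumulates redirects until the plugin is resolved or rejected; all names
# and paths below are made up.
def _example_plugin_load_context():
    ctx = PluginLoadContext()
    ctx.original_name = 'old_alias'
    ctx.redirect('ns.coll.new_name')    # marks a pending redirect
    ctx.resolve('new_name', '/tmp/plugins/new_name.py', 'ns.coll',
                'found in configured paths')
    return ctx.resolved                 # True after resolve()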
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
aliases = {} if aliases is None else aliases
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.subdir = subdir
# FIXME: remove alias dict in favor of alias by symlink?
self.aliases = aliases
if config and not isinstance(config, list):
config = [config]
elif not config:
config = []
self.config = config
if class_name not in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if class_name not in PATH_CACHE:
PATH_CACHE[class_name] = None
if class_name not in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
# hold dirs added at runtime outside of config
self._extra_dirs = []
# caches
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._searched_paths = set()
def __repr__(self):
return 'PluginLoader(type={0})'.format(AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir))
def _clear_caches(self):
if C.OLD_PLUGIN_CACHE_CLEARING:
self._paths = None
else:
# reset global caches
MODULE_CACHE[self.class_name] = {}
PATH_CACHE[self.class_name] = None
PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)
# reset internal caches
self._module_cache = MODULE_CACHE[self.class_name]
self._paths = PATH_CACHE[self.class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name=self.class_name,
base_class=self.base_class,
package=self.package,
config=self.config,
subdir=self.subdir,
aliases=self.aliases,
_extra_dirs=self._extra_dirs,
_searched_paths=self._searched_paths,
PATH_CACHE=PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
)
def format_paths(self, paths):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in paths:
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def print_paths(self):
return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir, followlinks=True):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root, x))
return results
def _get_package_paths(self, subdirs=True):
''' Gets the path of a Python package '''
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = to_text(os.path.dirname(m.__file__), errors='surrogate_or_strict')
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
def _get_paths_with_context(self, subdirs=True):
''' Return a list of PluginPathContext objects to search for plugins in '''
# FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
# In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
# which always calls it with subdirs=False. So there currently isn't a problem with this caching.
if self._paths is not None:
return self._paths
ret = [PluginPathContext(p, False) for p in self._extra_dirs]
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
for path in self.config:
path = os.path.abspath(os.path.expanduser(path))
if subdirs:
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
c = to_text(c, errors='surrogate_or_strict')
if os.path.isdir(c) and c not in ret:
ret.append(PluginPathContext(c, False))
path = to_text(path, errors='surrogate_or_strict')
if path not in ret:
ret.append(PluginPathContext(path, False))
# look for any plugins installed in the package subtree
# Note package path always gets added last so that every other type of
# path is searched before it.
ret.extend([PluginPathContext(p, True) for p in self._get_package_paths(subdirs=subdirs)])
# HACK: because powershell modules are in the same directory
# hierarchy as other modules we have to process them last. This is
# because powershell only works on windows but the other modules work
# anywhere (possibly including windows if the correct language
        # interpreter is installed). The non-powershell modules can have any
        # file extension, and thus powershell modules get picked up in that search.
# The non-hack way to fix this is to have powershell modules be
# a different PluginLoader/ModuleLoader. But that requires changing
        # other things too (known things to change would be PATHS_CACHE,
        # PLUGIN_PATHS_CACHE, and MODULE_CACHE). Since those three dicts key
# on the class_name and neither regular modules nor powershell modules
# would have class_names, they would not work as written.
#
# The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
# also in the original order they were found in 'ret'.
# The .sort() method is guaranteed to be stable, so original order is preserved.
ret.sort(key=lambda p: p.path.endswith('/windows'))
# cache and return the result
self._paths = ret
return ret
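    # A minimal sketch of the stable sort performed above (paths are
    # hypothetical): the key maps non-windows paths to False (0) and paths
    # ending in '/windows' to True (1), so an input order of
    # ['a', 'x/windows', 'b', 'y/windows'] comes out as
    # ['a', 'b', 'x/windows', 'y/windows'], preserving relative order.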
def _get_paths(self, subdirs=True):
''' Return a list of paths to search for plugins in '''
paths_with_context = self._get_paths_with_context(subdirs=subdirs)
return [path_with_context.path for path_with_context in paths_with_context]
def _load_config_defs(self, name, module, path):
''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''
# plugins w/o class name don't support config
if self.class_name:
type_name = get_plugin_class(self.class_name)
            # if type_name != 'module_doc_fragment':
if type_name in C.CONFIGURABLE_PLUGINS:
dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
if dstring:
add_fragments(dstring, path, fragment_loader=fragment_loader, is_module=(type_name == 'module'))
if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._clear_caches()
display.debug('Added %s to loader search path' % (directory))
def _query_collection_routing_meta(self, acr, plugin_type, extension=None):
collection_pkg = import_module(acr.n_python_collection_package_name)
if not collection_pkg:
return None
# FIXME: shouldn't need this...
try:
# force any type-specific metadata postprocessing to occur
import_module(acr.n_python_collection_package_name + '.plugins.{0}'.format(plugin_type))
except ImportError:
pass
# this will be created by the collection PEP302 loader
collection_meta = getattr(collection_pkg, '_collection_meta', None)
if not collection_meta:
return None
# TODO: add subdirs support
# check for extension-specific entry first (eg 'setup.ps1')
# TODO: str/bytes on extension/name munging
if acr.subdirs:
subdir_qualified_resource = '.'.join([acr.subdirs, acr.resource])
else:
subdir_qualified_resource = acr.resource
entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource + extension, None)
if not entry:
# try for extension-agnostic entry
entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource, None)
return entry
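    # For orientation, the routing structure read above comes from a
    # collection's meta/runtime.yml; an illustrative (hypothetical) sketch:
    #
    #   plugin_routing:
    #     modules:
    #       old_module:
    #         redirect: ns.col.new_module
    #         deprecation:
    #           removal_version: 3.0.0
    #           warning_text: use ns.col.new_module instead
    #       dead_module:
    #         tombstone:
    #           removal_date: 2020-12-31
    #           warning_text: dead_module no longer ships with this collection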
def _find_fq_plugin(self, fq_name, extension, plugin_load_context):
"""Search builtin paths to find a plugin. No external paths are searched,
meaning plugins inside roles inside collections will be ignored.
"""
plugin_load_context.resolved = False
plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)
# check collection metadata to see if any special handling is required for this plugin
routing_metadata = self._query_collection_routing_meta(acr, plugin_type, extension=extension)
# TODO: factor this into a wrapper method
if routing_metadata:
deprecation = routing_metadata.get('deprecation', None)
# this will no-op if there's no deprecation metadata for this plugin
plugin_load_context.record_deprecation(fq_name, deprecation, acr.collection)
tombstone = routing_metadata.get('tombstone', None)
# FIXME: clean up text gen
if tombstone:
removal_date = tombstone.get('removal_date')
removal_version = tombstone.get('removal_version')
warning_text = tombstone.get('warning_text') or '{0} has been removed.'.format(fq_name)
removed_msg = display.get_deprecation_message(msg=warning_text, version=removal_version,
date=removal_date, removed=True,
collection_name=acr.collection)
plugin_load_context.removal_date = removal_date
plugin_load_context.removal_version = removal_version
plugin_load_context.resolved = True
plugin_load_context.exit_reason = removed_msg
raise AnsiblePluginRemovedError(removed_msg, plugin_load_context=plugin_load_context)
redirect = routing_metadata.get('redirect', None)
if redirect:
# FIXME: remove once this is covered in debug or whatever
display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
return plugin_load_context.redirect(redirect)
# TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?
n_resource = to_native(acr.resource, errors='strict')
# we want this before the extension is added
full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)
if extension:
n_resource += extension
pkg = sys.modules.get(acr.n_python_package_name)
if not pkg:
# FIXME: there must be cheaper/safer way to do this
try:
pkg = import_module(acr.n_python_package_name)
except ImportError:
return plugin_load_context.nope('Python package {0} not found'.format(acr.n_python_package_name))
pkg_path = os.path.dirname(pkg.__file__)
n_resource_path = os.path.join(pkg_path, n_resource)
# FIXME: and is file or file link or ...
if os.path.exists(n_resource_path):
return plugin_load_context.resolve(
full_name, to_text(n_resource_path), acr.collection, 'found exact match for {0} in {1}'.format(full_name, acr.collection))
if extension:
# the request was extension-specific, don't try for an extensionless match
return plugin_load_context.nope('no match for {0} in {1}'.format(to_text(n_resource), acr.collection))
# look for any matching extension in the package location (sans filter)
found_files = [f
for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*')
if os.path.isfile(f) and not f.endswith(C.MODULE_IGNORE_EXTS)]
if not found_files:
return plugin_load_context.nope('failed fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
if len(found_files) > 1:
# TODO: warn?
pass
return plugin_load_context.resolve(
full_name, to_text(found_files[0]), acr.collection, 'found fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
''' Find a plugin named name '''
result = self.find_plugin_with_context(name, mod_type, ignore_deprecated, check_aliases, collection_list)
if result.resolved and result.plugin_resolved_path:
return result.plugin_resolved_path
return None
def find_plugin_with_context(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
''' Find a plugin named name, returning contextual info about the load, recursively resolving redirection '''
plugin_load_context = PluginLoadContext()
plugin_load_context.original_name = name
while True:
result = self._resolve_plugin_step(name, mod_type, ignore_deprecated, check_aliases, collection_list, plugin_load_context=plugin_load_context)
if result.pending_redirect:
if result.pending_redirect in result.redirect_list:
raise AnsiblePluginCircularRedirect('plugin redirect loop resolving {0} (path: {1})'.format(result.original_name, result.redirect_list))
name = result.pending_redirect
result.pending_redirect = None
plugin_load_context = result
else:
break
# TODO: smuggle these to the controller when we're in a worker, reduce noise from normal things like missing plugin packages during collection search
if plugin_load_context.error_list:
display.warning("errors were encountered during the plugin load for {0}:\n{1}".format(name, plugin_load_context.error_list))
# TODO: display/return import_error_list? Only useful for forensics...
# FIXME: store structured deprecation data in PluginLoadContext and use display.deprecate
# if plugin_load_context.deprecated and C.config.get_config_value('DEPRECATION_WARNINGS'):
# for dw in plugin_load_context.deprecation_warnings:
# # TODO: need to smuggle these to the controller if we're in a worker context
# display.warning('[DEPRECATION WARNING] ' + dw)
return plugin_load_context
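    # A hedged usage sketch (the loader instances referenced here are defined
    # at the bottom of this file; the plugin name is illustrative):
    #
    #   ctx = module_loader.find_plugin_with_context('ping')
    #   if ctx.resolved:
    #       print(ctx.plugin_resolved_name, ctx.plugin_resolved_path)
    #
    # Redirects are chased inside the while loop above; each hop that was
    # followed is recorded in ctx.redirect_list.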
# FIXME: name bikeshed
def _resolve_plugin_step(self, name, mod_type='', ignore_deprecated=False,
check_aliases=False, collection_list=None, plugin_load_context=PluginLoadContext()):
if not plugin_load_context:
raise ValueError('A PluginLoadContext is required')
plugin_load_context.redirect_list.append(name)
plugin_load_context.resolved = False
global _PLUGIN_FILTERS
if name in _PLUGIN_FILTERS[self.package]:
plugin_load_context.exit_reason = '{0} matched a defined plugin filter'.format(name)
return plugin_load_context
if mod_type:
suffix = mod_type
elif self.class_name:
# Ansible plugins that run in the controller process (most plugins)
suffix = '.py'
else:
# Only Ansible Modules. Ansible modules can be any executable so
# they can have any suffix
suffix = ''
# FIXME: need this right now so we can still load shipped PS module_utils- come up with a more robust solution
if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
if '.' in name or not collection_list:
candidates = [name]
else:
candidates = ['{0}.{1}'.format(c, name) for c in collection_list]
for candidate_name in candidates:
try:
plugin_load_context.load_attempts.append(candidate_name)
# HACK: refactor this properly
if candidate_name.startswith('ansible.legacy'):
# 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
# They need to search 'library' and the various '*_plugins' directories in order to find the file.
plugin_load_context = self._find_plugin_legacy(name.replace('ansible.legacy.', '', 1),
plugin_load_context, ignore_deprecated, check_aliases, suffix)
else:
# 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context)
if candidate_name != plugin_load_context.original_name and candidate_name not in plugin_load_context.redirect_list:
plugin_load_context.redirect_list.append(candidate_name)
if plugin_load_context.resolved or plugin_load_context.pending_redirect: # if we got an answer or need to chase down a redirect, return
return plugin_load_context
except (AnsiblePluginRemovedError, AnsiblePluginCircularRedirect, AnsibleCollectionUnsupportedVersionError):
# these are generally fatal, let them fly
raise
except ImportError as ie:
plugin_load_context.import_error_list.append(ie)
except Exception as ex:
# FIXME: keep actual errors, not just assembled messages
plugin_load_context.error_list.append(to_native(ex))
if plugin_load_context.error_list:
display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(plugin_load_context.error_list)))
plugin_load_context.exit_reason = 'no matches found for {0}'.format(name)
return plugin_load_context
# if we got here, there's no collection list and it's not an FQ name, so do legacy lookup
return self._find_plugin_legacy(name, plugin_load_context, ignore_deprecated, check_aliases, suffix)
def _find_plugin_legacy(self, name, plugin_load_context, ignore_deprecated=False, check_aliases=False, suffix=None):
"""Search library and various *_plugins paths in order to find the file.
This was behavior prior to the existence of collections.
"""
plugin_load_context.resolved = False
if check_aliases:
name = self.aliases.get(name, name)
# The particular cache to look for modules within. This matches the
# requested mod_type
pull_cache = self._plugin_path_cache[suffix]
try:
path_with_context = pull_cache[name]
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context.resolved = True
return plugin_load_context
except KeyError:
# Cache miss. Now let's find the plugin
pass
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator.
# We can use _get_paths_with_context() since add_directory() forces a cache refresh.
for path_with_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(to_bytes(p.path))):
path = path_with_context.path
b_path = to_bytes(path)
display.debug('trying %s' % path)
plugin_load_context.load_attempts.append(path)
internal = path_with_context.internal
try:
full_paths = (os.path.join(b_path, f) for f in os.listdir(b_path))
            except OSError as e:
                display.warning("Error accessing plugin paths: %s" % to_text(e))
                continue
for full_path in (to_native(f) for f in full_paths if os.path.isfile(f) and not f.endswith(b'__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
# FIXME: I believe this is only correct for modules and module_utils.
                # For all other plugins, .pyc and .pyo should also be valid
if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
continue
splitname = os.path.splitext(full_name)
base_name = splitname[0]
try:
extension = splitname[1]
except IndexError:
extension = ''
# everything downstream expects unicode
full_path = to_text(full_path, errors='surrogate_or_strict')
# Module found, now enter it into the caches that match this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)
if full_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][full_name] = PluginPathContext(full_path, internal)
if base_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][base_name] = PluginPathContext(full_path, internal)
if full_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][full_name] = PluginPathContext(full_path, internal)
self._searched_paths.add(path)
try:
path_with_context = pull_cache[name]
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context.resolved = True
return plugin_load_context
except KeyError:
# Didn't find the plugin in this directory. Load modules from the next one
pass
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
alias_name = '_' + name
# We've already cached all the paths at this point
if alias_name in pull_cache:
path_with_context = pull_cache[alias_name]
if not ignore_deprecated and not os.path.islink(path_with_context.path):
# FIXME: this is not always the case, some are just aliases
display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
plugin_load_context.plugin_resolved_path = path_with_context.path
plugin_load_context.plugin_resolved_name = alias_name
plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
plugin_load_context.resolved = True
return plugin_load_context
# last ditch, if it's something that can be redirected, look for a builtin redirect before giving up
candidate_fqcr = 'ansible.builtin.{0}'.format(name)
if '.' not in name and AnsibleCollectionRef.is_valid_fqcr(candidate_fqcr):
return self._find_fq_plugin(fq_name=candidate_fqcr, extension=suffix, plugin_load_context=plugin_load_context)
return plugin_load_context.nope('{0} is not eligible for last-chance resolution'.format(name))
def has_plugin(self, name, collection_list=None):
''' Checks if a plugin named name exists '''
try:
return self.find_plugin(name, collection_list=collection_list) is not None
except Exception as ex:
if isinstance(ex, AnsibleError):
raise
# log and continue, likely an innocuous type/package loading failure in collections import
display.debug('has_plugin error: {0}'.format(to_text(ex)))
__contains__ = has_plugin
def _load_module_source(self, name, path):
# avoid collisions across plugins
if name.startswith('ansible_collections.'):
full_name = name
else:
full_name = '.'.join([self.package, name])
if full_name in sys.modules:
# Avoids double loading, See https://github.com/ansible/ansible/issues/13110
return sys.modules[full_name]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
if imp is None:
spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[full_name] = module
else:
with open(to_bytes(path), 'rb') as module_file:
# to_native is used here because imp.load_source's path is for tracebacks and python's traceback formatting uses native strings
module = imp.load_source(to_native(full_name), to_native(path), module_file)
return module
def _update_object(self, obj, name, path, redirected_names=None):
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
setattr(obj, '_load_name', name)
setattr(obj, '_redirected_names', redirected_names or [])
def get(self, name, *args, **kwargs):
return self.get_with_context(name, *args, **kwargs).object
def get_with_context(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
found_in_cache = True
class_only = kwargs.pop('class_only', False)
collection_list = kwargs.pop('collection_list', None)
if name in self.aliases:
name = self.aliases[name]
plugin_load_context = self.find_plugin_with_context(name, collection_list=collection_list)
if not plugin_load_context.resolved or not plugin_load_context.plugin_resolved_path:
# FIXME: this is probably an error (eg removed plugin)
return get_with_context_result(None, plugin_load_context)
name = plugin_load_context.plugin_resolved_name
path = plugin_load_context.plugin_resolved_path
redirected_names = plugin_load_context.redirect_list or []
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)
self._load_config_defs(name, self._module_cache[path], path)
found_in_cache = False
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
return get_with_context_result(None, plugin_load_context)
if not issubclass(obj, plugin_class):
return get_with_context_result(None, plugin_load_context)
# FIXME: update this to use the load context
self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
# A plugin may need to use its _load_name in __init__ (for example, to set
# or get options from config), so update the object before using the constructor
instance = object.__new__(obj)
self._update_object(instance, name, path, redirected_names)
obj.__init__(instance, *args, **kwargs)
obj = instance
except TypeError as e:
if "abstract" in e.args[0]:
# Abstract Base Class. The found plugin file does not
# fully implement the defined interface.
return get_with_context_result(None, plugin_load_context)
raise
self._update_object(obj, name, path, redirected_names)
return get_with_context_result(obj, plugin_load_context)
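    # A minimal sketch of the two accessors above (the plugin name is
    # illustrative; the return value is a namedtuple-like pair):
    #
    #   plugin = cache_loader.get('memory')               # instance or None
    #   result = cache_loader.get_with_context('memory')
    #   plugin, ctx = result.object, result.plugin_load_context
    #
    # Passing class_only=True skips instantiation and returns the class itself.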
def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
''' formats data to display debug info for plugin loading, also avoids processing unless really needed '''
if C.DEFAULT_DEBUG:
msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
if len(searched_paths) > 1:
msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
if found_in_cache or class_only:
msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
display.debug(msg)
def all(self, *args, **kwargs):
'''
Iterate through all plugins of this type
A plugin loader is initialized with a specific type. This function is an iterator returning
all of the plugins of that type to the caller.
:kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
instead of an instance of the plugin. This conflicts with class_only and both should
not be set.
:kwarg class_only: If this is set to True then we return the python class which implements
a plugin rather than an instance of the plugin. This conflicts with path_only and both
should not be set.
:kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
should take precedence. If this is set to False, then we return all of the plugins
found, including those with duplicate names. In the case of duplicates, the order in
which they are returned is the one that would take precedence first, followed by the
others in decreasing precedence order. This should only be used by subclasses which
want to manage their own deduplication of the plugins.
:*args: Any extra arguments are passed to each plugin when it is instantiated.
:**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
'''
# TODO: Change the signature of this method to:
# def all(return_type='instance', args=None, kwargs=None):
# if args is None: args = []
# if kwargs is None: kwargs = {}
# return_type can be instance, class, or path.
# These changes will mean that plugin parameters won't conflict with our params and
# will also make it impossible to request both a path and a class at the same time.
#
# Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
# tests setting it to True
global _PLUGIN_FILTERS
dedupe = kwargs.pop('_dedupe', True)
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False)
# Having both path_only and class_only is a coding bug
if path_only and class_only:
raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
all_matches = []
found_in_cache = True
for i in self._get_paths():
all_matches.extend(glob.glob(to_native(os.path.join(i, "*.py"))))
loaded_modules = set()
for path in sorted(all_matches, key=os.path.basename):
name = os.path.splitext(path)[0]
basename = os.path.basename(name)
if basename == '__init__' or basename in _PLUGIN_FILTERS[self.package]:
continue
if dedupe and basename in loaded_modules:
continue
loaded_modules.add(basename)
if path_only:
yield path
continue
if path not in self._module_cache:
try:
if self.subdir in ('filter_plugins', 'test_plugins'):
# filter and test plugin files can contain multiple plugins
# they must have a unique python module name to prevent them from shadowing each other
full_name = '{0}_{1}'.format(abs(hash(path)), basename)
else:
full_name = basename
module = self._load_module_source(full_name, path)
self._load_config_defs(basename, module, path)
except Exception as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
self._module_cache[path] = module
found_in_cache = False
try:
obj = getattr(self._module_cache[path], self.class_name)
except AttributeError as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
continue
if not issubclass(obj, plugin_class):
continue
self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
except TypeError as e:
display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
self._update_object(obj, basename, path)
yield obj
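    # A hedged sketch of consuming this iterator (see the docstring above):
    #
    #   for path in callback_loader.all(path_only=True):
    #       print(path)                    # one file path per plugin found
    #   for klass in strategy_loader.all(class_only=True):
    #       print(klass.__name__)          # the implementing class, uninstantiated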
class Jinja2Loader(PluginLoader):
"""
PluginLoader optimized for Jinja2 plugins
The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
The way the calling code is setup, we need to do a few things differently in the all() method
"""
def find_plugin(self, name, collection_list=None):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
# we deduplicate differently than the base class
if '.' in name:
return super(Jinja2Loader, self).find_plugin(name, collection_list=collection_list)
raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
def get(self, name, *args, **kwargs):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
        # we deduplicate differently than the base class
        if '.' in name:
            return super(Jinja2Loader, self).get(name, *args, **kwargs)
        raise AnsibleError('No code should call get for Jinja2Loaders (Not implemented)')
def all(self, *args, **kwargs):
"""
Differences with :meth:`PluginLoader.all`:
* We do not deduplicate ansible plugin names. This is because we don't care about our
plugin names, here. We care about the names of the actual jinja2 plugins which are inside
of our plugins.
* We reverse the order of the list of plugins compared to other PluginLoaders. This is
because of how calling code chooses to sync the plugins from the list. It adds all the
Jinja2 plugins from one of our Ansible plugins into a dict. Then it adds the Jinja2
plugins from the next Ansible plugin, overwriting any Jinja2 plugins that had the same
name. This is an encapsulation violation (the PluginLoader should not know about what
calling code does with the data) but we're pushing the common code here. We'll fix
this in the future by moving more of the common code into this PluginLoader.
* We return a list. We could iterate the list instead but that's extra work for no gain because
the API receiving this doesn't care. It just needs an iterable
"""
# We don't deduplicate ansible plugin names. Instead, calling code deduplicates jinja2
# plugin names.
kwargs['_dedupe'] = False
# We have to instantiate a list of all plugins so that we can reverse it. We reverse it so
# that calling code will deduplicate this correctly.
plugins = list(super(Jinja2Loader, self).all(*args, **kwargs))
plugins.reverse()
return plugins
def _load_plugin_filter():
filters = defaultdict(frozenset)
user_set = False
if C.PLUGIN_FILTERS_CFG is None:
filter_cfg = '/etc/ansible/plugin_filters.yml'
else:
filter_cfg = C.PLUGIN_FILTERS_CFG
user_set = True
if os.path.exists(filter_cfg):
with open(filter_cfg, 'rb') as f:
try:
filter_data = from_yaml(f.read())
except Exception as e:
                display.warning(u'The plugin filter file, {0}, was not parsable.'
                                u' Skipping: {1}'.format(filter_cfg, to_text(e)))
return filters
try:
version = filter_data['filter_version']
except KeyError:
            display.warning(u'The plugin filter file, {0}, was invalid.'
                            u' Skipping.'.format(filter_cfg))
return filters
# Try to convert for people specifying version as a float instead of string
version = to_text(version)
version = version.strip()
if version == u'1.0':
# Modules and action plugins share the same blacklist since the difference between the
# two isn't visible to the users
try:
filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
except TypeError:
display.warning(u'Unable to parse the plugin filter file {0} as'
u' module_blacklist is not a list.'
u' Skipping.'.format(filter_cfg))
return filters
filters['ansible.plugins.action'] = filters['ansible.modules']
else:
            display.warning(u'The plugin filter file, {0}, was a version not recognized by this'
                            u' version of Ansible. Skipping.'.format(filter_cfg))
else:
if user_set:
            display.warning(u'The plugin filter file, {0}, does not exist.'
                            u' Skipping.'.format(filter_cfg))
# Specialcase the stat module as Ansible can run very few things if stat is blacklisted.
if 'stat' in filters['ansible.modules']:
raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
' Ansible will not function without the stat module. Please remove stat'
' from the blacklist.'.format(to_native(filter_cfg)))
return filters
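# For reference, a filter file accepted by the version-1.0 branch above looks
# like the following (module names are illustrative):
#
#   filter_version: '1.0'
#   module_blacklist:
#     - docker
#     - ec2_ami_find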
# since we don't want the actual collection loader understanding metadata, we'll do it in an event handler
def _on_collection_load_handler(collection_name, collection_path):
display.vvvv(to_text('Loading collection {0} from {1}'.format(collection_name, collection_path)))
collection_meta = _get_collection_metadata(collection_name)
try:
if not _does_collection_support_ansible_version(collection_meta.get('requires_ansible', ''), ansible_version):
mismatch_behavior = C.config.get_config_value('COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH')
message = 'Collection {0} does not support Ansible version {1}'.format(collection_name, ansible_version)
if mismatch_behavior == 'warning':
display.warning(message)
elif mismatch_behavior == 'error':
raise AnsibleCollectionUnsupportedVersionError(message)
except AnsibleError:
raise
except Exception as ex:
display.warning('Error parsing collection metadata requires_ansible value from collection {0}: {1}'.format(collection_name, ex))
def _does_collection_support_ansible_version(requirement_string, ansible_version):
if not requirement_string:
return True
if not SpecifierSet:
display.warning('packaging Python module unavailable; unable to validate collection Ansible version requirements')
return True
ss = SpecifierSet(requirement_string)
# ignore prerelease/postrelease/beta/dev flags for simplicity
base_ansible_version = Version(ansible_version).base_version
return ss.contains(base_ansible_version)
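# A quick worked example of the check above, assuming packaging is available:
#
#   _does_collection_support_ansible_version('>=2.9.10,<2.11', '2.10.5')  # True
#   _does_collection_support_ansible_version('>=2.11', '2.10.5')          # False
#   _does_collection_support_ansible_version('', '2.10.5')                # True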
def _configure_collection_loader():
if AnsibleCollectionConfig.collection_finder:
display.warning('AnsibleCollectionFinder has already been configured')
return
finder = _AnsibleCollectionFinder(C.config.get_config_value('COLLECTIONS_PATHS'), C.config.get_config_value('COLLECTIONS_SCAN_SYS_PATH'))
finder._install()
# this should succeed now
AnsibleCollectionConfig.on_collection_load += _on_collection_load_handler
# TODO: All of the following is initialization code. It should be moved inside of an initialization
# function which is called at some point early in the ansible and ansible-playbook CLI startup.
_PLUGIN_FILTERS = _load_plugin_filter()
_configure_collection_loader()
# doc fragments first
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.plugins.doc_fragments',
C.DOC_FRAGMENT_PLUGIN_PATH,
'doc_fragments',
)
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
# regular module_utils doesn't. This can be revisited once we have more granular loaders.
ps_module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
filter_loader = Jinja2Loader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
)
test_loader = Jinja2Loader(
'TestModule',
'ansible.plugins.test',
C.DEFAULT_TEST_PLUGIN_PATH,
'test_plugins'
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
C.DEFAULT_STRATEGY_PLUGIN_PATH,
'strategy_plugins',
required_base_class='StrategyBase',
)
terminal_loader = PluginLoader(
'TerminalModule',
'ansible.plugins.terminal',
C.DEFAULT_TERMINAL_PLUGIN_PATH,
'terminal_plugins',
required_base_class='TerminalBase'
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
cliconf_loader = PluginLoader(
'Cliconf',
'ansible.plugins.cliconf',
C.DEFAULT_CLICONF_PLUGIN_PATH,
'cliconf_plugins',
required_base_class='CliconfBase'
)
netconf_loader = PluginLoader(
'Netconf',
'ansible.plugins.netconf',
C.DEFAULT_NETCONF_PLUGIN_PATH,
'netconf_plugins',
required_base_class='NetconfBase'
)
inventory_loader = PluginLoader(
'InventoryModule',
'ansible.plugins.inventory',
C.DEFAULT_INVENTORY_PLUGIN_PATH,
'inventory_plugins'
)
httpapi_loader = PluginLoader(
'HttpApi',
'ansible.plugins.httpapi',
C.DEFAULT_HTTPAPI_PLUGIN_PATH,
'httpapi_plugins',
required_base_class='HttpApiBase',
)
become_loader = PluginLoader(
'BecomeModule',
'ansible.plugins.become',
C.BECOME_PLUGIN_PATH,
'become_plugins'
)
| gpl-3.0 |
hguochen/mediapop | mediapop/mediapop/settings/local.py | 58 | 1632 | """Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
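# A hedged example of pointing the same project at PostgreSQL instead; the
# engine name is the standard Django backend for this era, and the
# credentials below are placeholders:
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'mediapop',
#         'USER': 'mediapop',
#         'PASSWORD': 'secret',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }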
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
| mit |
lewisc/spark-tk | regression-tests/generatedata/movie_user_5ratings.py | 14 | 1717 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/bin/python
import sys
import random
def get_row_type():
num = random.randint(1,5)
if num in range(1,4):
return "tr"
    elif num == 4:
        return "va"
    elif num == 5:
        return "te"
else:
print "get_row_type() returned ", num
sys.exit()
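# Note: the mapping above gives roughly a 60/20/20 split; values 1-3 map to
# "tr" (train), 4 to "va" (validation), and 5 to "te" (test).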
def main():
if len(sys.argv) != 3:
print "Usage: ./movie_user_5ratings.py number_of_movies number_of_user "
sys.exit()
number_of_movies = int(sys.argv[1])
number_of_user = int(sys.argv[2])
f = open("movie_user_5ratings.csv", 'w')
    for mov in range(1, number_of_movies + 1):
        for user in range(1, number_of_user + 1):
row_type = get_row_type()
weight = random.randint(1,5)
#print '%d,%s,%d,%d,%s' % (-mov,'r',user,weight,row_type)
#print '%d,%s,%d,%d,%s' % (user,'l',-mov,weight,row_type)
f.write('%d,%s,%d,%d,%s' % (-mov,'r',user,weight,row_type) + "\n")
f.write('%d,%s,%d,%d,%s' % (user,'l',-mov,weight,row_type) + "\n")
f.close()
if __name__ == "__main__":
main()
| apache-2.0 |
wazeerzulfikar/scikit-learn | examples/model_selection/plot_grid_search_digits.py | 56 | 2761 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset into two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
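# The grids above expand to 8 RBF candidates (2 gammas x 4 values of C) plus 4
# linear ones, so each GridSearchCV below fits 12 candidates x 5 CV folds.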
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
sudarkoff/ansible | contrib/inventory/proxmox.py | 74 | 5579 | #!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib
try:
import json
except ImportError:
import simplejson as json
import os
import sys
from optparse import OptionParser
from six import iteritems
from ansible.module_utils.urls import open_url
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxQemu(dict):
def get_variables(self):
variables = {}
for key, value in iteritems(self):
variables['proxmox_' + key] = value
return variables
class ProxmoxQemuList(list):
    def __init__(self, data=None):
        for item in (data or []):
            self.append(ProxmoxQemu(item))
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
def get_by_name(self, name):
results = [qemu for qemu in self if qemu['name'] == name]
return results[0] if len(results) > 0 else None
def get_variables(self):
variables = {}
for qemu in self:
variables[qemu['name']] = qemu.get_variables()
return variables
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{}api2/json/access/ticket'.format(self.options.url)
request_params = urllib.urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(open_url(request_path, data=request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
request_path = '{}{}'.format(self.options.url, url)
headers = {'Cookie': 'PVEAuthCookie={}'.format(self.credentials['ticket'])}
request = open_url(request_path, data=data, headers=headers)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def node_qemu(self, node):
return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
results = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
}
}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
results['all']['hosts'] += qemu_list.get_names()
results['_meta']['hostvars'].update(qemu_list.get_variables())
# pools
for pool in proxmox_api.pools().get_names():
results[pool] = {
'hosts': proxmox_api.pool(pool).get_members_name(),
}
return results
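# Illustrative shape of the --list output assembled above (names and values
# depend on the cluster; hostvars come from ProxmoxQemu.get_variables):
#
#   {
#       "all": {"hosts": ["vm-web", "vm-db"]},
#       "_meta": {"hostvars": {"vm-web": {"proxmox_status": "running"}}},
#       "mypool": {"hosts": ["vm-web"]}
#   }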
def main_host(options):
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
return qemu.get_variables()
return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
(options, args) = parser.parse_args()
if options.list:
data = main_list(options)
elif options.host:
data = main_host(options)
else:
parser.print_help()
sys.exit(1)
indent = None
if options.pretty:
indent = 2
print(json.dumps(data, indent=indent))
if __name__ == '__main__':
main()
| gpl-3.0 |
myangeline/pygame | flightgame/plane.py | 1 | 1930 | # _*_ coding:utf-8 _*_
import random
import pygame
__author__ = 'Administrator'
class Plane():
def __init__(self, width, height, value, image, type):
self.width = width
self.height = height
self.value = value
self.image = image
self.type = type
self.rect = pygame.Rect(self.image.get_rect())
# self.position = [random.randint(0, 450 - self.width), -self.height / 2]
class Hero(Plane):
def __init__(self, width, height, value, image, type, position):
Plane.__init__(self, width, height, value, image, type)
self.position = position
self.keys = [False, False, False, False]
def deal_keydown(self, event):
if event.key == pygame.K_w:
self.keys[0] = True
elif event.key == pygame.K_a:
self.keys[1] = True
elif event.key == pygame.K_s:
self.keys[2] = True
elif event.key == pygame.K_d:
self.keys[3] = True
def deal_keyup(self, event):
if event.key == pygame.K_w:
self.keys[0] = False
elif event.key == pygame.K_a:
self.keys[1] = False
elif event.key == pygame.K_s:
self.keys[2] = False
elif event.key == pygame.K_d:
self.keys[3] = False
def move(self, step=5):
if self.keys[0]:
self.position[1] -= step
elif self.keys[2]:
self.position[1] += step
if self.keys[1]:
self.position[0] -= step
elif self.keys[3]:
self.position[0] += step
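    # A hedged sketch of how a main loop would drive this class (assumes a
    # pygame display, a `hero` instance, and a clock set up elsewhere):
    #
    #   for event in pygame.event.get():
    #       if event.type == pygame.KEYDOWN:
    #           hero.deal_keydown(event)
    #       elif event.type == pygame.KEYUP:
    #           hero.deal_keyup(event)
    #   hero.move()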
class EnemyPlane(Plane):
def __init__(self, width, height, value, image, type):
Plane.__init__(self, width, height, value, image, type)
self.position = [random.randint(0, 450 - self.width), -self.height / 2]
def move(self, step=3):
if self.position[1] > 650:
self.value = 0
        self.position[1] += step
| apache-2.0 |
ethguo/xpify | legacy/main - LEGACY (S).py | 1 | 34036 | #!/usr/bin/env python
###
# XPify - An application to gamify learning.
# Version: DEV
# Timestamp: NULL
#
# Written by Ethan Guo
# Using Google App Engine and the webapp2 framework
#
###
### IMPORTS ###
import time
import uuid
import cgi
import webapp2
from google.appengine.ext.webapp import util
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.api import users
### HELPER FUNCTIONS ###
contentHTML = '''
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris et arcu congue, placerat
massa ut, vestibulum enim. Cras id tempus erat. Ut enim leo, fermentum eu elementum vel,
varius in nisl. Suspendisse dapibus suscipit mauris, eget ornare justo congue at. Fusce
hendrerit ante ut dictum tincidunt. Morbi nec metus sit amet odio elementum consectetur in eu
mi. Proin sed velit sapien. Proin auctor feugiat leo vel gravida. Nullam neque enim, hendrerit
fringilla velit non, fermentum pretium tortor. Sed id congue felis. Vestibulum laoreet a eros
a porttitor.</p>
<p>Sed fringilla, ipsum a congue tincidunt, odio purus lobortis lacus, mollis dictum turpis
nisi quis quam. Fusce in interdum diam. Mauris tempus dolor ut viverra pellentesque. Sed id
condimentum ligula, et venenatis augue. Sed gravida scelerisque metus, ut cursus enim
elementum nec. Mauris euismod tincidunt felis, ut auctor erat malesuada in. Sed aliquam turpis
sit amet velit malesuada vehicula. Nulla fermentum nibh sapien, et convallis erat eleifend at.
Phasellus accumsan mattis libero eu porttitor.</p>
<p>Duis ac egestas nulla, egestas tincidunt enim. Nulla ultrices cursus ipsum vitae vehicula.
Donec ac libero bibendum, semper magna vitae, vestibulum tellus. Suspendisse adipiscing tellus
ut tempor luctus. Praesent accumsan, mi vel scelerisque mattis, purus nisi ultricies ante, sit
amet malesuada nisl sapien eu erat. Ut iaculis mauris eget diam viverra, quis lacinia urna
elementum. Donec non ultrices nunc. Etiam quis adipiscing nunc. Proin luctus tellus in nulla
pellentesque, vel placerat nisi volutpat. Nullam at urna diam. Praesent congue tortor ut nisi
lacinia ornare. Proin tristique tincidunt posuere. Morbi ut elit porttitor, convallis nisl in,
posuere augue. Nullam fermentum urna odio, sagittis dapibus lorem elementum et. Pellentesque
habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Phasellus
semper feugiat erat nec semper.</p>
<p>In ut laoreet orci. Curabitur varius dui id lorem dignissim sagittis. Mauris imperdiet
libero nec orci gravida, a vulputate massa adipiscing. Nullam auctor nulla lorem, sit amet
rhoncus risus scelerisque sed. Duis nisl elit, consectetur in dictum a, varius ac elit. Nunc
ut tempus sapien. Sed sollicitudin, eros in semper ultricies, est sem facilisis urna, a
eleifend erat lorem sed orci. Proin pretium dolor ac tellus imperdiet, in mattis libero
porttitor. Nullam lorem velit, semper at lobortis sed, vestibulum sed nulla. Morbi lacinia,
nunc non luctus lacinia, sem elit laoreet elit, at dapibus nisi nisl ut purus. Maecenas vitae
imperdiet arcu, eget suscipit nunc. Class aptent taciti sociosqu ad litora torquent per
conubia nostra, per inceptos himenaeos. Maecenas sollicitudin, tellus nec blandit pharetra,
felis arcu dignissim nibh, vel pellentesque mi sem a odio. Proin luctus blandit venenatis.
Suspendisse ac gravida diam. Fusce blandit volutpat urna in laoreet.</p>
<p>Praesent ac tortor non est accumsan vehicula. Morbi mi felis, vulputate consequat viverra
sit amet, vehicula ac turpis. Aenean mattis blandit risus gravida gravida. Etiam hendrerit
ipsum arcu, et facilisis elit pharetra in. Sed cursus lobortis elit, ac imperdiet risus
vehicula id. Quisque ac dui id libero sagittis viverra. Praesent semper dictum venenatis.
Donec sed diam et velit sodales semper. Phasellus sed lacinia est. Nam sit amet dolor urna.
Nunc sagittis vitae sem eu posuere. Vestibulum in sodales lectus. Praesent rhoncus sit amet
risus non interdum.</p>
<p>Praesent ullamcorper et metus sed hendrerit. Etiam vitae sem nunc. Cras consequat in tortor
in sollicitudin. Aliquam quam enim, vestibulum a ultricies in, commodo et lectus. Suspendisse
potenti. Maecenas posuere vitae libero sed molestie. In et ligula quis lectus viverra cursus
eu quis turpis. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere
cubilia Curae; Nullam in purus dolor. Morbi sit amet sagittis dolor. Integer purus dolor,
hendrerit ac nisi blandit, tempor aliquam nunc. Nunc malesuada pharetra mollis. Morbi ultrices
velit sagittis semper semper. Pellentesque habitant morbi tristique senectus et netus et
malesuada fames ac turpis egestas. Maecenas nisi est, varius ac est vitae, hendrerit mattis
magna. Praesent libero augue, consectetur eu accumsan vulputate, feugiat at nunc.</p>
<p>Donec vel dolor vel lorem laoreet consequat. Integer volutpat neque interdum neque
fringilla, a tincidunt eros varius. Vivamus eu auctor orci, ac tempus sem. Donec libero ipsum,
varius vitae libero et, porta euismod dui. Mauris non diam id nisl blandit dignissim a quis
tellus. Etiam semper pellentesque felis, non tincidunt augue mollis vel. Vivamus tincidunt
elit lacus, eget congue ante placerat nec. Ut vel enim sit amet odio gravida rhoncus.
Vestibulum fermentum sed nibh adipiscing placerat. Proin eu urna nulla. Quisque eleifend
tristique justo, consectetur accumsan purus imperdiet id. Duis vehicula enim tellus, non
dapibus augue elementum id. Aenean ut aliquet nibh, in pellentesque elit. Pellentesque vitae
molestie quam. Maecenas quis ultrices quam, eget sodales est. Nunc porttitor orci vitae
bibendum venenatis.</p>
<p>Maecenas molestie eros nisl, sed dictum lectus consectetur et. Phasellus auctor libero sed
tristique mattis. Ut adipiscing libero quis rutrum ullamcorper. Mauris eget nisl vitae diam
fringilla posuere. Quisque sed mi tristique, fringilla odio eget, sollicitudin magna. Sed
viverra libero congue nisi iaculis, vitae hendrerit velit aliquet. Aliquam sit amet faucibus
lectus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis
egestas. Mauris iaculis neque ornare mauris suscipit, at tincidunt dui ornare. Sed id
vestibulum quam. Praesent at lacus ut tortor accumsan rutrum. Nulla vulputate sit amet eros ac
commodo. Morbi sit amet nibh ligula. Nunc quis mi vel tortor hendrerit tempus. Cras quis
vehicula metus.</p>
'''
def composePage(content, style=0):
return '''
<!DOCTYPE html>
<html>
<head>
<title>XPify - BETA</title>
<link rel="stylesheet" type="text/css" href="/static/css/theme/jquery-ui-1.10.4.custom.min.css" />
<link rel="stylesheet" type="text/css" href="/static/css/styles-global.css" />
<link rel="stylesheet" type="text/css" href="/static/css/styles%d.css" />
<link rel="shortcut icon" href="/static/images/favicon.ico" />
<link rel="apple-touch-icon" sizes="57x57" href="/static/images/apple-touch-icon-57x57.png" />
<link rel="apple-touch-icon" sizes="114x114" href="/static/images/apple-touch-icon-114x114.png" />
<link rel="apple-touch-icon" sizes="72x72" href="/static/images/apple-touch-icon-72x72.png" />
<link rel="apple-touch-icon" sizes="144x144" href="/static/images/apple-touch-icon-144x144.png" />
<link rel="apple-touch-icon" sizes="60x60" href="/static/images/apple-touch-icon-60x60.png" />
<link rel="apple-touch-icon" sizes="120x120" href="/static/images/apple-touch-icon-120x120.png" />
<link rel="apple-touch-icon" sizes="76x76" href="/static/images/apple-touch-icon-76x76.png" />
<link rel="apple-touch-icon" sizes="152x152" href="/static/images/apple-touch-icon-152x152.png" />
<link rel="icon" type="image/png" href="/static/images/favicon-196x196.png" sizes="196x196" />
<link rel="icon" type="image/png" href="/static/images/favicon-160x160.png" sizes="160x160" />
<link rel="icon" type="image/png" href="/static/images/favicon-96x96.png" sizes="96x96" />
<link rel="icon" type="image/png" href="/static/images/favicon-16x16.png" sizes="16x16" />
<link rel="icon" type="image/png" href="/static/images/favicon-32x32.png" sizes="32x32" />
<meta name="msapplication-TileColor" content="#603cba" />
<meta name="msapplication-TileImage" content="/static/images/mstile-144x144.png" />
<meta name="msapplication-square70x70logo" content="/static/images/mstile-70x70.png" />
<meta name="msapplication-square144x144logo" content="/static/images/mstile-144x144.png" />
<meta name="msapplication-square150x150logo" content="/static/images/mstile-150x150.png" />
<meta name="msapplication-square310x310logo" content="/static/images/mstile-310x310.png" />
<meta name="msapplication-wide310x150logo" content="/static/images/mstile-310x150.png" />
<script type="text/javascript" src="/static/js/jquery-1.10.2.min.js"></script>
<script type="text/javascript" src="/static/js/jquery-ui-1.10.4.custom.min.js"></script>
<script type="text/javascript" src="/static/js/portlets.js"></script>
</head>
%s
</html>
''' %(style, content)
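# Hedged usage sketch (illustrative only, not called anywhere): composePage
# wraps a <body> fragment in the shared <head> boilerplate; `style` selects
# which /static/css/styles<N>.css sheet gets linked.
#
#     page = composePage('<body><h1>Hello</h1></body>', style=1)
#     # -> a full HTML document linking styles1.css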
def getUserEntry(gUser):
results = ndb.gql("SELECT __key__ FROM EntryUser "
"WHERE userid = :1", gUser.user_id()).get()
return results
def getNextUUID():
longid = uuid.uuid4()
id = str(int(longid))[:8]
return id
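# Illustrative note (assumption, not from the original author): getNextUUID
# derives a short numeric code from a random UUID, e.g.
#
#     getNextUUID()  # -> something like '27313542'
#
# Truncating to 8 digits makes collisions possible, so callers treating the
# result as UNIQUE (classid, schoolid) rely on luck rather than a guarantee.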
### DATABASE ###
class EntryUser(ndb.Model):
def getUsername(self):
return self.fullname or self.userobj.nickname() or "Anonymous"
def getClasses(self):
allClasses = ndb.gql("SELECT students, classid FROM EntryClass "
"WHERE schoolid = :1",
str(self.schoolid)).fetch(200)
classids = []
for class_ in allClasses:
if self.key in class_.students:
classids.append(class_.classid)
try:
return ndb.gql("SELECT __key__ FROM EntryClass "
"WHERE classid IN :1",
classids).fetch(20)
except db.BadQueryError:
return None
def getSchool(self):
return ndb.gql("SELECT __key__ FROM EntrySchool WHERE schoolid = :1", str(self.schoolid)).get()
usertype = ndb.StringProperty(required=True, # FINAL; Role of user
choices=["Student", "Teacher", "Administrator", "Undefined"] )
userobj = ndb.UserProperty(required=True) # User object
userid = ndb.StringProperty(required=True) # FINAL, UNIQUE; Generated from user object at creation
fullname = ndb.StringProperty(default="") # Used to compute $username
username = ndb.ComputedProperty(getUsername) # TYPE: String
school = ndb.ComputedProperty(getSchool) # FINAL; TYPE: Keys(EntrySchool)
schoolid = ndb.IntegerProperty(required=True) # FINAL; $schoolid property of $school
classes = ndb.ComputedProperty(getClasses) # UPLOOK; TYPE: Keys(EntryClass)
class EntryClass(ndb.Model):
def getSchool(self):
return ndb.gql("SELECT __key__ FROM EntrySchool WHERE schoolid = :1", self.schoolid).get()
classname = ndb.StringProperty(required=True) # Descriptive Name
classid = ndb.StringProperty(required=True) # FINAL, UNIQUE; Generated at creation
levels = ndb.JsonProperty() # FORMAT: [(<minXP>, <name>, <description>), ...]
school = ndb.ComputedProperty(getSchool) # FINAL; TYPE: Keys(EntrySchool)
schoolid = ndb.StringProperty(required=True) # FINAL; $schoolid property of $school
teachers = ndb.KeyProperty(repeated=True, # DEFINE; TYPE: Keys(EntryUser)[Teacher]
kind="EntryUser")
students = ndb.KeyProperty(repeated=True, # DEFINE; TYPE: Keys(EntryUser)[Student]
kind="EntryUser")
studentscores = ndb.JsonProperty() # FORMAT: {<studentKey>: <score>}
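# Illustrative shapes for the JSON properties above (assumed from the FORMAT
# comments, not enforced by the model):
#
#     levels = [(0, "Novice", "Just starting"), (100, "Adept", "Halfway there")]
#     studentscores = {"<urlsafe student key>": 42}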
class EntrySchool(ndb.Model):
def getClasses(self):
classKeys = ndb.gql("SELECT __key__ FROM EntryClass WHERE schoolid = :1", self.schoolid).fetch(200)
return classKeys  # without this return, the computed property was always None
schoolname = ndb.StringProperty(required=True) # Descriptive Name
schoolid = ndb.StringProperty(required=True) # FINAL, UNIQUE; Generated at creation
classes = ndb.ComputedProperty(getClasses) # DOWNLOOK; FORMAT: Keys(EntryClass)
### PAGES ###
class PageLanding(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if gUser:
self.redirect("/my/home/")
else:
self.response.write(composePage('''
<body>
<header>
<div id="logo-wrapper"><img src="/static/images/favicon-196x196.png"
height="196" width="196" alt="[XPify]" /></div>
</header>
<div id="login-wrapper">
<a href="%s">Login</a>
</div>
</body>
''' %(users.create_login_url("/postlogin/")), style=1 ))
class PageMyHome(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
user = getUserEntry(gUser).get()
userClasses = user.classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
classesHTML += '<li><a href="/class/challenges?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li class="join-class-plus"><a href="/class/search/"><div>+</div></a></li>'
else:
if user.usertype in ["Teacher", "Administrator"]:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
contentHTML = '''
<div class="column">
<div class="portlet">
<div class="portlet-header">Header 1</div>
<div class="portlet-content">Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris et arcu congue, placerat
massa ut, vestibulum enim. Cras id tempus erat.</div>
</div>
<div class="portlet">
<div class="portlet-header">Header 3</div>
<div class="portlet-content">Sed fringilla, ipsum a congue tincidunt, odio purus lobortis lacus, mollis dictum turpis
nisi quis quam. Fusce in interdum diam. Mauris tempus dolor ut viverra pellentesque.</div>
</div>
</div>
<div class="column">
<div class="portlet">
<div class="portlet-header">Header 2</div>
<div class="portlet-content">Duis ac egestas nulla, egestas tincidunt enim. Nulla ultrices cursus ipsum vitae vehicula.
Donec ac libero bibendum, semper magna vitae, vestibulum tellus.</div>
</div>
</div>
'''
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li id="current"><a href="/my/home/">Home</a></li>
<li><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%s
</ul>
</nav>
</aside>
<main>
<div id="content">
%s
</div>
</main>
</body>
''' %(classesHTML, contentHTML) ))
class PageProfile(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
user = getUserEntry(gUser).get()
userClasses = user.classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
classesHTML += '<li><a href="/class/challenges?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li class="join-class-plus"><a href="/class/search/"><div>+</div></a></li>'
else:
if user.usertype in ["Teacher", "Administrator"]:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
# Generate Content
tUserId = self.request.get("user")
tSelf = False
if not tUserId:
tUserId = gUser.user_id()
tSelf = True
tUser = ndb.gql("SELECT * FROM EntryUser WHERE userid = :1", tUserId).get()
subvalues = tUser.to_dict()
subvalues["schoolname"] = tUser.school.get().schoolname
if tSelf:
fullnameHTML = '''
<td class="table-name">Full Name</td>
<td class="table-value-editable">
<input type="text" name="fullname" value="%s" />
</td>
''' %(subvalues["fullname"])
else:
fullnameHTML = '''
<td class="table-name">Full Name</td>
<td class="table-value">%s</td>
''' %(subvalues["fullname"])
subvalues["fullnameHTML"] = fullnameHTML
contentHTML = '''
<button id="profile-change-picture" onclick="changePicture()">
<img id="profile-picture" src="/static/images/favicon-196x196.png"
height="196" width="196" alt="Profile Picture" />
</button>
<form id="table-form" action="/profile/" method="POST">
<table id="usertable">
<tr>
%(fullnameHTML)s
</tr> <tr>
<td class="table-name">User Type</td>
<td class="table-value">%(usertype)s</td>
</tr> <tr>
<td class="table-name">User ID</td>
<td class="table-value">%(userid)s</td>
</tr> <tr>
<td class="table-name">Classes</td>
<td class="table-value">%(classes)s</td>
</tr> <!--<tr>
<td class="table-name">School</td>
<td class="table-value">%(schoolname)s</td>
</tr> <tr>
<td class="table-name">School ID</td>
<td class="table-value">%(schoolid)s</td>
</tr>-->
</table>
<br />
<input type="submit" value="Save"/>
</form>
''' %(subvalues)
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li><a href="/my/home/">Home</a></li>
<li id="current"><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%s
</ul>
</nav>
</aside>
<main>
<div id="content">
%s
</div>
</main>
</body>
''' %(classesHTML, contentHTML) ))
def post(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
user = getUserEntry(gUser)
fullname = self.request.get("fullname")
if fullname:
entry = user.get()
entry.fullname = fullname
entry.put()
self.redirect("/profile/")
class PageClassSearch(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
userKey = ndb.gql("SELECT __key__ FROM EntryUser WHERE userid = :1",
gUser.user_id()).get()
userClasses = userKey.get().classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
classesHTML += '<li><a href="/class/challenges?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li id="current"><a href="/class/search/"><div>+</div></a></li>'
else:
if userKey.get().usertype in ["Teacher", "Administrator"]:  # 'user' was never defined in this handler
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
# Generate Content
contentHTML = '''
<form action="/class/search" method="GET">
<input type="text" name="query" placeholder="Class name (Coming soon)" disabled="true" size="30"/> OR
<input type="text" name="query" placeholder="Class code" pattern="[0-9]+" />
<input type="submit" value="Search" />
</form>
'''
query = self.request.get("query")
if query:
classid = None
try:
int(query)  # class codes are numeric; anything else falls back to a name search
classid = query
except ValueError:
results = ndb.gql("SELECT * FROM EntryClass WHERE classname = :1", query).fetch(200)
else:
results = ndb.gql("SELECT * FROM EntryClass WHERE classid = :1", classid).fetch(200)
resultsHTML = ""
for result in results:
resultsHTML += '''
<label class="label-class">
<input type="radio" name="classid" value="%(classid)s" />
%(classname)s (%(classid)s)
</label>
''' %result.to_dict()
#TODO: Check if student already in class
contentHTML = '''
<form id="search-form" action="/class/search" method="GET">
<input type="text" name="query" placeholder="Class name (Coming soon)" disabled="true" size="30"/> OR
<input type="text" name="query" value="%s" placeholder="Class code" />
<input type="submit" value="Search" />
</form>
<hr />
<form id="joinclass-form" action="/class/search" method="POST">
<ul>
%s
</ul>
<input type="submit" value="Go!" />
</form>
''' %(query, resultsHTML)
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li><a href="/my/home/">Home</a></li>
<li><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%s
</ul>
</nav>
</aside>
<main>
<div id="content">
%s
</div>
</main>
</body>
''' %(classesHTML, contentHTML) ))
def post(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
classget = self.request.get("classid")
if classget:
classKey = ndb.gql("SELECT __key__ FROM EntryClass "
"WHERE classid = :1",
classget).get()
if classKey:
entry = classKey.get()
userKey = getUserEntry(gUser)
if userKey in entry.students or userKey in entry.teachers:
self.response.out.write('You are already in that class! <br /><a href="/class/challenges?class=%s">Go to class home</a>' %(classget))
else:
user = userKey.get()
if user.usertype == "Student":
entry.students.append(userKey)
elif user.usertype in ["Teacher", "Administrator"]:
entry.teachers.append(userKey)
entry.put()
self.redirect("/class/challenges?class=%s" %(classget) )
class PageClassChallenges(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if not gUser:
self.redirect("/login/")
else:
classid = self.request.get("class")
user = getUserEntry(gUser).get()
userClasses = user.classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
if str(class_.classid) == classid:
classesHTML += '<li id="current"><a href="/class/challenges?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
else:
classesHTML += '<li><a href="/class/challenges?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li class="join-class-plus"><a href="/class/search/"><div>+</div></a></li>'
else:
if user.usertype in ["Teacher", "Administrator"]:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
manageHTML = ""
if user.usertype == "Teacher":
manageHTML = '<li><a href="/class/manage/?class=%s">Manage</a></li>' %(classid)
contentHTML = ""  # challenge listing is not implemented yet; defined here to avoid a NameError below
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li><a href="/my/home/">Home</a></li>
<li><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%(classes)s
</ul>
</nav>
</aside>
<main>
<nav id="span-navbar">
<ul>
<li id="current"><a href="/class/challenges?class=%(classid)s">Challenges</a></li>
<li><a href="/class/leaderboards?class=%(classid)s">Leaderboards</a></li>
<li><div class="unimplemented">Coming Soon</div></li>
%(manage)s
</ul>
</nav>
<div id="content">
%(content)s
</div>
</main>
</body>
''' %{"classes":classesHTML, "classid":classid, "manage":manageHTML, "content":contentHTML} ))
class PageClassCreate(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
user = getUserEntry(gUser).get() if gUser else None  # look up the entry only when logged in
if not (gUser and user and user.usertype == "Teacher"):
self.redirect("/login/")
else:
classid = self.request.get("class")
userClasses = user.classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
if str(class_.classid) == classid:
classesHTML += '<li id="current"><a href="/class/home?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
else:
classesHTML += '<li><a href="/class/home?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li class="join-class-plus" id="current"><a href="/class/search/"><div>+</div></a></li>'
else:
if user.usertype in ["Teacher", "Administrator"]:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li id="current"><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li><a href="/my/home/">Home</a></li>
<li><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%(classes)s
</ul>
</nav>
</aside>
<main>
<div id="content">
<form action="/class/create" method="POST">
<label>
Class Name: <input type="text" name="classname">
</label>
<input type="submit" value="Create">
</form>
</div>
</main>
</body>
''' %{"classes":classesHTML, "classid":classid, "content":contentHTML} ))
def post(self):
entry = EntryClass()
entry.classname = self.request.get("classname", "Untitled Class")
entry.classid = getNextUUID()
entry.schoolid = self.request.get("schoolid", "000001")  # EntryClass.schoolid is a StringProperty
entry.put()
class PageClassManage(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
user = getUserEntry(gUser).get() if gUser else None  # look up the entry only when logged in
if not (gUser and user and user.usertype == "Teacher"):
self.redirect("/login/")
else:
classid = self.request.get("class")
userClasses = user.classes
classesHTML = ""
if userClasses:
for classKey in userClasses:
class_ = classKey.get()
if str(class_.classid) == classid:
classesHTML += '<li id="current"><a href="/class/home?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
else:
classesHTML += '<li><a href="/class/home?class=%s">%s</a></li>\n' %(
class_.classid, class_.classname)
classesHTML += '<li class="join-class-plus"><a href="/class/search/"><div>+</div></a></li>'
else:
if user.usertype in ["Teacher", "Administrator"]:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/create/">
Create one!
</a></li>
</div>
'''
else:
classesHTML = '''
<div id="no-classes">
<span id="no-classes-banner">You have no classes!</span>
<li><a href="/class/search/">
Join one!
</a></li>
</div>
'''
class_ = ndb.gql("SELECT * FROM EntryClass WHERE classid = :1", classid).get()  # classid is stored as a string; int() broke the lookup
subvalues = class_.to_dict()
subvalues["teachersHTML"] = ", ".join([teacherKey.get().username for teacherKey in class_.teachers])
subvalues["studentsHTML"] = ", ".join([studentKey.get().username for studentKey in class_.students])
subvalues["schoolname"] = class_.school.get().schoolname
contentHTML = '''
<form id="table-form" action="/class/manage?class=%s" method="POST">
<table id="classtable">
<tr>
<td class="table-name">Class Name</td>
<td class="table-value-editable">
<input type="text" name="classname" value="%(classname)s" />
</td>
</tr> <tr>
<td class="table-name">Class ID</td>
<td class="table-value">%(classid)s</td>
</tr> <tr>
<td class="table-name">Teachers</td>
<td class="table-value">%(teachersHTML)s</td>
</tr> <tr>
<td class="table-name">Students</td>
<td class="table-value">%(studentsHTML)s</td>
</tr> <tr>
<td class="table-name">School</td>
<td class="table-value">%(schoolname)s</td>
</tr> <tr>
<td class="table-name">School ID</td>
<td class="table-value">%(schoolid)s</td>
</tr>
</table>
<br />
<input type="submit" value="Save"/>
</form>
''' %(subvalues)
self.response.out.write(composePage('''
<body>
<aside>
<div id="logo-wrapper"><img src="/static/images/favicon-96x96.png"
height="96" width="96" alt="[XPify]" /></div>
<nav id="vert-navbar">
<ul id="vert-navbar-main">
<li><a href="/my/home/">Home</a></li>
<li><a href="/profile/">Profile</a></li>
</ul>
<span id="vert-navbar-classes-header">Classes</span>
<ul id="vert-navbar-classes-list">
%(classes)s
</ul>
</nav>
</aside>
<main>
<nav id="span-navbar">
<ul>
<li><a href="/class/challenges?=%(classid)s">Challenges</a></li>
<li><a href="/class/leaderboards?=%(classid)s">Leaderboards</a></li>
<li><div class="unimplemented">Coming Soon</div></li>
<li id="current"><a href="/class/manage/?class=%(classid)s">Manage</a></li>
</ul>
</nav>
<div id="content">
%(content)s
</div>
</main>
</body>
''' %{"classes":classesHTML, "classid":classid, "content":contentHTML} ))
class PagePostlogin(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
continueURL = self.request.get("continue", "/my/home/")
tUser = ndb.gql("SELECT __key__ FROM EntryUser WHERE userid = :1",
gUser.user_id()).get()
if not tUser:
# Show User Creation
self.response.out.write(composePage('''
<body>
<main>
<form id="user-form" action="/postlogin/" method="POST">
<label class="label-school">
School Code: <input type="text" name="school" value="000001" pattern="[0-9]+" />
</label>
<br />
<span id="header-usertype">User Type:</span>
<br />
<label class="label-usertype">
<input type="radio" name="usertype" value="Student" checked="true"/> Student
</label><br />
<label class="label-usertype">
<input type="radio" name="usertype" value="Teacher" /> Teacher
</label><br />
<label class="label-usertype">
<input type="radio" name="usertype" value="Administrator" /> Administrator
</label>
<br />
<input type="submit" />
</form>
</main>
</body>
''', style=1))
else:
self.redirect(continueURL)
def post(self):
schoolid = int(self.request.get("school"))
usertype = self.request.get("usertype")
if schoolid and usertype:
gUser = users.get_current_user()
entry = EntryUser()
entry.schoolid = schoolid
entry.usertype = usertype
entry.userobj = gUser
entry.userid = gUser.user_id()
entry.put()
else:
self.redirect("/postlogin/")
return  # without this, the success redirect below clobbered the error redirect
# Continue
self.redirect("/d/my/home/")
### REDIRECTS ###
class RedirectLogin(webapp2.RequestHandler):
def get(self):
target = cgi.escape(self.request.get("continue", "/my/home/"))
self.redirect(users.create_login_url("/postlogin/?continue=" + target))
class RedirectLogout(webapp2.RequestHandler):
def get(self):
target = cgi.escape(self.request.get("continue", "/"))
self.redirect(users.create_logout_url(target))
class RedirectRestrictedPage(webapp2.RequestHandler):
target = "/"
def get(self):
gUser = users.get_current_user()
if gUser:
self.redirect(self.target)
else:
self.redirect(users.create_login_url("/postlogin/?continue=" + self.target))
class RedirectMyHome(RedirectRestrictedPage):
target = "/my/home/"
class RedirectClassSearch(RedirectRestrictedPage):
target = "/class/search/"
class RedirectClassChallenges(RedirectRestrictedPage):
target = "/class/challenges/"
class RedirectDelayMyHome(webapp2.RequestHandler):
def get(self):
gUser = users.get_current_user()
if gUser:
time.sleep(1)
self.redirect("/my/home/")
else:
self.redirect(users.create_login_url("/postlogin/?continue=/my/home"))
#:DEV:
class PageDev(webapp2.RequestHandler):
def post(self):
entry = EntryClass()
def get(self):
if self.request.get("make")=="class":
entry = EntryClass()
entry.classname = self.request.get("classname", "Demo Class")
entry.classid = "000001"  # classid and schoolid are StringProperty fields; octal int literals broke put()
entry.schoolid = self.request.get("schoolid", "000001")
entry.put()
self.response.out.write("Success!")
elif self.request.get("make")=="school":
entry = EntrySchool()
entry.schoolname = "Demo School"
entry.schoolid = "000001"  # EntrySchool.schoolid is a StringProperty
entry.put()
self.response.out.write("Success!")
### APPLICATION ###
app = webapp2.WSGIApplication([
("^(?:/landing)?/?$", PageLanding),
("^/login/?$", RedirectLogin),
("^/logout/?$", RedirectLogout),
("^/postlogin/?$", PagePostlogin),
("^/my/home/?$", PageMyHome),
("^/my/?$|^/home/?$", RedirectMyHome),
("^/d/my/home/?$", RedirectDelayMyHome),
("^/profile/?$", PageProfile),
("^/class/challenges/?$", PageClassChallenges),
("^/class/?$", RedirectClassChallenges),
("^/class/create/?$", PageClassCreate),
("^/class/manage/?$", PageClassManage),
("^/class/search/?$", PageClassSearch),
("^/class/join/?$", RedirectClassSearch),
("^/dev/?$", PageDev)
], debug=True)
| gpl-3.0 |
toshywoshy/ansible | lib/ansible/modules/network/ios/ios_system.py | 18 | 12129 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
lookup_enabled:
description:
- Administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
type: bool
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts a list of DNS servers. See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain name
ios_system:
hostname: ios01
domain_name: test.example.com
domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
ios_system:
state: absent
- name: configure DNS lookup sources
ios_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
ios_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname ios01
- ip domain name test.example.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec
from ansible.module_utils.network.common.utils import ComplexList
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
return vrf in _CONFIGURED_VRFS
config = get_config(module)
_CONFIGURED_VRFS = re.findall(r'vrf definition (\S+)', config)
return vrf in _CONFIGURED_VRFS
def requires_vrf(module, vrf):
if not has_vrf(module, vrf):
module.fail_json(msg='vrf %s is not configured' % vrf)
def diff_list(want, have):
adds = [w for w in want if w not in have]
removes = [h for h in have if h not in want]
return (adds, removes)
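# Hedged illustration (not part of the upstream module): diff_list returns the
# order-preserving additions and removals needed to turn `have` into `want`:
#
#     adds, removes = diff_list(['a', 'b'], ['b', 'c'])
#     # adds == ['a'], removes == ['c']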
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
def needs_update(x):
return want.get(x) is not None and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'Router':
commands.append('no hostname')
if have['lookup_source']:
commands.append('no ip domain lookup source-interface %s' % have['lookup_source'])
if have['lookup_enabled'] is False:
commands.append('ip domain lookup')
vrfs = set()
for item in have['domain_name']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip domain name vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip domain name')
vrfs = set()
for item in have['domain_search']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip domain list vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip domain list')
vrfs = set()
for item in have['name_servers']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip name-server vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip name-server')
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('lookup_source'):
commands.append('ip domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'ip domain lookup'
if want['lookup_enabled'] is False:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['domain_name']:
adds, removes = diff_list(want['domain_name'], have['domain_name'])
for item in removes:
if item['vrf']:
commands.append('no ip domain name vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('no ip domain name %s' % item['name'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip domain name vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('ip domain name %s' % item['name'])
if want['domain_search']:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in removes:
if item['vrf']:
commands.append('no ip domain list vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('no ip domain list %s' % item['name'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip domain list vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('ip domain list %s' % item['name'])
if want['name_servers']:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in removes:
if item['vrf']:
commands.append('no ip name-server vrf %s %s' % (item['vrf'], item['server']))
else:
commands.append('no ip name-server %s' % item['server'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip name-server vrf %s %s' % (item['vrf'], item['server']))
else:
commands.append('ip name-server %s' % item['server'])
return commands
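# Hedged sketch of the generated commands (the values are assumptions, not
# output captured from a device): with state=present,
# want={'hostname': 'ios01', 'domain_name': [{'name': 'example.com', 'vrf': None}], ...}
# and an empty `have`, the function yields commands such as
#
#     ['hostname ios01', 'ip domain name example.com']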
def parse_hostname(config):
match = re.search(r'^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
match = re.findall(r'^ip domain[- ]name (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
for vrf, name in match:
if not vrf:
vrf = None
matches.append({'name': name, 'vrf': vrf})
return matches
def parse_domain_search(config):
match = re.findall(r'^ip domain[- ]list (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
for vrf, name in match:
if not vrf:
vrf = None
matches.append({'name': name, 'vrf': vrf})
return matches
def parse_name_servers(config):
match = re.findall(r'^ip name-server (?:vrf (\S+) )*(.*)', config, re.M)
matches = list()
for vrf, servers in match:
if not vrf:
vrf = None
for server in servers.split():
matches.append({'server': server, 'vrf': vrf})
return matches
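# Hedged example of the regex above (the config snippet is assumed):
#
#     parse_name_servers('ip name-server vrf mgmt 8.8.8.8 8.8.4.4')
#     # -> [{'server': '8.8.8.8', 'vrf': 'mgmt'},
#     #     {'server': '8.8.4.4', 'vrf': 'mgmt'}]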
def parse_lookup_source(config):
match = re.search(r'ip domain[- ]lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_search': parse_domain_search(config),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'no ip domain lookup' not in config and 'no ip domain-lookup' not in config,
'name_servers': parse_name_servers(config)
}
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
}
domain_name = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
domain_search = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
name_servers = ComplexList(dict(
server=dict(key=True),
vrf=dict()
), module)
for arg, cast in [('domain_name', domain_name),
('domain_search', domain_search),
('name_servers', name_servers)]:
if module.params[arg]:
obj[arg] = cast(module.params[arg])
else:
obj[arg] = None
return obj
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(type='list'),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
shail2810/nova | nova/servicegroup/drivers/zk.py | 49 | 8221 | # Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova import exception
from nova.i18n import _LE, _LW
from nova.servicegroup.drivers import base
evzookeeper = importutils.try_import('evzookeeper')
membership = importutils.try_import('evzookeeper.membership')
zookeeper = importutils.try_import('zookeeper')
zk_driver_opts = [
cfg.StrOpt('address',
help='The ZooKeeper addresses for servicegroup service in the '
'format of host1:port,host2:port,host3:port'),
cfg.IntOpt('recv_timeout',
default=4000,
help='The recv_timeout parameter for the zk session'),
cfg.StrOpt('sg_prefix',
default="/servicegroups",
help='The prefix used in ZooKeeper to store ephemeral nodes'),
cfg.IntOpt('sg_retry_interval',
default=5,
help='Number of seconds to wait until retrying to join the '
'session'),
]
CONF = cfg.CONF
CONF.register_opts(zk_driver_opts, group="zookeeper")
LOG = logging.getLogger(__name__)
class ZooKeeperDriver(base.Driver):
"""ZooKeeper driver for the service group API."""
def __init__(self, *args, **kwargs):
"""Create the zk session object."""
if not all([evzookeeper, membership, zookeeper]):
raise ImportError('zookeeper module not found')
self._memberships = {}
self._monitors = {}
super(ZooKeeperDriver, self).__init__()
self._cached_session = None
@property
def _session(self):
"""Creates zookeeper session in lazy manner.
Session is created in lazy manner to mitigate lock problem
in zookeeper.
Lock happens when many processes try to use the same zk handle.
Lazy creation allows to deffer initialization of session until
is really required by worker (child process).
:returns: ZKSession -- new or created earlier
"""
if self._cached_session is None:
self._cached_session = self._init_session()
return self._cached_session
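# Hedged note on the pattern above: the property memoizes one ZKSession per
# process, e.g. (sketch, assuming a forked worker):
#
#     driver = ZooKeeperDriver()        # no ZK connection yet
#     driver.join('host1', 'compute')   # first use creates the session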
def _init_session(self):
"""Initializes new session.
Optionally creates required servicegroup prefix.
:returns ZKSession - newly created session
"""
null = open(os.devnull, "w")
session = evzookeeper.ZKSession(CONF.zookeeper.address,
recv_timeout=
CONF.zookeeper.recv_timeout,
zklog_fd=null)
# Make sure the prefix exists
try:
session.create(CONF.zookeeper.sg_prefix, "",
acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
except zookeeper.NodeExistsException:
pass
# Log a warning about quality for this driver.
LOG.warning(_LW('The ZooKeeper service group driver in Nova is not '
'tested by the OpenStack project and thus its quality '
'can not be ensured. This may change in the future, '
'but current deployers should be aware that the use '
'of it in production right now may be risky.'))
return session
def join(self, member, group, service=None):
"""Add a new member to a service group.
:param member: the joined member ID/name
:param group: the group ID/name, of the joined member
:param service: a `nova.service.Service` object
"""
process_id = str(os.getpid())
LOG.debug('ZooKeeperDriver: join new member %(id)s(%(pid)s) to the '
'%(gr)s group, service=%(sr)s',
{'id': member, 'pid': process_id,
'gr': group, 'sr': service})
zk_member = self._memberships.get((group, member), None)  # do not shadow 'member'; the path below needs it
if zk_member is None:
# the first time to join. Generate a new object
path = "%s/%s/%s" % (CONF.zookeeper.sg_prefix, group, member)
try:
zk_member = membership.Membership(self._session, path,
process_id)
except RuntimeError:
LOG.exception(_LE("Unable to join. It is possible that either"
" another node exists with the same name, or"
" this node just restarted. We will try "
"again in a short while to make sure."))
eventlet.sleep(CONF.zookeeper.sg_retry_interval)
zk_member = membership.Membership(self._session, path, process_id)  # keep the payload consistent with the first attempt
self._memberships[(group, member)] = zk_member
def is_up(self, service_ref):
group_id = service_ref['topic']
member_id = service_ref['host']
all_members = self._get_all(group_id)
return member_id in all_members
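# Hedged usage sketch (the service_ref fields are taken from the code above):
#
#     driver.is_up({'topic': 'compute', 'host': 'node-1'})
#     # -> True iff 'node-1' currently has a live ephemeral member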
def _get_all(self, group_id):
"""Return all members in a list, or a ServiceGroupUnavailable
exception.
"""
monitor = self._monitors.get(group_id, None)
if monitor is None:
path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)
null = open(os.devnull, "w")
local_session = evzookeeper.ZKSession(CONF.zookeeper.address,
recv_timeout=
CONF.zookeeper.recv_timeout,
zklog_fd=null)
monitor = membership.MembershipMonitor(local_session, path)
self._monitors[group_id] = monitor
# Note(maoy): When initialized for the first time, it takes a
# while to retrieve all members from zookeeper. To prevent
# None to be returned, we sleep 5 sec max to wait for data to
# be ready.
timeout = 5 # seconds
interval = 0.1
tries = int(timeout / interval)
for _retry in range(tries):
eventlet.sleep(interval)
all_members = monitor.get_all()
if all_members is not None:
# Stop the tries once the cache is populated
LOG.debug('got info about members in %r: %r',
path, ', '.join(all_members))
break
else:
# if all_members, weren't populated
LOG.warning(_LW('Problem with acquiring the list of '
'children of %(path)r within a given '
'timeout=%(timeout)rs'),
{'path': path, 'timeout': timeout})
else:
all_members = monitor.get_all()
if all_members is None:
raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
def have_processes(member):
"""Predicate that given member has processes (subnode exists)."""
value, stat = monitor.get_member_details(member)
# only check nodes that are created by Membership class
if value == 'ZKMembers':
num_children = stat['numChildren']
return num_children > 0
else:
# unknown type of node found - ignoring
return False
# filter only this members that have processes running
all_members = filter(have_processes, all_members)
return all_members
| apache-2.0 |
arangodb/arangodb | 3rdParty/V8/gypfiles/broken/verify_source_deps.py | 2 | 4450 | #!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to print potentially missing source dependencies based on the actual
.h and .cc files in the source tree and which files are included in the gyp
and gn files. The latter inclusion is overapproximated.
TODO(machenbach): If two source files with the same name exist, but only one
is referenced from a gyp/gn file, we won't necessarily detect it.
"""
import itertools
import re
import os
import subprocess
import sys
V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
GYP_FILES = [
os.path.join(V8_BASE, 'gypfiles', 'd8.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'v8.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'inspector.gypi'),
os.path.join(V8_BASE, 'gypfiles', 'v8vtune.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'samples.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'mkgrokdump.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'parser-shell.gyp'),
]
ALL_GYP_PREFIXES = [
'..',
'common',
os.path.join('src', 'third_party', 'vtune'),
'src',
'samples',
'testing',
'tools',
os.path.join('test', 'common'),
os.path.join('test', 'inspector'),
os.path.join('test', 'mkgrokdump'),
]
GYP_UNSUPPORTED_FEATURES = [
'gcmole',
'setup-isolate-deserialize.cc',
'v8-version.h'
]
GN_FILES = [
os.path.join(V8_BASE, 'BUILD.gn'),
os.path.join(V8_BASE, 'src', 'inspector', 'BUILD.gn'),
os.path.join(V8_BASE, 'test', 'inspector', 'BUILD.gn'),
os.path.join(V8_BASE, 'test', 'mkgrokdump', 'BUILD.gn'),
os.path.join(V8_BASE, 'tools', 'BUILD.gn'),
]
GN_UNSUPPORTED_FEATURES = [
'aix',
'cygwin',
'freebsd',
'gcmole',
'openbsd',
'ppc',
'qnx',
'solaris',
'vtune',
'v8-version.h',
]
ALL_GN_PREFIXES = [
'..',
os.path.join('src', 'inspector'),
'src',
'testing',
os.path.join('test', 'inspector'),
os.path.join('test', 'mkgrokdump'),
]
def pathsplit(path):
return re.split('[/\\\\]', path)
def path_no_prefix(path, prefixes):
for prefix in prefixes:
if path.startswith(prefix + os.sep):
return path_no_prefix(path[len(prefix) + 1:], prefixes)
return path
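# Hedged example (the paths are illustrative): prefixes are stripped
# recursively, so nested prefixes all disappear:
#
#     path_no_prefix(os.path.join('..', 'src', 'heap', 'heap.cc'), ['..', 'src'])
#     # -> os.path.join('heap', 'heap.cc')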
def isources(prefixes):
cmd = ['git', 'ls-tree', '-r', 'HEAD', '--full-name', '--name-only']
for f in subprocess.check_output(cmd, universal_newlines=True).split('\n'):
if not (f.endswith('.h') or f.endswith('.cc')):
continue
yield path_no_prefix(os.path.join(*pathsplit(f)), prefixes)
def iflatten(obj):
if isinstance(obj, dict):
for value in obj.values():
for i in iflatten(value):
yield i
elif isinstance(obj, list):
for value in obj:
for i in iflatten(value):
yield i
elif isinstance(obj, basestring):
yield path_no_prefix(os.path.join(*pathsplit(obj)), ALL_GYP_PREFIXES)
def iflatten_gyp_file(gyp_file):
"""Overaproximates all values in the gyp file.
Iterates over all string values recursively. Removes '../' path prefixes.
"""
with open(gyp_file) as f:
return iflatten(eval(f.read()))
def iflatten_gn_file(gn_file):
"""Overaproximates all values in the gn file.
Iterates over all double quoted strings.
"""
with open(gn_file) as f:
for line in f.read().splitlines():
match = re.match(r'.*"([^"]*)".*', line)
if match:
yield path_no_prefix(
os.path.join(*pathsplit(match.group(1))), ALL_GN_PREFIXES)
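# Hedged note: the greedy leading .* in the regex above means only the LAST
# double-quoted string on each line is captured, e.g. (illustrative):
#
#     re.match(r'.*"([^"]*)".*', 'sources = [ "a.cc", "b.cc" ]').group(1)
#     # -> 'b.cc'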
def icheck_values(values, prefixes):
for source_file in isources(prefixes):
if source_file not in values:
yield source_file
def missing_gyp_files():
gyp_values = set(itertools.chain(
*[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
))
gyp_files = sorted(icheck_values(gyp_values, ALL_GYP_PREFIXES))
return filter(
lambda x: not any(i in x for i in GYP_UNSUPPORTED_FEATURES), gyp_files)
def missing_gn_files():
gn_values = set(itertools.chain(
*[iflatten_gn_file(gn_file) for gn_file in GN_FILES]
))
gn_files = sorted(icheck_values(gn_values, ALL_GN_PREFIXES))
return filter(
lambda x: not any(i in x for i in GN_UNSUPPORTED_FEATURES), gn_files)
def main():
print "----------- Files not in gyp: ------------"
for i in missing_gyp_files():
print i
print "\n----------- Files not in gn: -------------"
for i in missing_gn_files():
print i
return 0
if '__main__' == __name__:
sys.exit(main())
| apache-2.0 |
golismero/golismero | plugins/testing/recon/punkspider.py | 5 | 5708 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.data.resource.domain import Domain
from golismero.api.data.resource.url import URL
from golismero.api.data.vulnerability.injection.sql import SQLInjection
from golismero.api.data.vulnerability.injection.xss import XSS
from golismero.api.logger import Logger
from golismero.api.plugin import TestingPlugin
from golismero.api.text.text_utils import to_utf8
from golismero.api.net.web_utils import parse_url
import requests
import traceback
#------------------------------------------------------------------------------
class PunkSPIDER(TestingPlugin):
"""
This plugin tries to perform passive reconnaissance on a target using
the PunkSPIDER vulnerability lookup engine.
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [Domain]
#--------------------------------------------------------------------------
def run(self, info):
# Query PunkSPIDER.
host_id = info.hostname
host_id = parse_url(host_id).hostname
host_id = ".".join(reversed(host_id.split(".")))
d = self.query_punkspider(host_id)
# Stop if we have no results.
if not d:
Logger.log("No results found for host: %s" % info.hostname)
return
# This is where we'll collect the data we'll return.
results = []
# For each vulnerability...
for v in d["data"]:
try:
# Future-proof checks.
if v["protocol"] not in ("http", "https"):
Logger.log_more_verbose(
"Skipped non-web vulnerability: %s"
% to_utf8(v["id"]))
continue
if v["bugType"] not in ("xss", "sqli", "bsqli"):
Logger.log_more_verbose(
"Skipped unknown vulnerability type: %s"
% to_utf8(v["bugType"]))
continue
# Get the vulnerable URL, parameter and payload.
url = to_utf8(v["vulnerabilityUrl"])
param = to_utf8(v["parameter"])
parsed = parse_url(url)
payload = parsed.query_params[param]
# Get the level.
level = to_utf8(v["level"])
# Create the URL object.
url_o = URL(url)
results.append(url_o)
# Get the vulnerability class.
if v["bugType"] == "xss":
clazz = XSS
else:
clazz = SQLInjection
# Create the Vulnerability object.
vuln = clazz(
url_o,
vulnerable_params = { param: payload },
injection_point = clazz.INJECTION_POINT_URL,
injection_type = to_utf8(v["bugType"]), # FIXME
level = level,
tool_id = to_utf8(v["id"]),
)
results.append(vuln)
# Log errors.
except Exception, e:
tb = traceback.format_exc()
Logger.log_error_verbose(str(e))
Logger.log_error_more_verbose(tb)
# Log how many vulnerabilities we found.
count = int(len(results) / 2)
if count == 0:
Logger.log("No vulnerabilities found for host: " + info.hostname)
elif count == 1:
Logger.log("Found one vulnerability for host: " + info.hostname)
else:
Logger.log("Found %d vulnerabilities for host: %s"
% (count, info.hostname))
# Return the results.
return results
#--------------------------------------------------------------------------
# The PunkSPIDER API.
URL = (
"https://www.punkspider.org/service/search/detail/%s"
)
HEADERS = {
"Accept": "*/*",
"Referer": "https://www.punkspider.org/",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64)"
" AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/31.0.1650.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
def query_punkspider(self, host_id):
try:
r = requests.get(self.URL % host_id,
headers = self.HEADERS,
verify=False)
assert r.headers["Content-Type"].startswith("application/json"),\
"Response from server is not a JSON encoded object"
return r.json()
except requests.RequestException, e:
Logger.log_error(
"Query to PunkSPIDER failed, reason: %s" % str(e))
| gpl-2.0 |
fxtentacle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py | 124 | 1987 | # Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import unittest2 as unittest
from webkitpy.common.system.outputtee import Tee, OutputTee
class SimpleTeeTest(unittest.TestCase):
def test_simple_tee(self):
file1, file2 = StringIO.StringIO(), StringIO.StringIO()
tee = Tee(file1, file2)
tee.write("foo bar\n")
tee.write("baz\n")
self.assertEqual(file1.getvalue(), "foo bar\nbaz\n")
self.assertEqual(file2.getvalue(), file1.getvalue())
| bsd-3-clause |
thiriel/maps | venv/lib/python2.7/site-packages/django/contrib/localflavor/fi/fi_municipalities.py | 394 | 10822 | # -*- coding: utf-8 -*-
"""
An alphabetical list of Finnish municipalities for use as `choices` in a
formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
MUNICIPALITY_CHOICES = (
('akaa', u"Akaa"),
('alajarvi', u"Alajärvi"),
('alavieska', u"Alavieska"),
('alavus', u"Alavus"),
('artjarvi', u"Artjärvi"),
('asikkala', u"Asikkala"),
('askola', u"Askola"),
('aura', u"Aura"),
('brando', u"Brändö"),
('eckero', u"Eckerö"),
('enonkoski', u"Enonkoski"),
('enontekio', u"Enontekiö"),
('espoo', u"Espoo"),
('eura', u"Eura"),
('eurajoki', u"Eurajoki"),
('evijarvi', u"Evijärvi"),
('finstrom', u"Finström"),
('forssa', u"Forssa"),
('foglo', u"Föglö"),
('geta', u"Geta"),
('haapajarvi', u"Haapajärvi"),
('haapavesi', u"Haapavesi"),
('hailuoto', u"Hailuoto"),
('halsua', u"Halsua"),
('hamina', u"Hamina"),
('hammarland', u"Hammarland"),
('hankasalmi', u"Hankasalmi"),
('hanko', u"Hanko"),
('harjavalta', u"Harjavalta"),
('hartola', u"Hartola"),
('hattula', u"Hattula"),
('haukipudas', u"Haukipudas"),
('hausjarvi', u"Hausjärvi"),
('heinola', u"Heinola"),
('heinavesi', u"Heinävesi"),
('helsinki', u"Helsinki"),
('hirvensalmi', u"Hirvensalmi"),
('hollola', u"Hollola"),
('honkajoki', u"Honkajoki"),
('huittinen', u"Huittinen"),
('humppila', u"Humppila"),
('hyrynsalmi', u"Hyrynsalmi"),
('hyvinkaa', u"Hyvinkää"),
('hameenkoski', u"Hämeenkoski"),
('hameenkyro', u"Hämeenkyrö"),
('hameenlinna', u"Hämeenlinna"),
('ii', u"Ii"),
('iisalmi', u"Iisalmi"),
('iitti', u"Iitti"),
('ikaalinen', u"Ikaalinen"),
('ilmajoki', u"Ilmajoki"),
('ilomantsi', u"Ilomantsi"),
('imatra', u"Imatra"),
('inari', u"Inari"),
('inkoo', u"Inkoo"),
('isojoki', u"Isojoki"),
('isokyro', u"Isokyrö"),
('jalasjarvi', u"Jalasjärvi"),
('janakkala', u"Janakkala"),
('joensuu', u"Joensuu"),
('jokioinen', u"Jokioinen"),
('jomala', u"Jomala"),
('joroinen', u"Joroinen"),
('joutsa', u"Joutsa"),
('juankoski', u"Juankoski"),
('juuka', u"Juuka"),
('juupajoki', u"Juupajoki"),
('juva', u"Juva"),
('jyvaskyla', u"Jyväskylä"),
('jamijarvi', u"Jämijärvi"),
('jamsa', u"Jämsä"),
('jarvenpaa', u"Järvenpää"),
('kaarina', u"Kaarina"),
('kaavi', u"Kaavi"),
('kajaani', u"Kajaani"),
('kalajoki', u"Kalajoki"),
('kangasala', u"Kangasala"),
('kangasniemi', u"Kangasniemi"),
('kankaanpaa', u"Kankaanpää"),
('kannonkoski', u"Kannonkoski"),
('kannus', u"Kannus"),
('karijoki', u"Karijoki"),
('karjalohja', u"Karjalohja"),
('karkkila', u"Karkkila"),
('karstula', u"Karstula"),
('karttula', u"Karttula"),
('karvia', u"Karvia"),
('kaskinen', u"Kaskinen"),
('kauhajoki', u"Kauhajoki"),
('kauhava', u"Kauhava"),
('kauniainen', u"Kauniainen"),
('kaustinen', u"Kaustinen"),
('keitele', u"Keitele"),
('kemi', u"Kemi"),
('kemijarvi', u"Kemijärvi"),
('keminmaa', u"Keminmaa"),
('kemionsaari', u"Kemiönsaari"),
('kempele', u"Kempele"),
('kerava', u"Kerava"),
('kerimaki', u"Kerimäki"),
('kesalahti', u"Kesälahti"),
('keuruu', u"Keuruu"),
('kihnio', u"Kihniö"),
('kiikoinen', u"Kiikoinen"),
('kiiminki', u"Kiiminki"),
('kinnula', u"Kinnula"),
('kirkkonummi', u"Kirkkonummi"),
('kitee', u"Kitee"),
('kittila', u"Kittilä"),
('kiuruvesi', u"Kiuruvesi"),
('kivijarvi', u"Kivijärvi"),
('kokemaki', u"Kokemäki"),
('kokkola', u"Kokkola"),
('kolari', u"Kolari"),
('konnevesi', u"Konnevesi"),
('kontiolahti', u"Kontiolahti"),
('korsnas', u"Korsnäs"),
('koskitl', u"Koski Tl"),
('kotka', u"Kotka"),
('kouvola', u"Kouvola"),
('kristiinankaupunki', u"Kristiinankaupunki"),
('kruunupyy', u"Kruunupyy"),
('kuhmalahti', u"Kuhmalahti"),
('kuhmo', u"Kuhmo"),
('kuhmoinen', u"Kuhmoinen"),
('kumlinge', u"Kumlinge"),
('kuopio', u"Kuopio"),
('kuortane', u"Kuortane"),
('kurikka', u"Kurikka"),
('kustavi', u"Kustavi"),
('kuusamo', u"Kuusamo"),
('kylmakoski', u"Kylmäkoski"),
('kyyjarvi', u"Kyyjärvi"),
('karkola', u"Kärkölä"),
('karsamaki', u"Kärsämäki"),
('kokar', u"Kökar"),
('koylio', u"Köyliö"),
('lahti', u"Lahti"),
('laihia', u"Laihia"),
('laitila', u"Laitila"),
('lapinjarvi', u"Lapinjärvi"),
('lapinlahti', u"Lapinlahti"),
('lappajarvi', u"Lappajärvi"),
('lappeenranta', u"Lappeenranta"),
('lapua', u"Lapua"),
('laukaa', u"Laukaa"),
('lavia', u"Lavia"),
('lemi', u"Lemi"),
('lemland', u"Lemland"),
('lempaala', u"Lempäälä"),
('leppavirta', u"Leppävirta"),
('lestijarvi', u"Lestijärvi"),
('lieksa', u"Lieksa"),
('lieto', u"Lieto"),
('liminka', u"Liminka"),
('liperi', u"Liperi"),
('lohja', u"Lohja"),
('loimaa', u"Loimaa"),
('loppi', u"Loppi"),
('loviisa', u"Loviisa"),
('luhanka', u"Luhanka"),
('lumijoki', u"Lumijoki"),
('lumparland', u"Lumparland"),
('luoto', u"Luoto"),
('luumaki', u"Luumäki"),
('luvia', u"Luvia"),
('lansi-turunmaa', u"Länsi-Turunmaa"),
('maalahti', u"Maalahti"),
('maaninka', u"Maaninka"),
('maarianhamina', u"Maarianhamina"),
('marttila', u"Marttila"),
('masku', u"Masku"),
('merijarvi', u"Merijärvi"),
('merikarvia', u"Merikarvia"),
('miehikkala', u"Miehikkälä"),
('mikkeli', u"Mikkeli"),
('muhos', u"Muhos"),
('multia', u"Multia"),
('muonio', u"Muonio"),
('mustasaari', u"Mustasaari"),
('muurame', u"Muurame"),
('mynamaki', u"Mynämäki"),
('myrskyla', u"Myrskylä"),
('mantsala', u"Mäntsälä"),
('mantta-vilppula', u"Mänttä-Vilppula"),
('mantyharju', u"Mäntyharju"),
('naantali', u"Naantali"),
('nakkila', u"Nakkila"),
('nastola', u"Nastola"),
('nilsia', u"Nilsiä"),
('nivala', u"Nivala"),
('nokia', u"Nokia"),
('nousiainen', u"Nousiainen"),
('nummi-pusula', u"Nummi-Pusula"),
('nurmes', u"Nurmes"),
('nurmijarvi', u"Nurmijärvi"),
('narpio', u"Närpiö"),
('oravainen', u"Oravainen"),
('orimattila', u"Orimattila"),
('oripaa', u"Oripää"),
('orivesi', u"Orivesi"),
('oulainen', u"Oulainen"),
('oulu', u"Oulu"),
('oulunsalo', u"Oulunsalo"),
('outokumpu', u"Outokumpu"),
('padasjoki', u"Padasjoki"),
('paimio', u"Paimio"),
('paltamo', u"Paltamo"),
('parikkala', u"Parikkala"),
('parkano', u"Parkano"),
('pedersore', u"Pedersöre"),
('pelkosenniemi', u"Pelkosenniemi"),
('pello', u"Pello"),
('perho', u"Perho"),
('pertunmaa', u"Pertunmaa"),
('petajavesi', u"Petäjävesi"),
('pieksamaki', u"Pieksämäki"),
('pielavesi', u"Pielavesi"),
('pietarsaari', u"Pietarsaari"),
('pihtipudas', u"Pihtipudas"),
('pirkkala', u"Pirkkala"),
('polvijarvi', u"Polvijärvi"),
('pomarkku', u"Pomarkku"),
('pori', u"Pori"),
('pornainen', u"Pornainen"),
('porvoo', u"Porvoo"),
('posio', u"Posio"),
('pudasjarvi', u"Pudasjärvi"),
('pukkila', u"Pukkila"),
('punkaharju', u"Punkaharju"),
('punkalaidun', u"Punkalaidun"),
('puolanka', u"Puolanka"),
('puumala', u"Puumala"),
('pyhtaa', u"Pyhtää"),
('pyhajoki', u"Pyhäjoki"),
('pyhajarvi', u"Pyhäjärvi"),
('pyhanta', u"Pyhäntä"),
('pyharanta', u"Pyhäranta"),
('palkane', u"Pälkäne"),
('poytya', u"Pöytyä"),
('raahe', u"Raahe"),
('raasepori', u"Raasepori"),
('raisio', u"Raisio"),
('rantasalmi', u"Rantasalmi"),
('ranua', u"Ranua"),
('rauma', u"Rauma"),
('rautalampi', u"Rautalampi"),
('rautavaara', u"Rautavaara"),
('rautjarvi', u"Rautjärvi"),
('reisjarvi', u"Reisjärvi"),
('riihimaki', u"Riihimäki"),
('ristiina', u"Ristiina"),
('ristijarvi', u"Ristijärvi"),
('rovaniemi', u"Rovaniemi"),
('ruokolahti', u"Ruokolahti"),
('ruovesi', u"Ruovesi"),
('rusko', u"Rusko"),
('raakkyla', u"Rääkkylä"),
('saarijarvi', u"Saarijärvi"),
('salla', u"Salla"),
('salo', u"Salo"),
('saltvik', u"Saltvik"),
('sastamala', u"Sastamala"),
('sauvo', u"Sauvo"),
('savitaipale', u"Savitaipale"),
('savonlinna', u"Savonlinna"),
('savukoski', u"Savukoski"),
('seinajoki', u"Seinäjoki"),
('sievi', u"Sievi"),
('siikainen', u"Siikainen"),
('siikajoki', u"Siikajoki"),
('siikalatva', u"Siikalatva"),
('siilinjarvi', u"Siilinjärvi"),
('simo', u"Simo"),
('sipoo', u"Sipoo"),
('siuntio', u"Siuntio"),
('sodankyla', u"Sodankylä"),
('soini', u"Soini"),
('somero', u"Somero"),
('sonkajarvi', u"Sonkajärvi"),
('sotkamo', u"Sotkamo"),
('sottunga', u"Sottunga"),
('sulkava', u"Sulkava"),
('sund', u"Sund"),
('suomenniemi', u"Suomenniemi"),
('suomussalmi', u"Suomussalmi"),
('suonenjoki', u"Suonenjoki"),
('sysma', u"Sysmä"),
('sakyla', u"Säkylä"),
('taipalsaari', u"Taipalsaari"),
('taivalkoski', u"Taivalkoski"),
('taivassalo', u"Taivassalo"),
('tammela', u"Tammela"),
('tampere', u"Tampere"),
('tarvasjoki', u"Tarvasjoki"),
('tervo', u"Tervo"),
('tervola', u"Tervola"),
('teuva', u"Teuva"),
('tohmajarvi', u"Tohmajärvi"),
('toholampi', u"Toholampi"),
('toivakka', u"Toivakka"),
('tornio', u"Tornio"),
('turku', u"Turku"),
('tuusniemi', u"Tuusniemi"),
('tuusula', u"Tuusula"),
('tyrnava', u"Tyrnävä"),
('toysa', u"Töysä"),
('ulvila', u"Ulvila"),
('urjala', u"Urjala"),
('utajarvi', u"Utajärvi"),
('utsjoki', u"Utsjoki"),
('uurainen', u"Uurainen"),
('uusikaarlepyy', u"Uusikaarlepyy"),
('uusikaupunki', u"Uusikaupunki"),
('vaala', u"Vaala"),
('vaasa', u"Vaasa"),
('valkeakoski', u"Valkeakoski"),
('valtimo', u"Valtimo"),
('vantaa', u"Vantaa"),
('varkaus', u"Varkaus"),
('varpaisjarvi', u"Varpaisjärvi"),
('vehmaa', u"Vehmaa"),
('vesanto', u"Vesanto"),
('vesilahti', u"Vesilahti"),
('veteli', u"Veteli"),
('vierema', u"Vieremä"),
('vihanti', u"Vihanti"),
('vihti', u"Vihti"),
('viitasaari', u"Viitasaari"),
('vimpeli', u"Vimpeli"),
('virolahti', u"Virolahti"),
('virrat', u"Virrat"),
('vardo', u"Vårdö"),
('vahakyro', u"Vähäkyrö"),
('voyri-maksamaa', u"Vöyri-Maksamaa"),
('yli-ii', u"Yli-Ii"),
('ylitornio', u"Ylitornio"),
('ylivieska', u"Ylivieska"),
('ylojarvi', u"Ylöjärvi"),
('ypaja', u"Ypäjä"),
('ahtari', u"Ähtäri"),
('aanekoski', u"Äänekoski")
) | bsd-3-clause |
adamkh/kivy | kivy/config.py | 2 | 30423 | '''
Configuration object
====================
The :class:`Config` object is an instance of a modified Python ConfigParser.
See the `ConfigParser documentation
<http://docs.python.org/library/configparser.html>`_ for more information.
Kivy has a configuration file which determines the default settings. In
order to change these settings, you can alter this file manually or use
the Config object. Please see the :ref:`Configure Kivy` section for more
information.
.. note::
To avoid instances where the config settings do not work or they are
not applied before window creation (like setting an initial window size),
:meth:`Config.set <kivy.config.ConfigParser.set>` should be used before
importing any modules that affect the application window (e.g. importing
Window). Ideally, these settings should be declared right at the start of
your main.py script.
Usage of the Config object
--------------------------
To read a configuration token from a particular section::
>>> from kivy.config import Config
>>> Config.getint('kivy', 'show_fps')
0
Change the configuration and save it::
>>> Config.set('postproc', 'retain_time', '50')
>>> Config.write()
.. versionchanged:: 1.7.1
The ConfigParser should work correctly with utf-8 now. The values are
converted from ascii to unicode only when needed. The method get() returns
utf-8 strings.
.. _configuration-tokens:
Available configuration tokens
------------------------------
.. |log_levels| replace:: 'debug', 'info', 'warning', 'error' or 'critical'
:kivy:
`desktop`: int, 0 or 1
        This option controls desktop OS specific features, such as enabling
        a draggable scroll bar in scroll views or disabling bubbles in
        TextInput. 0 is disabled, 1 is enabled.
`exit_on_escape`: int, 0 or 1
Enables exiting kivy when escape is pressed.
0 is disabled, 1 is enabled.
`pause_on_minimize`: int, 0 or 1
If set to `1`, the main loop is paused and the `on_pause` event
is dispatched when the window is minimized. This option is intended
for desktop use only. Defaults to `0`.
`keyboard_layout`: string
Identifier of the layout to use.
`keyboard_mode`: string
        Specifies the keyboard mode to use. It can be one of the following:
* '' - Let Kivy choose the best option for your current platform.
* 'system' - real keyboard.
* 'dock' - one virtual keyboard docked to a screen side.
* 'multi' - one virtual keyboard for every widget request.
* 'systemanddock' - virtual docked keyboard plus input from real
keyboard.
* 'systemandmulti' - analogous.
`log_dir`: string
Path of log directory.
`log_enable`: int, 0 or 1
Activate file logging. 0 is disabled, 1 is enabled.
`log_level`: string, one of |log_levels|
Set the minimum log level to use.
`log_name`: string
Format string to use for the filename of log file.
`window_icon`: string
Path of the window icon. Use this if you want to replace the default
pygame icon.
:postproc:
`double_tap_distance`: float
Maximum distance allowed for a double tap, normalized inside the range
0 - 1000.
`double_tap_time`: int
Time allowed for the detection of double tap, in milliseconds.
`ignore`: list of tuples
List of regions where new touches are ignored.
This configuration token can be used to resolve hotspot problems
with DIY hardware. The format of the list must be::
ignore = [(xmin, ymin, xmax, ymax), ...]
All the values must be inside the range 0 - 1.
`jitter_distance`: int
Maximum distance for jitter detection, normalized inside the range 0
- 1000.
`jitter_ignore_devices`: string, separated with commas
List of devices to ignore from jitter detection.
`retain_distance`: int
If the touch moves more than is indicated by retain_distance, it will
not be retained. Argument should be an int between 0 and 1000.
`retain_time`: int
Time allowed for a retain touch, in milliseconds.
`triple_tap_distance`: float
Maximum distance allowed for a triple tap, normalized inside the range
0 - 1000.
`triple_tap_time`: int
Time allowed for the detection of triple tap, in milliseconds.
:graphics:
    `borderless`: int, one of 0 or 1
If set to `1`, removes the window border/decoration.
    `window_state`: string, one of 'visible', 'hidden', 'maximized' \
or 'minimized'
Sets the window state, defaults to 'visible'. This option is available
only for the SDL2 window provider and it should be used on desktop
OSes.
`fbo`: string, one of 'hardware', 'software' or 'force-hardware'
Selects the FBO backend to use.
`fullscreen`: int or string, one of 0, 1, 'fake' or 'auto'
Activate fullscreen. If set to `1`, a resolution of `width`
times `height` pixels will be used.
If set to `auto`, your current display's resolution will be
used instead. This is most likely what you want.
If you want to place the window in another display,
use `fake`, or set the `borderless` option from the graphics section,
then adjust `width`, `height`, `top` and `left`.
`height`: int
Height of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`left`: int
Left position of the :class:`~kivy.core.window.Window`.
`maxfps`: int, defaults to 60
Maximum FPS allowed.
    `multisamples`: int, defaults to 2
Sets the `MultiSample Anti-Aliasing (MSAA)
<http://en.wikipedia.org/wiki/Multisample_anti-aliasing>`_ level.
Increasing this value results in smoother graphics but at the cost of
processing time.
.. note::
This feature is limited by device hardware support and will have no
effect on devices which do not support the level of MSAA requested.
`position`: string, one of 'auto' or 'custom'
Position of the window on your display. If `auto` is used, you have no
control of the initial position: `top` and `left` are ignored.
`show_cursor`: int, one of 0 or 1
Show the cursor on the screen.
`top`: int
Top position of the :class:`~kivy.core.window.Window`.
`resizable`: int, one of 0 or 1
If 0, the window will have a fixed size. If 1, the window will be
resizable.
`rotation`: int, one of 0, 90, 180 or 270
Rotation of the :class:`~kivy.core.window.Window`.
`width`: int
Width of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`minimum_width`: int
Minimum width to restrict the window to. (sdl2 only)
    `minimum_height`: int
Minimum height to restrict the window to. (sdl2 only)
:input:
You can create new input devices using this syntax::
# example of input provider instance
yourid = providerid,parameters
# example for tuio provider
default = tuio,127.0.0.1:3333
mytable = tuio,192.168.0.1:3334
.. seealso::
Check the providers in kivy.input.providers for the syntax to use
inside the configuration file.
:widgets:
`scroll_distance`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_distance`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_friction`: float
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_friction`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_timeout`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_timeout`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_stoptime`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_stoptime`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_moves`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_moves`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
:modules:
You can activate modules with this syntax::
modulename =
Anything after the = will be passed to the module as arguments.
Check the specific module's documentation for a list of accepted
arguments.
.. note::
    These options control only the initialization of the app and a restart
is required for value changes to take effect.
.. versionchanged:: 1.9.0
`borderless` and `window_state` have been added to the graphics section.
The `fake` setting of the `fullscreen` option has been deprecated,
use the `borderless` option instead.
`pause_on_minimize` has been added to the kivy section.
.. versionchanged:: 1.8.0
`systemanddock` and `systemandmulti` has been added as possible values for
`keyboard_mode` in the kivy section. `exit_on_escape` has been added
to the kivy section.
.. versionchanged:: 1.2.0
`resizable` has been added to graphics section.
.. versionchanged:: 1.1.0
tuio no longer listens by default. Window icons are not copied to
user directory anymore. You can still set a new window icon by using the
``window_icon`` config setting.
.. versionchanged:: 1.0.8
`scroll_timeout`, `scroll_distance` and `scroll_friction` have been added.
`list_friction`, `list_trigger_distance` and `list_friction_bound`
have been removed. `keyboard_type` and `keyboard_layout` have been
removed from the widget. `keyboard_mode` and `keyboard_layout` have
been added to the kivy section.
'''
__all__ = ('Config', 'ConfigParser')
try:
from ConfigParser import ConfigParser as PythonConfigParser
except ImportError:
from configparser import RawConfigParser as PythonConfigParser
from os import environ
from os.path import exists
from kivy import kivy_config_fn
from kivy.logger import Logger, logger_config_update
from collections import OrderedDict
from kivy.utils import platform
from kivy.compat import PY2, string_types
from weakref import ref
_is_rpi = exists('/opt/vc/include/bcm_host.h')
# Version number of current configuration format
KIVY_CONFIG_VERSION = 14
Config = None
'''Kivy configuration object. Its :attr:`~kivy.config.ConfigParser.name` is
`'kivy'`
'''
class ConfigParser(PythonConfigParser, object):
'''Enhanced ConfigParser class that supports the addition of default
sections and default values.
By default, the kivy ConfigParser instance, :attr:`~kivy.config.Config`,
is given the name `'kivy'` and the ConfigParser instance used by App,
:meth:`~kivy.app.App.build_settings`, is given the name `'app'`.
:Parameters:
`name`: string
The name of the instance. See :attr:`name`. Defaults to `''`.
.. versionchanged:: 1.9.0
Each ConfigParser can now be named, :attr:`name`. You can get the
ConfigParser associated with a name using :meth:`get_configparser`.
In addition, you can now control the config values with
:class:`~kivy.properties.ConfigParserProperty`.
.. versionadded:: 1.0.7
'''
def __init__(self, name=''):
PythonConfigParser.__init__(self)
self._sections = OrderedDict()
self.filename = None
self._callbacks = []
self.name = name
def add_callback(self, callback, section=None, key=None):
'''Add a callback to be called when a specific section/key changed. If
you don't specify a section or a key, it will call the callback
for all section/keys changes.
Callbacks will receive 3 arguments: the section, key and value.
.. versionadded:: 1.4.1
'''
if section is None and key is not None:
raise Exception('You cannot specify a key without a section')
self._callbacks.append((callback, section, key))
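    # Illustrative usage (a sketch, not part of the original module): a
    # callback registered for a single section/key fires only on that key:
    #
    #     def on_log_level(section, key, value):
    #         print('config changed:', section, key, value)
    #
    #     Config.add_callback(on_log_level, 'kivy', 'log_level')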
def remove_callback(self, callback, section=None, key=None):
'''Removes a callback added with :meth:`add_callback`.
:meth:`remove_callback` must be called with the same parameters as
:meth:`add_callback`.
Raises a `ValueError` if not found.
.. versionadded:: 1.9.0
'''
self._callbacks.remove((callback, section, key))
def _do_callbacks(self, section, key, value):
for callback, csection, ckey in self._callbacks:
if csection is not None and csection != section:
continue
elif ckey is not None and ckey != key:
continue
callback(section, key, value)
def read(self, filename):
'''Read only one filename. In contrast to the original ConfigParser of
Python, this one is able to read only one file at a time. The last
read file will be used for the :meth:`write` method.
.. versionchanged:: 1.9.0
:meth:`read` now calls the callbacks if read changed any values.
'''
if not isinstance(filename, string_types):
raise Exception('Only one filename is accepted ({})'.format(
string_types.__name__))
self.filename = filename
        # If we try to open the configuration file directly in utf-8,
        # we correctly get the unicode values by default.
        # But when we try to save it again, all the values we didn't change
        # are still unicode, and the PythonConfigParser internals then do
        # a str() conversion -> fail.
        # Instead, we currently do the conversion to utf-8 when values are
        # "get()", but internally store them in ascii.
#with codecs.open(filename, 'r', encoding='utf-8') as f:
# self.readfp(f)
old_vals = {sect: {k: v for k, v in self.items(sect)} for sect in
self.sections()}
PythonConfigParser.read(self, filename)
# when reading new file, sections/keys are only increased, not removed
f = self._do_callbacks
for section in self.sections():
if section not in old_vals: # new section
for k, v in self.items(section):
f(section, k, v)
continue
old_keys = old_vals[section]
for k, v in self.items(section): # just update new/changed keys
if k not in old_keys or v != old_keys[k]:
f(section, k, v)
def set(self, section, option, value):
'''Functions similarly to PythonConfigParser's set method, except that
the value is implicitly converted to a string.
'''
e_value = value
if not isinstance(value, string_types):
# might be boolean, int, etc.
e_value = str(value)
if PY2:
if isinstance(value, unicode):
e_value = value.encode('utf-8')
ret = PythonConfigParser.set(self, section, option, e_value)
self._do_callbacks(section, option, value)
return ret
def setall(self, section, keyvalues):
'''Set a lot of keys/values in one section at the same time.
'''
for key, value in keyvalues.items():
self.set(section, key, value)
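    # Minimal sketch: setall() pushes every pair through set(), so values
    # get the same implicit string conversion, e.g.
    #
    #     Config.setall('graphics', {'width': 1280, 'height': 720})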
def get(self, section, option, **kwargs):
value = PythonConfigParser.get(self, section, option, **kwargs)
if PY2:
if type(value) is str:
return value.decode('utf-8')
return value
def setdefaults(self, section, keyvalues):
'''Set a lot of keys/value defaults in one section at the same time.
'''
self.adddefaultsection(section)
for key, value in keyvalues.items():
self.setdefault(section, key, value)
def setdefault(self, section, option, value):
'''Set the default value of a particular option.
'''
if self.has_option(section, option):
return
self.set(section, option, value)
def getdefault(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
'''
if not self.has_section(section):
return defaultvalue
if not self.has_option(section, option):
return defaultvalue
return self.get(section, option)
def getdefaultint(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
The return value will be always converted as an integer.
.. versionadded:: 1.6.0
'''
return int(self.getdefault(section, option, defaultvalue))
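    # Sketch: the getdefault*() helpers never raise for a missing section or
    # option, they fall back to the supplied default, e.g.
    #
    #     fps = Config.getdefaultint('graphics', 'maxfps', 60)
    #     icon = Config.getdefault('kivy', 'window_icon', '')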
def adddefaultsection(self, section):
'''Add a section if the section is missing.
'''
if self.has_section(section):
return
self.add_section(section)
def write(self):
'''Write the configuration to the last file opened using the
:meth:`read` method.
Return True if the write finished successfully.
'''
if self.filename is None:
return False
try:
with open(self.filename, 'w') as fd:
PythonConfigParser.write(self, fd)
except IOError:
Logger.exception('Unable to write the config <%s>' % self.filename)
return False
return True
def update_config(self, filename, overwrite=False):
'''Upgrade the configuration based on a new default config file.
Overwrite any existing values if overwrite is True.
'''
pcp = PythonConfigParser()
pcp.read(filename)
confset = self.setall if overwrite else self.setdefaults
for section in pcp.sections():
confset(section, dict(pcp.items(section)))
self.write()
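    # Hypothetical example (file names are assumptions): merge new defaults
    # shipped with an application into a user's existing config:
    #
    #     parser = ConfigParser()
    #     parser.read('user_settings.ini')
    #     parser.update_config('app_defaults.ini')  # keeps user's values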
@staticmethod
def _register_named_property(name, widget_ref, *largs):
''' Called by the ConfigParserProperty to register a property which
was created with a config name instead of a config object.
When a ConfigParser with this name is later created, the properties
are then notified that this parser now exists so they can use it.
If the parser already exists, the property is notified here. See
:meth:`~kivy.properties.ConfigParserProperty.set_config`.
:Parameters:
`name`: a non-empty string
The name of the ConfigParser that is associated with the
property. See :attr:`name`.
`widget_ref`: 2-tuple.
The first element is a reference to the widget containing the
property, the second element is the name of the property. E.g.:
class House(Widget):
address = ConfigParserProperty('', 'info', 'street',
'directory')
Then, the first element is a ref to a House instance, and the
second is `'address'`.
'''
configs = ConfigParser._named_configs
try:
config, props = configs[name]
except KeyError:
configs[name] = (None, [widget_ref])
return
props.append(widget_ref)
if config:
config = config()
widget = widget_ref[0]()
if config and widget: # associate this config with property
widget.property(widget_ref[1]).set_config(config)
@staticmethod
def get_configparser(name):
'''Returns the :class:`ConfigParser` instance whose name is `name`, or
None if not found.
:Parameters:
`name`: string
The name of the :class:`ConfigParser` instance to return.
'''
try:
config = ConfigParser._named_configs[name][0]
return config() if config else None
except KeyError:
return None
# keys are configparser names, values are 2-tuple of (ref(configparser),
# widget_ref), where widget_ref is same as in _register_named_property
_named_configs = {}
_name = ''
@property
def name(self):
''' The name associated with this ConfigParser instance, if not `''`.
Defaults to `''`. It can be safely dynamically changed or set to `''`.
When a ConfigParser is given a name, that config object can be
retrieved using :meth:`get_configparser`. In addition, that config
instance can also be used with a
:class:`~kivy.properties.ConfigParserProperty` instance that set its
`config` value to this name.
Setting more than one ConfigParser with the same name will raise a
`ValueError`.
'''
return self._name
@name.setter
def name(self, value):
old_name = self._name
if value is old_name:
return
self._name = value
configs = ConfigParser._named_configs
if old_name: # disconnect this parser from previously connected props
_, props = configs.get(old_name, (None, []))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(None)
configs[old_name] = (None, props)
if not value:
return
# if given new name, connect it with property that used this name
try:
config, props = configs[value]
except KeyError:
configs[value] = (ref(self), [])
return
if config is not None:
raise ValueError('A parser named {} already exists'.format(value))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(self)
configs[value] = (ref(self), props)
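# Illustrative sketch of named parsers (the name 'myapp' is an assumption):
# once a parser is created with a name, the same instance can be looked up
# from anywhere, e.g. by a ConfigParserProperty:
#
#     settings = ConfigParser(name='myapp')
#     assert ConfigParser.get_configparser('myapp') is settings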
if not environ.get('KIVY_DOC_INCLUDE'):
#
# Read, analyse configuration file
# Support upgrade of older config file versions
#
# Create default configuration
Config = ConfigParser(name='kivy')
Config.add_callback(logger_config_update, 'kivy', 'log_level')
# Read config file if exist
if (exists(kivy_config_fn) and
'KIVY_USE_DEFAULTCONFIG' not in environ and
'KIVY_NO_CONFIG' not in environ):
try:
Config.read(kivy_config_fn)
except Exception as e:
Logger.exception('Core: error while reading local'
                             ' configuration')
version = Config.getdefaultint('kivy', 'config_version', 0)
# Add defaults section
Config.adddefaultsection('kivy')
Config.adddefaultsection('graphics')
Config.adddefaultsection('input')
Config.adddefaultsection('postproc')
Config.adddefaultsection('widgets')
Config.adddefaultsection('modules')
# Upgrade default configuration until we have the current version
need_save = False
if version != KIVY_CONFIG_VERSION and 'KIVY_NO_CONFIG' not in environ:
Logger.warning('Config: Older configuration version detected'
' ({0} instead of {1})'.format(
version, KIVY_CONFIG_VERSION))
Logger.warning('Config: Upgrading configuration in progress.')
need_save = True
while version < KIVY_CONFIG_VERSION:
Logger.debug('Config: Upgrading from %d to %d' %
(version, version + 1))
if version == 0:
# log level
Config.setdefault('kivy', 'keyboard_repeat_delay', '300')
Config.setdefault('kivy', 'keyboard_repeat_rate', '30')
Config.setdefault('kivy', 'log_dir', 'logs')
Config.setdefault('kivy', 'log_enable', '1')
Config.setdefault('kivy', 'log_level', 'info')
Config.setdefault('kivy', 'log_name', 'kivy_%y-%m-%d_%_.txt')
Config.setdefault('kivy', 'window_icon', '')
# default graphics parameters
Config.setdefault('graphics', 'display', '-1')
Config.setdefault('graphics', 'fullscreen', 'no')
Config.setdefault('graphics', 'height', '600')
Config.setdefault('graphics', 'left', '0')
Config.setdefault('graphics', 'maxfps', '0')
Config.setdefault('graphics', 'multisamples', '2')
Config.setdefault('graphics', 'position', 'auto')
Config.setdefault('graphics', 'rotation', '0')
Config.setdefault('graphics', 'show_cursor', '1')
Config.setdefault('graphics', 'top', '0')
Config.setdefault('graphics', 'vsync', '1')
Config.setdefault('graphics', 'width', '800')
# input configuration
Config.setdefault('input', 'mouse', 'mouse')
# activate native input provider in configuration
            # from 1.0.9, don't activate mactouch by default, or apps are
# unusable.
if platform == 'win':
Config.setdefault('input', 'wm_touch', 'wm_touch')
Config.setdefault('input', 'wm_pen', 'wm_pen')
elif platform == 'linux':
probesysfs = 'probesysfs'
if _is_rpi:
probesysfs += ',provider=hidinput'
Config.setdefault('input', '%(name)s', probesysfs)
# input postprocessing configuration
Config.setdefault('postproc', 'double_tap_distance', '20')
Config.setdefault('postproc', 'double_tap_time', '250')
Config.setdefault('postproc', 'ignore', '[]')
Config.setdefault('postproc', 'jitter_distance', '0')
Config.setdefault('postproc', 'jitter_ignore_devices',
'mouse,mactouch,')
Config.setdefault('postproc', 'retain_distance', '50')
Config.setdefault('postproc', 'retain_time', '0')
            # default configuration for keyboard repetition
Config.setdefault('widgets', 'keyboard_layout', 'qwerty')
Config.setdefault('widgets', 'keyboard_type', '')
Config.setdefault('widgets', 'list_friction', '10')
Config.setdefault('widgets', 'list_friction_bound', '20')
Config.setdefault('widgets', 'list_trigger_distance', '5')
elif version == 1:
Config.remove_option('graphics', 'vsync')
Config.set('graphics', 'maxfps', '60')
elif version == 2:
# was a version to automatically copy windows icon in the user
# directory, but it's now not used anymore. User can still change
# the window icon by touching the config.
pass
elif version == 3:
# add token for scrollview
Config.setdefault('widgets', 'scroll_timeout', '55')
Config.setdefault('widgets', 'scroll_distance', '20')
Config.setdefault('widgets', 'scroll_friction', '1.')
# remove old list_* token
Config.remove_option('widgets', 'list_friction')
Config.remove_option('widgets', 'list_friction_bound')
Config.remove_option('widgets', 'list_trigger_distance')
elif version == 4:
Config.remove_option('widgets', 'keyboard_type')
Config.remove_option('widgets', 'keyboard_layout')
# add keyboard token
Config.setdefault('kivy', 'keyboard_mode', '')
Config.setdefault('kivy', 'keyboard_layout', 'qwerty')
elif version == 5:
Config.setdefault('graphics', 'resizable', '1')
elif version == 6:
# if the timeout is still the default value, change it
Config.setdefault('widgets', 'scroll_stoptime', '300')
Config.setdefault('widgets', 'scroll_moves', '5')
elif version == 7:
# desktop bool indicating whether to use desktop specific features
is_desktop = int(platform in ('win', 'macosx', 'linux'))
Config.setdefault('kivy', 'desktop', is_desktop)
Config.setdefault('postproc', 'triple_tap_distance', '20')
Config.setdefault('postproc', 'triple_tap_time', '375')
elif version == 8:
if Config.getint('widgets', 'scroll_timeout') == 55:
Config.set('widgets', 'scroll_timeout', '250')
elif version == 9:
Config.setdefault('kivy', 'exit_on_escape', '1')
elif version == 10:
Config.set('graphics', 'fullscreen', '0')
Config.setdefault('graphics', 'borderless', '0')
elif version == 11:
Config.setdefault('kivy', 'pause_on_minimize', '0')
elif version == 12:
Config.setdefault('graphics', 'window_state', 'visible')
elif version == 13:
Config.setdefault('graphics', 'minimum_width', '0')
Config.setdefault('graphics', 'minimum_height', '0')
# elif version == 1:
# # add here the command for upgrading from configuration 0 to 1
else:
# for future.
break
# Pass to the next version
version += 1
# Indicate to the Config that we've upgrade to the latest version.
Config.set('kivy', 'config_version', KIVY_CONFIG_VERSION)
# Now, activate log file
Logger.logfile_activated = bool(Config.getint('kivy', 'log_enable'))
# If no configuration exist, write the default one.
if ((not exists(kivy_config_fn) or need_save) and
'KIVY_NO_CONFIG' not in environ):
try:
Config.filename = kivy_config_fn
Config.write()
except Exception as e:
Logger.exception('Core: Error while saving default config file')
| mit |
liucode/tempest-master | tempest/scenario/test_server_basic_ops.py | 1 | 6234 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_log import log as logging
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
from tempest.scenario import utils as test_utils
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
load_tests = test_utils.load_tests_input_scenario_utils
class TestServerBasicOps(manager.ScenarioTest):
"""
This smoke test case follows this basic set of operations:
* Create a keypair for use in launching an instance
* Create a security group to control network access in instance
* Add simple permissive rules to the security group
* Launch an instance
* Perform ssh to instance
* Verify metadata service
* Verify metadata on config_drive
* Terminate the instance
"""
def setUp(self):
super(TestServerBasicOps, self).setUp()
# Setup image and flavor the test instance
# Support both configured and injected values
if not hasattr(self, 'image_ref'):
self.image_ref = CONF.compute.image_ref
if not hasattr(self, 'flavor_ref'):
self.flavor_ref = CONF.compute.flavor_ref
self.image_utils = test_utils.ImageUtils(self.manager)
if not self.image_utils.is_flavor_enough(self.flavor_ref,
self.image_ref):
raise self.skipException(
'{image} does not fit in {flavor}'.format(
image=self.image_ref, flavor=self.flavor_ref
)
)
self.run_ssh = CONF.validation.run_validation and \
self.image_utils.is_sshable_image(self.image_ref)
self.ssh_user = self.image_utils.ssh_user(self.image_ref)
LOG.debug('Starting test for i:{image}, f:{flavor}. '
'Run ssh: {ssh}, user: {ssh_user}'.format(
image=self.image_ref, flavor=self.flavor_ref,
ssh=self.run_ssh, ssh_user=self.ssh_user))
def add_keypair(self):
self.keypair = self.create_keypair()
def boot_instance(self):
# Create server with image and flavor from input scenario
security_groups = [{'name': self.security_group['name']}]
self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups,
'config_drive': CONF.compute_feature_enabled.config_drive,
'metadata': self.md
}
self.instance = self.create_server(image=self.image_ref,
flavor=self.flavor_ref,
create_kwargs=create_kwargs)
def verify_ssh(self):
if self.run_ssh:
# Obtain a floating IP
self.fip = self.create_floating_ip(self.instance)['ip']
# Check ssh
self.ssh_client = self.get_remote_client(
server_or_ip=self.fip,
username=self.image_utils.ssh_user(self.image_ref),
private_key=self.keypair['private_key'])
def verify_metadata(self):
if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
# Verify metadata service
md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
def exec_cmd_and_verify_output():
cmd = 'curl ' + md_url
result = self.ssh_client.exec_command(cmd)
if result:
msg = ('Failed while verifying metadata on server. Result '
'of command "%s" is NOT "%s".' % (cmd, self.fip))
self.assertEqual(self.fip, result, msg)
return 'Verification is successful!'
if not test.call_until_true(exec_cmd_and_verify_output,
CONF.compute.build_timeout,
CONF.compute.build_interval):
raise exceptions.TimeoutException('Timed out while waiting to '
'verify metadata on server. '
'%s is empty.' % md_url)
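    # Note on the polling helper used above: test.call_until_true(func,
    # duration, sleep_for) keeps calling func until it returns a truthy
    # value or `duration` seconds elapse. A rough equivalent (sketch only):
    #
    #     import time
    #     def call_until_true(func, duration, sleep_for):
    #         start = time.time()
    #         while time.time() - start < duration:
    #             if func():
    #                 return True
    #             time.sleep(sleep_for)
    #         return False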
def verify_metadata_on_config_drive(self):
if self.run_ssh and CONF.compute_feature_enabled.config_drive:
# Verify metadata on config_drive
cmd_blkid = 'blkid -t LABEL=config-2 -o device'
dev_name = self.ssh_client.exec_command(cmd_blkid)
dev_name = dev_name.rstrip()
self.ssh_client.exec_command('sudo mount %s /mnt' % dev_name)
cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
result = self.ssh_client.exec_command(cmd_md)
self.ssh_client.exec_command('sudo umount /mnt')
result = json.loads(result)
self.assertIn('meta', result)
msg = ('Failed while verifying metadata on config_drive on server.'
' Result of command "%s" is NOT "%s".' % (cmd_md, self.md))
self.assertEqual(self.md, result['meta'], msg)
@test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_server_basicops(self):
self.add_keypair()
self.security_group = self._create_security_group()
self.boot_instance()
self.verify_ssh()
self.verify_metadata()
self.verify_metadata_on_config_drive()
self.servers_client.delete_server(self.instance['id'])
| apache-2.0 |
jcoliver/OSUScratchpads | sites/all/libraries/mediaelement/src/Builder.py | 14 | 4203 | import sys
import os
import shutil
me_filename = 'mediaelement'
mep_filename = 'mediaelementplayer'
combined_filename = 'mediaelement-and-player'
# BUILD MediaElement (single file)
print('building MediaElement.js')
me_files = []
me_files.append('me-header.js')
me_files.append('me-namespace.js')
me_files.append('me-utility.js')
me_files.append('me-plugindetector.js')
me_files.append('me-featuredetection.js')
me_files.append('me-mediaelements.js')
me_files.append('me-shim.js')
code = ''
for item in me_files:
src_file = open('js/' + item,'r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + me_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
# BUILD MediaElementPlayer (single file)
print('building MediaElementPlayer.js')
mep_files = []
mep_files.append('mep-header.js')
mep_files.append('mep-library.js')
mep_files.append('mep-player.js')
mep_files.append('mep-feature-playpause.js')
mep_files.append('mep-feature-stop.js')
mep_files.append('mep-feature-progress.js')
mep_files.append('mep-feature-time.js')
mep_files.append('mep-feature-volume.js')
mep_files.append('mep-feature-fullscreen.js')
mep_files.append('mep-feature-tracks.js')
mep_files.append('mep-feature-contextmenu.js')
code = ''
for item in mep_files:
src_file = open('js/' + item,'r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + mep_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
# MINIFY both scripts
print('Minifying JavaScript')
# os.system("java -jar yuicompressor-2.4.2.jar ../build/" + me_filename + ".js -o ../build/" + me_filename + ".min.js --charset utf-8 -v")
# os.system("java -jar yuicompressor-2.4.2.jar ../build/" + mep_filename + ".js -o ../build/" + mep_filename + ".min.js --charset utf-8 -v")
os.system("java -jar compiler.jar --js ../build/" + me_filename + ".js --js_output_file ../build/" + me_filename + ".min.js")
os.system("java -jar compiler.jar --js ../build/" + mep_filename + ".js --js_output_file ../build/" + mep_filename + ".min.js")
# PREPEND intros
def addHeader(headerFilename, filename):
# get the header text
tmp_file = open(headerFilename)
header_txt = tmp_file.read();
tmp_file.close()
# read the current contents of the file
tmp_file = open(filename)
file_txt = tmp_file.read()
tmp_file.close()
# open the file again for writing
tmp_file = open(filename, 'w')
tmp_file.write(header_txt)
# write the original contents
tmp_file.write(file_txt)
tmp_file.close()
addHeader('js/me-header.js', '../build/' + me_filename + '.min.js')
addHeader('js/mep-header.js', '../build/' + mep_filename + '.min.js')
# COMBINE into single script
print('Combining scripts')
code = ''
src_file = open('../build/' + me_filename + '.js','r')
code += src_file.read() + "\n"
src_file = open('../build/' + mep_filename + '.js','r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + combined_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
code = ''
src_file = open('../build/' + me_filename + '.min.js','r')
code += src_file.read() + "\n"
src_file = open('../build/' + mep_filename + '.min.js','r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + combined_filename + '.min.js','w')
tmp_file.write(code)
tmp_file.close()
# MINIFY CSS
print('Minifying CSS')
src_file = open('css/mediaelementplayer.css','r')
tmp_file = open('../build/mediaelementplayer.css','w')
tmp_file.write(src_file.read())
tmp_file.close()
os.system("java -jar yuicompressor-2.4.2.jar ../build/mediaelementplayer.css -o ../build/mediaelementplayer.min.css --charset utf-8 -v")
#COPY skin files
print('Copying Skin Files')
shutil.copy2('css/controls.png','../build/controls.png')
shutil.copy2('css/bigplay.png','../build/bigplay.png')
shutil.copy2('css/loading.gif','../build/loading.gif')
shutil.copy2('css/mejs-skins.css','../build/mejs-skins.css')
shutil.copy2('css/controls-ted.png','../build/controls-ted.png')
shutil.copy2('css/controls-wmp.png','../build/controls-wmp.png')
shutil.copy2('css/controls-wmp-bg.png','../build/controls-wmp-bg.png')
print('DONE!')
| gpl-2.0 |
Anonymous-X6/django | tests/mail/test_sendtestemail.py | 327 | 3088 | from __future__ import unicode_literals
from django.core import mail
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
@override_settings(
ADMINS=(('Admin', 'admin@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
MANAGERS=(('Manager', 'manager@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
)
class SendTestEmailManagementCommand(SimpleTestCase):
"""
Test the sending of a test email using the `sendtestemail` command.
"""
def test_single_receiver(self):
"""
The mail is sent with the correct subject and recipient.
"""
recipient = 'joe@example.com'
call_command('sendtestemail', recipient)
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(mail_message.recipients(), [recipient])
def test_multiple_receivers(self):
"""
The mail may be sent with multiple recipients.
"""
recipients = ['joe@example.com', 'jane@example.com']
call_command('sendtestemail', recipients[0], recipients[1])
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(sorted(mail_message.recipients()), [
'jane@example.com',
'joe@example.com',
])
def test_manager_receivers(self):
"""
The mail should be sent to the email addresses specified in
settings.MANAGERS.
"""
call_command('sendtestemail', '--managers')
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(sorted(mail_message.recipients()), [
'admin_and_manager@example.com',
'manager@example.com',
])
def test_admin_receivers(self):
"""
The mail should be sent to the email addresses specified in
settings.ADMIN.
"""
call_command('sendtestemail', '--admins')
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(sorted(mail_message.recipients()), [
'admin@example.com',
'admin_and_manager@example.com',
])
def test_manager_and_admin_receivers(self):
"""
The mail should be sent to the email addresses specified in both
settings.MANAGERS and settings.ADMINS.
"""
call_command('sendtestemail', '--managers', '--admins')
self.assertEqual(len(mail.outbox), 2)
manager_mail = mail.outbox[0]
self.assertEqual(sorted(manager_mail.recipients()), [
'admin_and_manager@example.com',
'manager@example.com',
])
admin_mail = mail.outbox[1]
self.assertEqual(sorted(admin_mail.recipients()), [
'admin@example.com',
'admin_and_manager@example.com',
])
| bsd-3-clause |
liusen09003110-163-com/linux | tools/perf/scripts/python/syscall-counts.py | 1996 | 1700 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
MarekSuchanek/repocribro | repocribro/controllers/webhooks.py | 1 | 1567 | import flask
from ..models import Repository
webhooks = flask.Blueprint('webhooks', __name__, url_prefix='/webhook/github')
@webhooks.route('', methods=['POST'])
def gh_webhook():
"""Point for GitHub webhook msgs (POST handler)"""
db = flask.current_app.container.get('db')
ext_master = flask.current_app.container.get('ext_master')
gh_api = flask.current_app.container.get('gh_api')
hooks_list = ext_master.call('get_gh_webhook_processors', default={})
hooks = {}
for ext_hooks in hooks_list:
for hook_event in ext_hooks:
if hook_event not in hooks:
hooks[hook_event] = []
hooks[hook_event].extend(ext_hooks[hook_event])
headers = flask.request.headers
agent = headers.get('User-Agent', '')
signature = headers.get('X-Hub-Signature', '')
delivery_id = headers.get('X-GitHub-Delivery', '')
event = headers.get('X-GitHub-Event', '')
data = flask.request.get_json()
if not agent.startswith('GitHub-Hookshot/'):
flask.abort(404)
if data is None or 'repository' not in data:
flask.abort(404)
if not gh_api.webhook_verify_signature(flask.request.data, signature):
flask.abort(404)
repo = db.session.query(Repository).filter_by(
github_id=data['repository']['id']
).first()
if repo is None:
flask.abort(404)
for event_processor in hooks.get(event, []):
event_processor(db=db, repo=repo, data=data, delivery_id=delivery_id)
repo.events_updated()
db.session.commit()
return ''
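# Illustrative extension hook (names other than get_gh_webhook_processors,
# the extension point called above, are hypothetical):
#
#     def process_push(db, repo, data, delivery_id):
#         # 'after' is the new HEAD sha in a GitHub push payload
#         repo.last_push_sha = data['after']
#         db.session.add(repo)
#
#     def get_gh_webhook_processors():
#         return {'push': [process_push]}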
| mit |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_4/tests/modeltests/custom_pk/tests.py | 34 | 5733 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db import transaction, IntegrityError
from django.test import TestCase, skipIfDBFeature
from .models import Employee, Business, Bar, Foo
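# Rough sketch of the models under test (assumed shape of models.py; the
# real fixtures may differ in details such as db_column or field lengths):
#
#     class Employee(models.Model):
#         employee_code = models.IntegerField(primary_key=True)
#         first_name = models.CharField(max_length=20)
#         last_name = models.CharField(max_length=20)
#
#     class Business(models.Model):
#         name = models.CharField(max_length=20, primary_key=True)
#         employees = models.ManyToManyField(Employee)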
class CustomPKTests(TestCase):
def test_custom_pk(self):
dan = Employee.objects.create(
employee_code=123, first_name="Dan", last_name="Jones"
)
self.assertQuerysetEqual(
Employee.objects.all(), [
"Dan Jones",
],
unicode
)
fran = Employee.objects.create(
employee_code=456, first_name="Fran", last_name="Bones"
)
self.assertQuerysetEqual(
Employee.objects.all(), [
"Fran Bones",
"Dan Jones",
],
unicode
)
self.assertEqual(Employee.objects.get(pk=123), dan)
self.assertEqual(Employee.objects.get(pk=456), fran)
self.assertRaises(Employee.DoesNotExist,
lambda: Employee.objects.get(pk=42)
)
# Use the name of the primary key, rather than pk.
self.assertEqual(Employee.objects.get(employee_code=123), dan)
# pk can be used as a substitute for the primary key.
self.assertQuerysetEqual(
Employee.objects.filter(pk__in=[123, 456]), [
"Fran Bones",
"Dan Jones",
],
unicode
)
# The primary key can be accessed via the pk property on the model.
e = Employee.objects.get(pk=123)
self.assertEqual(e.pk, 123)
# Or we can use the real attribute name for the primary key:
self.assertEqual(e.employee_code, 123)
# Fran got married and changed her last name.
fran = Employee.objects.get(pk=456)
fran.last_name = "Jones"
fran.save()
self.assertQuerysetEqual(
Employee.objects.filter(last_name="Jones"), [
"Dan Jones",
"Fran Jones",
],
unicode
)
emps = Employee.objects.in_bulk([123, 456])
self.assertEqual(emps[123], dan)
b = Business.objects.create(name="Sears")
b.employees.add(dan, fran)
self.assertQuerysetEqual(
b.employees.all(), [
"Dan Jones",
"Fran Jones",
],
unicode
)
self.assertQuerysetEqual(
fran.business_set.all(), [
"Sears",
],
lambda b: b.name
)
self.assertEqual(Business.objects.in_bulk(["Sears"]), {
"Sears": b,
})
self.assertQuerysetEqual(
Business.objects.filter(name="Sears"), [
"Sears"
],
lambda b: b.name
)
self.assertQuerysetEqual(
Business.objects.filter(pk="Sears"), [
"Sears",
],
lambda b: b.name
)
# Queries across tables, involving primary key
self.assertQuerysetEqual(
Employee.objects.filter(business__name="Sears"), [
"Dan Jones",
"Fran Jones",
],
unicode,
)
self.assertQuerysetEqual(
Employee.objects.filter(business__pk="Sears"), [
"Dan Jones",
"Fran Jones",
],
unicode,
)
self.assertQuerysetEqual(
Business.objects.filter(employees__employee_code=123), [
"Sears",
],
lambda b: b.name
)
self.assertQuerysetEqual(
Business.objects.filter(employees__pk=123), [
"Sears",
],
lambda b: b.name,
)
self.assertQuerysetEqual(
Business.objects.filter(employees__first_name__startswith="Fran"), [
"Sears",
],
lambda b: b.name
)
def test_unicode_pk(self):
# Primary key may be unicode string
bus = Business.objects.create(name=u'jaźń')
def test_unique_pk(self):
# The primary key must also obviously be unique, so trying to create a
# new object with the same primary key will fail.
e = Employee.objects.create(
employee_code=123, first_name="Frank", last_name="Jones"
)
sid = transaction.savepoint()
self.assertRaises(IntegrityError,
Employee.objects.create, employee_code=123, first_name="Fred", last_name="Jones"
)
transaction.savepoint_rollback(sid)
def test_custom_field_pk(self):
# Regression for #10785 -- Custom fields can be used for primary keys.
new_bar = Bar.objects.create()
new_foo = Foo.objects.create(bar=new_bar)
f = Foo.objects.get(bar=new_bar.pk)
self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
f = Foo.objects.get(bar=new_bar)
        self.assertEqual(f, new_foo)
self.assertEqual(f.bar, new_bar)
# SQLite lets objects be saved with an empty primary key, even though an
# integer is expected. So we can't check for an error being raised in that
# case for SQLite. Remove it from the suite for this next bit.
@skipIfDBFeature('supports_unspecified_pk')
def test_required_pk(self):
# The primary key must be specified, so an error is raised if you
# try to create an object without it.
sid = transaction.savepoint()
self.assertRaises(IntegrityError,
Employee.objects.create, first_name="Tom", last_name="Smith"
)
transaction.savepoint_rollback(sid)
| mit |
ChromiumWebApps/chromium | third_party/closure_linter/closure_linter/not_strict_test.py | 142 | 2332 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import errors
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
checker.GJsLintRunner(),
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| bsd-3-clause |
idrogeno/enigma2 | lib/python/Components/Converter/ClockToText.py | 30 | 3734 | from Converter import Converter
from time import localtime, strftime
from Components.Element import cached
class ClockToText(Converter, object):
DEFAULT = 0
WITH_SECONDS = 1
IN_MINUTES = 2
DATE = 3
FORMAT = 4
AS_LENGTH = 5
TIMESTAMP = 6
FULL = 7
SHORT_DATE = 8
LONG_DATE = 9
VFD = 10
AS_LENGTHHOURS = 11
AS_LENGTHSECONDS = 12
FULL_DATE = 13
# add: date, date as string, weekday, ...
# (whatever you need!)
def __init__(self, type):
Converter.__init__(self, type)
self.fix = ""
if ';' in type:
type, self.fix = type.split(';')
if type == "WithSeconds":
self.type = self.WITH_SECONDS
elif type == "InMinutes":
self.type = self.IN_MINUTES
elif type == "Date":
self.type = self.DATE
elif type == "AsLength":
self.type = self.AS_LENGTH
elif type == "AsLengthHours":
self.type = self.AS_LENGTHHOURS
elif type == "AsLengthSeconds":
self.type = self.AS_LENGTHSECONDS
elif type == "Timestamp":
self.type = self.TIMESTAMP
elif type == "Full":
self.type = self.FULL
elif type == "ShortDate":
self.type = self.SHORT_DATE
elif type == "LongDate":
self.type = self.LONG_DATE
elif type == "FullDate":
self.type = self.FULL_DATE
elif type == "VFD":
self.type = self.VFD
elif "Format" in type:
self.type = self.FORMAT
self.fmt_string = type[7:]
else:
self.type = self.DEFAULT
@cached
def getText(self):
time = self.source.time
if time is None:
return ""
# add/remove 1st space
def fix_space(string):
if "Proportional" in self.fix and t.tm_hour < 10:
return " " + string
if "NoSpace" in self.fix:
return string.lstrip(' ')
return string
# handle durations
if self.type == self.IN_MINUTES:
return ngettext("%d Min", "%d Mins", (time / 60)) % (time / 60)
elif self.type == self.AS_LENGTH:
if time < 0:
return ""
return "%d:%02d" % (time / 60, time % 60)
elif self.type == self.AS_LENGTHHOURS:
if time < 0:
return ""
return "%d:%02d" % (time / 3600, time / 60 % 60)
elif self.type == self.AS_LENGTHSECONDS:
if time < 0:
return ""
return "%d:%02d:%02d" % (time / 3600, time / 60 % 60, time % 60)
elif self.type == self.TIMESTAMP:
return str(time)
t = localtime(time)
if self.type == self.WITH_SECONDS:
# TRANSLATORS: full time representation hour:minute:seconds
return fix_space(_("%2d:%02d:%02d") % (t.tm_hour, t.tm_min, t.tm_sec))
elif self.type == self.DEFAULT:
# TRANSLATORS: short time representation hour:minute
return fix_space(_("%2d:%02d") % (t.tm_hour, t.tm_min))
elif self.type == self.DATE:
# TRANSLATORS: full date representation dayname daynum monthname year in strftime() format! See 'man strftime'
d = _("%A %e %B %Y")
elif self.type == self.FULL:
# TRANSLATORS: long date representation short dayname daynum short monthname hour:minute in strftime() format! See 'man strftime'
d = _("%a %e/%m %-H:%M")
elif self.type == self.SHORT_DATE:
# TRANSLATORS: short date representation short dayname daynum short monthname in strftime() format! See 'man strftime'
d = _("%a %e/%m")
elif self.type == self.LONG_DATE:
# TRANSLATORS: long date representations dayname daynum monthname in strftime() format! See 'man strftime'
d = _("%A %e %B")
elif self.type == self.FULL_DATE:
# TRANSLATORS: full date representations sort dayname daynum monthname long year in strftime() format! See 'man strftime'
d = _("%a %e %B %Y")
elif self.type == self.VFD:
# TRANSLATORS: VFD hour:minute daynum short monthname in strftime() format! See 'man strftime'
d = _("%k:%M %e/%m")
elif self.type == self.FORMAT:
d = self.fmt_string
else:
return "???"
return strftime(d, t)
text = property(getText)
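# Illustrative skin usage (a sketch; the widget source name is an
# assumption): a skin selects the converter mode via its argument string:
#
#     <widget source="global.CurrentTime" render="Label">
#         <convert type="ClockToText">Format:%H:%M</convert>
#     </widget>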
| gpl-2.0 |
mrknow/filmkodi | plugin.video.fanfilm/resources/lib/libraries/cachemeta.py | 2 | 2532 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,hashlib,time
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
from resources.lib.libraries import control
def get(function, timeout, *args):
try:
response = None
f = repr(function)
f = re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f)
a = hashlib.md5()
for i in args: a.update(str(i))
a = str(a.hexdigest())
except:
pass
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.cachemetaFile)
dbcur = dbcon.cursor()
dbcur.execute("SELECT * FROM rel_list WHERE func = '%s' AND args = '%s'" % (f, a))
match = dbcur.fetchone()
response = eval(match[2].encode('utf-8'))
t1 = int(match[3])
t2 = int(time.time())
update = (abs(t2 - t1) / 3600) >= int(timeout)
if update == False:
return response
except:
pass
try:
r = function(*args)
if (r == None or r == []) and not response == None:
return response
elif (r == None or r == []):
return r
except:
return
try:
insert = True
if r['cover_url'] == '' or r['backdrop_url'] == '': insert = False
r = repr(r)
t = int(time.time())
dbcur.execute("CREATE TABLE IF NOT EXISTS rel_list (""func TEXT, ""args TEXT, ""response TEXT, ""added TEXT, ""UNIQUE(func, args)"");")
dbcur.execute("DELETE FROM rel_list WHERE func = '%s' AND args = '%s'" % (f, a))
if insert == True: dbcur.execute("INSERT INTO rel_list Values (?, ?, ?, ?)", (f, a, r, t))
dbcon.commit()
except:
pass
try:
return eval(r.encode('utf-8'))
except:
pass
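# Illustrative caller (a sketch; fetch_meta is hypothetical). The timeout is
# expressed in hours and compared against the row's stored insertion time:
#
#     def fetch_meta(imdb_id):
#         ...  # expensive metadata lookup returning a dict with cover_url
#
#     meta = cachemeta.get(fetch_meta, 24, imdb_id)  # cache for 24 hours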
| apache-2.0 |
xiaoyaozi5566/DynamicCache | src/arch/x86/isa/insts/general_purpose/flags/load_and_store.py | 91 | 2246 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop SAHF {
wruflags ah, t0, dataSize=1
};
def macroop LAHF {
ruflags ah, dataSize=1
};
'''
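# Sketch of the architectural semantics (not part of the gem5 source): LAHF
# packs SF:ZF:0:AF:0:PF:1:CF into AH, and SAHF writes those same AH bits back
# into the low byte of RFLAGS. In Python terms:
#
#   FLAG_MASK = 0b11010101                   # SF, ZF, AF, PF, CF
#   def lahf(rflags):
#       return (rflags & FLAG_MASK) | 0b10   # bit 1 of FLAGS always reads 1
#   def sahf(rflags, ah):
#       return (rflags & ~FLAG_MASK) | (ah & FLAG_MASK)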
| bsd-3-clause |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 354 | 3406 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing the node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
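# Example usage (not part of this module): pick a builder and hand it to an
# html5lib parser.
#
#   import html5lib
#   from html5lib import treebuilders
#   parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("etree"))
#   document = parser.parse("<p>Hello")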
| mit |
Eric-Gaudiello/tensorflow_dev | tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
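# Illustrative only: parse_version returns PEP 440-aware objects for valid
# versions, so development and pre-releases sort before final releases, and
# anything unparseable falls back to a LegacyVersion that sorts before all
# valid versions.
#
#   assert parse_version('1.0.dev1') < parse_version('1.0a1') < parse_version('1.0')
#   assert parse_version('not-a-version') < parse_version('0.0')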
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
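# Illustrative only: Mac OS X platform strings compare by major version and
# machine type, and an egg built for an older minor release is usable on a
# newer one, but not the reverse.
#
#   assert compatible_platforms('macosx-10.3-ppc', 'macosx-10.5-ppc')
#   assert not compatible_platforms('macosx-10.5-ppc', 'macosx-10.3-ppc')
#   assert compatible_platforms(None, 'linux-x86_64')   # None matches anything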
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
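# Typical usage sketch (not part of the module): the module-level
# `working_set` is populated from sys.path at import time; require()
# activates distributions (assuming they are installed, else it raises
# DistributionNotFound) and iter_entry_points() discovers plugins.
#
#   import pkg_resources
#   pkg_resources.require('setuptools')
#   for ep in pkg_resources.working_set.iter_entry_points('console_scripts'):
#       print(ep.name)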
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
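# Illustrative only: an Environment is a snapshot keyed by lower-cased
# project name, and environments can be merged with `+`
# ('/opt/plugins' below is a hypothetical directory).
#
#   env = Environment()               # scans sys.path by default
#   dists = env['setuptools']         # newest-to-oldest list (may be empty)
#   merged = env + Environment(['/opt/plugins'])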
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
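# Illustrative only: with PYTHON_EGG_CACHE unset on a POSIX system, the
# cache resolves under the user's home directory.
#
#   os.environ.pop('PYTHON_EGG_CACHE', None)
#   assert get_default_cache() == os.path.expanduser('~/.python-eggs')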
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
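# Illustrative only: how the sanitizers above compose when building an
# egg-style filename.
#
#   assert safe_name('my.project?!') == 'my.project-'
#   assert safe_version('2.1-rc1') == '2.1rc1'        # PEP 440 normalization
#   assert to_filename(safe_name('my-project')) == 'my_project'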
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented
on Jython and has been superseded by the 'ast' module in Python 2.6
and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
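# Illustrative only: markers are evaluated against the running interpreter,
# so the result mirrors the corresponding stdlib value; invalid_marker
# returns the normalized SyntaxError for bad input and False for good input.
#
#   assert evaluate_marker('os_name == "posix"') == (os.name == 'posix')
#   assert invalid_marker('x ===') is not False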
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
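# Illustrative only: on Python 2.6 the subclass supplies the missing
# context-manager protocol; on later versions __new__ hands back a plain
# ZipFile ('dist.egg' below is a hypothetical archive).
#
#   with ContextualZipFile('dist.egg') as zf:
#       names = zf.namelist()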
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO'
def get_metadata(self, name):
if name == 'PKG-INFO':
with open(self.path, 'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
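# Hedged usage sketch (added for illustration; the path is hypothetical):
#
# fm = FileMetadata('/tmp/PKG-INFO')
# fm.has_metadata('PKG-INFO') # -> True
# fm.has_metadata('requires.txt') # -> False
# fm.get_metadata('PKG-INFO') # -> contents of /tmp/PKG-INFO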
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
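# Illustrative sketch (not in the original source): scanning a sys.path
# entry dispatches to whichever finder was registered for its importer type
# (see find_on_path below); the directory shown is hypothetical.
#
# for dist in find_distributions('/path/to/site-packages'):
#     print(dist.project_name, dist.version)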
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
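# Worked example (added for illustration): blank lines and comment-only
# lines are skipped, and nested iterables are flattened recursively.
#
# list(yield_lines("first\n# comment\n\nsecond")) # -> ['first', 'second']
# list(yield_lines(["a", ["b", "# skipped"]])) # -> ['a', 'b']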
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
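# Worked example (added for illustration; the filename is hypothetical):
#
# m = EGG_NAME('FakePkg-1.2-py2.7-linux_x86_64')
# m.group('name', 'ver', 'pyver', 'plat')
# # -> ('FakePkg', '1.2', '2.7', 'linux_x86_64')
# # Dashes inside the name or version would confuse this pattern, which is
# # why to_filename() (used by egg_name() below) replaces '-' with '_'.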
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
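# Worked example (added for illustration; all names are hypothetical):
#
# ep = EntryPoint.parse('console = mypkg.cli:main [extra1]')
# ep.name, ep.module_name, ep.attrs, ep.extras
# # -> ('console', 'mypkg.cli', ('main',), ('extra1',))
# str(ep) # -> 'console = mypkg.cli:main [extra1]'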
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
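# Hedged sketch (added for illustration; names are hypothetical): parse_map
# accepts either a dict of group -> lines or INI-like text that is split
# with split_sections().
#
# eps = EntryPoint.parse_map({'console_scripts': ['foo = mypkg:main']})
# eps['console_scripts']['foo'].module_name # -> 'mypkg'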
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
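# Worked example (added for illustration; the URL is hypothetical):
#
# _remove_md5_fragment('http://example.com/FakePkg-1.0.tar.gz#md5=d41d8cd9')
# # -> 'http://example.com/FakePkg-1.0.tar.gz'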
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':', 1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
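# Hedged sketch (added for illustration; the distribution is hypothetical):
#
# d = Distribution(project_name='FakePkg', version='1.2',
#                  py_version='2.7', platform='linux_x86_64')
# d.egg_name() # -> 'FakePkg-1.2-py2.7-linux_x86_64'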
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
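# Worked example (added for illustration; package names are hypothetical):
#
# reqs = list(parse_requirements("FakePkg[extra]>=1.0,<2.0\nOtherPkg"))
# reqs[0].project_name, reqs[0].extras, reqs[0].specs
# # -> ('FakePkg', ('extra',), [('>=', '1.0'), ('<', '2.0')])
# reqs[1].key # -> 'otherpkg'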
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
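# Illustrative sketch (not in the original source): adapter lookup walks the
# MRO, so an entry registered for `object` acts as a catch-all default.
#
# registry = {object: 'default'} # hypothetical registry
# _find_adapter(registry, 42) # -> 'default', via int's MRO ending in object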
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
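# Worked example (added for illustration): lines before the first section
# header are grouped under a ``None`` section, comments are skipped.
#
# list(split_sections("top\n[sec1]\na\n# comment\nb"))
# # -> [(None, ['top']), ('sec1', ['a', 'b'])]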
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| gpl-3.0 |
khosrow/metpx | sundew/unittests/unittest_Ingestor.py | 1 | 2061 | # -*- coding: iso-8859-1 -*-
#############################################################################################
# Name: unittest_Ingestor.py
# Author: Jun Hu
# Date: 2012-04-30
# Description: test cases for Ingestor class
#############################################################################################
import sys,os,unittest
sys.path.insert(1, '../sundew/lib/')
os.environ['PXROOT']="."
from Logger import Logger
from Ingestor import Ingestor
from Source import Source
class unittest_Ingestor(unittest.TestCase):
def setUp(self,logFile='log/Template.log'):
self.logger = Logger(logFile, 'DEBUG', 'Sub')
self.logger = self.logger.getLogger()
self.source = Source('source-test', self.logger)
self.ingestor = Ingestor(self.source,self.logger)
self.ingestor.setClients()
def test_Ingestor(self):
print self.ingestor.clients
#filter.ingestor.ingestSingleFile(igniter)
#filter.ingestor.ingestBulletinFile(igniter)
#source.ingestor.ingestSingleFile(igniter)
#source.ingestor.ingestBulletinFile(igniter)
#source.ingestor.ingestCollection(igniter)
#self.ingestor.setFeeds(self.feeds)
#self.ingestor.setClients()
#source.ingestor.createDir('/apps/px/turton', source.ingestor.dbDirsCache)
#self.drp = DirectRoutingParser(pathFichierCircuit, self.source.ingestor.allNames, logger)
#self.source.ingestor.ingest()
#self.source.ingestor.getMatchingClientNamesFromMasks(nomFichier, clist)
#self.source.ingestor.ingest(tempNom, nomFichier, clist)
#self.source.ingestor.ingest()
#clist = self.source.ingestor.getMatchingClientNamesFromMasks(nomFichier, clist)
#self.source.ingestor.ingest(tempNom, nomFichier, clist)
#self.assertEqual(None, None)
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(unittest_Ingestor))
return suite
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(unittest_Ingestor)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 |
Zekom/fofix | src/Scene.py | 4 | 3747 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2006 Sami Kyöstilä #
# 2009 FoFiX Team #
# 2009 akedrou #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from View import BackgroundLayer
from Input import KeyListener
from Camera import Camera
from OpenGL.GL import *
from OpenGL.GLU import *
class Scene(BackgroundLayer, KeyListener):
def __init__(self, engine):
self.engine = engine
self.actors = []
self.camera = Camera()
self.world = None
self.space = None
self.time = 0.0
self.players = self.engine.world.getPlayers()
self.controls = engine.input.controls
#for simplification of theme writing
self.fontDict = self.engine.data.fontDict
self.geometry = self.engine.view.geometry[2:4]
self.fontScreenBottom = self.engine.data.fontScreenBottom
self.aspectRatio = self.engine.view.aspectRatio
self.drawImage = self.engine.drawImage
self.drawStarScore = self.engine.drawStarScore
def addPlayer(self, player):
self.players.append(player)
def removePlayer(self, player):
self.players.remove(player)
def run(self, ticks):
self.time += ticks / 50.0
def shown(self):
self.engine.input.addKeyListener(self)
def hidden(self):
self.engine.input.removeKeyListener(self)
def keyPressed(self, key, unicode):
c = self.controls.keyPressed(key)
if c:
return True
return False
def keyReleased(self, key):
c = self.controls.keyReleased(key)
if c:
return True
return False
def render3D(self):
pass
def render(self, visibility, topMost):
# render the scene
try:
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(60, self.engine.view.aspectRatio, 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glPushMatrix()
self.camera.apply()
self.render3D()
finally:
glPopMatrix()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
| gpl-2.0 |
xiandiancloud/ji | common/test/acceptance/pages/lms/courseware.py | 6 | 1321 | """
Courseware page.
"""
from .course_page import CoursePage
class CoursewarePage(CoursePage):
"""
Courseware page.
"""
url_path = "courseware/"
xblock_component_selector = '.vert .xblock'
def is_browser_on_page(self):
return self.q(css='body.courseware').present
@property
def num_xblock_components(self):
"""
Return the number of rendered xblocks within the unit on the page
"""
return len(self.q(css=self.xblock_component_selector))
def xblock_component_type(self, index=0):
"""
Extract rendered xblock component type.
Returns:
str: xblock module type
index: which xblock to query, where the index is the vertical display within the page
(default is 0)
"""
return self.q(css=self.xblock_component_selector).attrs('data-block-type')[index]
def xblock_component_html_content(self, index=0):
"""
Extract rendered xblock component html content.
Returns:
str: xblock module html content
index: which xblock to query, where the index is the vertical display within the page
(default is 0)
"""
return self.q(css=self.xblock_component_selector).attrs('innerHTML')[index].strip()
| agpl-3.0 |
invisiblek/python-for-android | python3-alpha/extra_modules/gdata/dublincore/data.py | 126 | 2106 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
DC_TEMPLATE = '{http://purl.org/dc/terms/}%s'
class Creator(atom.core.XmlElement):
"""Entity primarily responsible for making the resource."""
_qname = DC_TEMPLATE % 'creator'
class Date(atom.core.XmlElement):
"""Point or period of time associated with an event in the lifecycle of the resource."""
_qname = DC_TEMPLATE % 'date'
class Description(atom.core.XmlElement):
"""Account of the resource."""
_qname = DC_TEMPLATE % 'description'
class Format(atom.core.XmlElement):
"""File format, physical medium, or dimensions of the resource."""
_qname = DC_TEMPLATE % 'format'
class Identifier(atom.core.XmlElement):
"""An unambiguous reference to the resource within a given context."""
_qname = DC_TEMPLATE % 'identifier'
class Language(atom.core.XmlElement):
"""Language of the resource."""
_qname = DC_TEMPLATE % 'language'
class Publisher(atom.core.XmlElement):
"""Entity responsible for making the resource available."""
_qname = DC_TEMPLATE % 'publisher'
class Rights(atom.core.XmlElement):
"""Information about rights held in and over the resource."""
_qname = DC_TEMPLATE % 'rights'
class Subject(atom.core.XmlElement):
"""Topic of the resource."""
_qname = DC_TEMPLATE % 'subject'
class Title(atom.core.XmlElement):
"""Name given to the resource."""
_qname = DC_TEMPLATE % 'title'
| apache-2.0 |
rafaeltomesouza/frontend-class1 | aula2/a11/linkedin/client/.gradle/nodejs/node-v7.5.0-darwin-x64/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | 10585 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
# Put sources_to_index in it's own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
| mit |
Work4Labs/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/delete_regress/models.py | 28 | 1583 | from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = generic.GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = generic.GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child)
toy = models.ForeignKey(Toy)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith)
note = models.TextField()
class Contact(models.Model):
label = models.CharField(max_length=100)
class Email(Contact):
email_address = models.EmailField(max_length=100)
class Researcher(models.Model):
contacts = models.ManyToManyField(Contact, related_name="research_contacts")
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
| gpl-3.0 |
agaffney/ansible | test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py | 6 | 106436 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import argparse
import ast
import datetime
import json
import errno
import os
import re
import subprocess
import sys
import tempfile
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from distutils.version import StrictVersion, LooseVersion
from fnmatch import fnmatch
import yaml
from ansible import __version__ as ansible_version
from ansible.executor.module_common import REPLACER_WINDOWS
from ansible.module_utils.common._collections_compat import Mapping
from ansible.plugins.loader import fragment_loader
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.plugin_docs import REJECTLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring
from ansible.utils.version import SemanticVersion
from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import PY3, with_metaclass, string_types
if PY3:
# Because there is no ast.TryExcept in Python 3 ast module
TRY_EXCEPT = ast.Try
# REPLACER_WINDOWS from ansible.executor.module_common is byte
# string but we need unicode for Python 3
REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
else:
TRY_EXCEPT = ast.TryExcept
REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
INDENT_REGEX = re.compile(r'([\t]*)')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
REJECTLIST_IMPORTS = {
'requests': {
'new_only': True,
'error': {
'code': 'use-module-utils-urls',
'msg': ('requests import found, should use '
'ansible.module_utils.urls instead')
}
},
r'boto(?:\.|$)': {
'new_only': True,
'error': {
'code': 'use-boto3',
'msg': 'boto import found, new modules should use boto3'
}
},
}
SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
OS_CALL_REGEX = re.compile(r'os\.call.*')
LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
def compare_dates(d1, d2):
try:
date1 = parse_isodate(d1, allow_date=True)
date2 = parse_isodate(d2, allow_date=True)
return date1 == date2
except ValueError:
# At least one of d1 and d2 cannot be parsed. Simply compare values.
return d1 == d2
class ReporterEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Exception):
return str(o)
return json.JSONEncoder.default(self, o)
class Reporter:
def __init__(self):
self.files = OrderedDict()
def _ensure_default_entry(self, path):
try:
self.files[path]
except KeyError:
self.files[path] = {
'errors': [],
'warnings': [],
'traces': [],
'warning_traces': []
}
def _log(self, path, code, msg, level='error', line=0, column=0):
self._ensure_default_entry(path)
lvl_dct = self.files[path]['%ss' % level]
lvl_dct.append({
'code': code,
'msg': msg,
'line': line,
'column': column
})
def error(self, *args, **kwargs):
self._log(*args, level='error', **kwargs)
def warning(self, *args, **kwargs):
self._log(*args, level='warning', **kwargs)
def trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['traces'].append(tracebk)
def warning_trace(self, path, tracebk):
self._ensure_default_entry(path)
self.files[path]['warning_traces'].append(tracebk)
@staticmethod
@contextmanager
def _output_handle(output):
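        # output == '-' writes the report to sys.stdout; anything else is
        # opened as a file path. Either handle is flushed and closed when the
        # context exits.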
if output != '-':
handle = open(output, 'w+')
else:
handle = sys.stdout
yield handle
handle.flush()
handle.close()
@staticmethod
def _filter_out_ok(reports):
temp_reports = OrderedDict()
for path, report in reports.items():
if report['errors'] or report['warnings']:
temp_reports[path] = report
return temp_reports
def plain(self, warnings=False, output='-'):
"""Print out the test results in plain format
output is ignored here for now
"""
ret = []
for path, report in Reporter._filter_out_ok(self.files).items():
traces = report['traces'][:]
if warnings and report['warnings']:
traces.extend(report['warning_traces'])
for trace in traces:
print('TRACE:')
print('\n '.join((' %s' % trace).splitlines()))
for error in report['errors']:
error['path'] = path
print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
ret.append(1)
if warnings:
for warning in report['warnings']:
warning['path'] = path
print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
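        # Exit status 3 signals that at least one error was reported.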
return 3 if ret else 0
def json(self, warnings=False, output='-'):
"""Print out the test results in json format
warnings is not respected in this output
"""
ret = [len(r['errors']) for r in self.files.values()]
with Reporter._output_handle(output) as handle:
print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
return 3 if sum(ret) else 0
class Validator(with_metaclass(abc.ABCMeta, object)):
"""Validator instances are intended to be run on a single object. if you
are scanning multiple objects for problems, you'll want to have a separate
Validator for each one."""
def __init__(self, reporter=None):
self.reporter = reporter
@abc.abstractproperty
def object_name(self):
"""Name of the object we validated"""
pass
@abc.abstractproperty
def object_path(self):
"""Path of the object we validated"""
pass
@abc.abstractmethod
def validate(self):
"""Run this method to generate the test results"""
pass
class ModuleValidator(Validator):
REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
'shippable.yml',
'.gitattributes', '.gitmodules', 'COPYING',
'__init__.py', 'VERSION', 'test-docs.sh'))
REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['MODULE'])
PS_DOC_REJECTLIST = frozenset((
'async_status.ps1',
'slurp.ps1',
'setup.ps1'
))
# win_dsc is a dynamic arg spec, the docs won't ever match
PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', ))
ACCEPTLIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
base_branch=None, git_cache=None, reporter=None, routing=None):
super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(self.path)
self.name = os.path.splitext(self.basename)[0]
self.analyze_arg_spec = analyze_arg_spec
self._Version = LooseVersion
self._StrictVersion = StrictVersion
self.collection = collection
self.collection_name = 'ansible.builtin'
if self.collection:
self._Version = SemanticVersion
self._StrictVersion = SemanticVersion
collection_namespace_path, collection_name = os.path.split(self.collection)
self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
self.routing = routing
self.collection_version = None
if collection_version is not None:
self.collection_version_str = collection_version
self.collection_version = SemanticVersion(collection_version)
self.base_branch = base_branch
self.git_cache = git_cache or GitCache()
self._python_module_override = False
with open(path) as f:
self.text = f.read()
self.length = len(self.text.splitlines())
try:
self.ast = ast.parse(self.text)
except Exception:
self.ast = None
if base_branch:
self.base_module = self._get_base_file()
else:
self.base_module = None
def _create_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return LooseVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._Version(v)
def _create_strict_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return StrictVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._StrictVersion(v)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.base_module:
return
try:
os.remove(self.base_module)
except Exception:
pass
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def _get_collection_meta(self):
"""Implement if we need this for version_added comparisons
"""
pass
def _python_module(self):
if self.path.endswith('.py') or self._python_module_override:
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
def _just_docs(self):
"""Module can contain just docs and from __future__ boilerplate
"""
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allowed from __future__ imports
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
for future_import in child.names:
if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
break
else:
continue
return False
return True
except AttributeError:
return False
def _get_base_branch_module_path(self):
"""List all paths within lib/ansible/modules to try and match a moved module"""
return self.git_cache.base_module_paths.get(self.object_name)
def _has_alias(self):
"""Return true if the module has any aliases."""
return self.object_name in self.git_cache.head_aliased_modules
def _get_base_file(self):
# In case of module moves, look for the original location
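        # (illustration, assuming base_branch='devel'): the command below runs
        #   git show devel:lib/ansible/modules/files/copy.py
        # and the captured stdout is written to a temporary file.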
base_path = self._get_base_branch_module_path()
command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if int(p.returncode) != 0:
return None
t = tempfile.NamedTemporaryFile(delete=False)
t.write(stdout)
t.close()
return t.name
def _is_new_module(self):
if self._has_alias():
return False
return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
def _check_interpreter(self, powershell=False):
if powershell:
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code='missing-powershell-interpreter',
msg='Interpreter line is not "#!powershell"'
)
return
if not self.text.startswith('#!/usr/bin/python'):
self.reporter.error(
path=self.object_path,
code='missing-python-interpreter',
msg='Interpreter line is not "#!/usr/bin/python"',
)
def _check_type_instead_of_isinstance(self, powershell=False):
if powershell:
return
for line_no, line in enumerate(self.text.splitlines()):
typekeyword = TYPE_REGEX.match(line)
if typekeyword:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='unidiomatic-typecheck',
msg=('Type comparison using type() found. '
'Use isinstance() instead'),
line=line_no + 1
)
def _check_for_sys_exit(self):
# Optimize out the happy path
if 'sys.exit' not in self.text:
return
for line_no, line in enumerate(self.text.splitlines()):
sys_exit_usage = SYS_EXIT_REGEX.match(line)
if sys_exit_usage:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='use-fail-json-not-sys-exit',
msg='sys.exit() call found. Should be exit_json/fail_json',
line=line_no + 1
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
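        # A conforming short-form header (illustration) is a single line like:
        #   # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)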
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code='missing-gplv3-license',
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code='use-short-gplv3-license',
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_subprocess(self):
for child in self.ast.body:
if isinstance(child, ast.Import):
if child.names[0].name == 'subprocess':
for line_no, line in enumerate(self.text.splitlines()):
sp_match = SUBPROCESS_REGEX.search(line)
if sp_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-popen',
msg=('subprocess.Popen call found. Should be module.run_command'),
line=(line_no + 1),
column=(sp_match.span()[0] + 1)
)
def _check_for_os_call(self):
if 'os.call' in self.text:
for line_no, line in enumerate(self.text.splitlines()):
os_call_match = OS_CALL_REGEX.search(line)
if os_call_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-os-call',
msg=('os.call() call found. Should be module.run_command'),
line=(line_no + 1),
column=(os_call_match.span()[0] + 1)
)
def _find_blacklist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for blacklist_import, options in REJECTLIST_IMPORTS.items():
if re.search(blacklist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
def _find_module_utils(self, main):
linenos = []
found_basic = False
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
names = []
try:
names.append(child.module)
if child.module.endswith('.basic'):
found_basic = True
except AttributeError:
pass
names.extend([n.name for n in child.names])
if [n for n in names if n.startswith('ansible.module_utils')]:
linenos.append(child.lineno)
for name in child.names:
if ('module_utils' in getattr(child, 'module', '') and
isinstance(name, ast.alias) and
name.name == '*'):
msg = (
'module-utils-specific-import',
('module_utils imports should import specific '
'components, not "*"')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
if (isinstance(name, ast.alias) and
name.name == 'basic'):
found_basic = True
if not found_basic:
self.reporter.warning(
path=self.object_path,
code='missing-module-utils-basic-import',
msg='Did not find "ansible.module_utils.basic" import'
)
return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos)
def _find_main_call(self, look_for="main"):
""" Ensure that the module ends with:
if __name__ == '__main__':
main()
OR, in the case of modules that are in the docs-only deprecation phase
if __name__ == '__main__':
removed_module()
"""
lineno = False
if_bodies = []
for child in self.ast.body:
if isinstance(child, ast.If):
try:
if child.test.left.id == '__name__':
if_bodies.extend(child.body)
except AttributeError:
pass
bodies = self.ast.body
bodies.extend(if_bodies)
for child in bodies:
# validate that the next to last line is 'if __name__ == "__main__"'
if child.lineno == (self.length - 1):
mainchecked = False
try:
if isinstance(child, ast.If) and \
child.test.left.id == '__name__' and \
len(child.test.ops) == 1 and \
isinstance(child.test.ops[0], ast.Eq) and \
child.test.comparators[0].s == '__main__':
mainchecked = True
except Exception:
pass
if not mainchecked:
self.reporter.error(
path=self.object_path,
code='missing-if-name-main',
msg='Next to last line should be: if __name__ == "__main__":',
line=child.lineno
)
# validate that the final line is a call to main()
if isinstance(child, ast.Expr):
if isinstance(child.value, ast.Call):
if (isinstance(child.value.func, ast.Name) and
child.value.func.id == look_for):
lineno = child.lineno
if lineno < self.length - 1:
self.reporter.error(
path=self.object_path,
code='last-line-main-call',
msg=('Call to %s() not the last line' % look_for),
line=lineno
)
if not lineno:
self.reporter.error(
path=self.object_path,
code='missing-main-call',
msg=('Did not find a call to %s()' % look_for)
)
return lineno or 0
def _find_has_import(self):
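        # Pattern this check expects (illustration):
        #   try:
        #       import foo
        #       HAS_FOO = True
        #   except ImportError:
        #       HAS_FOO = False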
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code='try-except-missing-has',
msg='Found Try/Except block without HAS_ assignment'
)
def _ensure_imports_below_docs(self, doc_info, first_callable):
try:
min_doc_line = min(
[doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
)
except ValueError:
# We can't perform this validation, as there are no DOCs provided at all
return
max_doc_line = max(
[doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
)
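        # min_doc_line/max_doc_line bracket the DOCUMENTATION/EXAMPLES/RETURN
        # assignments; every import must fall strictly between max_doc_line and
        # the first function or class definition (first_callable).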
import_lines = []
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
# allowed from __future__ imports
for future_import in child.names:
if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
self.reporter.error(
path=self.object_path,
code='illegal-future-imports',
msg=('Only the following from __future__ imports are allowed: %s'
% ', '.join(self.ACCEPTLIST_FUTURE_IMPORTS)),
line=child.lineno
)
break
                    else:  # for-else. If we didn't find a problem and break out of the loop, then this is a legal import
continue
import_lines.append(child.lineno)
if child.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation variables. '
'All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
import_lines.append(grandchild.lineno)
if grandchild.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation '
'variables. All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
for import_line in import_lines:
if not (max_doc_line < import_line < first_callable):
msg = (
'import-placement',
('Imports should be directly below DOCUMENTATION/EXAMPLES/'
'RETURN.')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
def _validate_ps_replacers(self):
# loop all (for/else + error)
# get module list for each
# check "shape" of each module name
module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
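        # Illustrative lines these patterns match (assumed examples):
        #   #Requires -Module Ansible.ModuleUtils.Legacy
        #   #AnsibleRequires -CSharpUtil Ansible.Basic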
found_requires = False
for req_stmt in re.finditer(module_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-utils-per-requires',
msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.psm1'):
self.reporter.error(
path=self.object_path,
code='invalid-requires-extension',
msg='Module #Requires should not end in .psm1: "%s"' % module_name
)
for req_stmt in re.finditer(csharp_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-csharp-utils-per-requires',
msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.cs'):
self.reporter.error(
path=self.object_path,
code='illegal-extension-cs',
msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
)
# also accept the legacy #POWERSHELL_COMMON replacer signal
if not found_requires and REPLACER_WINDOWS not in self.text:
self.reporter.error(
path=self.object_path,
code='missing-module-utils-import-csharp-requirements',
msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
)
def _find_ps_docs_py_file(self):
if self.object_name in self.PS_DOC_REJECTLIST:
return
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code='missing-python-doc',
msg='Missing python documentation file'
)
return py_path
def _get_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if not isinstance(grandchild, ast.Name):
continue
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.s
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.s
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.s
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
return docs
def _validate_docs_schema(self, doc, schema, name, error_code):
# TODO: Add line/col
errors = []
try:
schema(doc)
except Exception as e:
for error in e.errors:
error.data = doc
errors.extend(e.errors)
for error in errors:
path = [str(p) for p in error.path]
local_error_code = getattr(error, 'ansible_error_code', error_code)
if isinstance(error.data, dict):
error_message = humanize_error(error.data, error)
else:
error_message = error
if path:
combined_path = '%s.%s' % (name, '.'.join(path))
else:
combined_path = name
self.reporter.error(
path=self.object_path,
code=local_error_code,
msg='%s: %s' % (combined_path, error_message)
)
def _validate_docs(self):
doc_info = self._get_docs()
doc = None
documentation_exists = False
examples_exist = False
returns_exist = False
# We have three ways of marking deprecated/removed files. Have to check each one
# individually and then make sure they all agree
filename_deprecated_or_removed = False
deprecated = False
removed = False
doc_deprecated = None # doc legally might not exist
routing_says_deprecated = False
if self.object_name.startswith('_') and not os.path.islink(self.object_path):
filename_deprecated_or_removed = True
# We are testing a collection
if self.routing:
routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
if routing_deprecation:
# meta/runtime.yml says this is deprecated
routing_says_deprecated = True
deprecated = True
if not removed:
if not bool(doc_info['DOCUMENTATION']['value']):
self.reporter.error(
path=self.object_path,
code='missing-documentation',
msg='No DOCUMENTATION provided'
)
else:
documentation_exists = True
doc, errors, traces = parse_yaml(
doc_info['DOCUMENTATION']['value'],
doc_info['DOCUMENTATION']['lineno'],
self.name, 'DOCUMENTATION'
)
if doc:
add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code='documentation-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not errors and not traces:
missing_fragment = False
with CaptureStd():
try:
get_docstring(self.path, fragment_loader, verbose=True,
collection_name=self.collection_name, is_module=True)
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.error(
path=self.object_path,
code='missing-doc-fragment',
msg='DOCUMENTATION fragment missing: %s' % fragment
)
missing_fragment = True
except Exception as e:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
self.reporter.error(
path=self.object_path,
code='documentation-error',
msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
)
if not missing_fragment:
add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True)
if 'options' in doc and doc['options'] is None:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-options',
msg='DOCUMENTATION.options must be a dictionary/hash when used',
)
if 'deprecated' in doc and doc.get('deprecated'):
doc_deprecated = True
doc_deprecation = doc['deprecated']
documentation_collection = doc_deprecation.get('removed_from_collection')
if documentation_collection != self.collection_name:
self.reporter.error(
path=self.object_path,
code='deprecation-wrong-collection',
msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
documentation_collection, self.collection_name)
)
else:
doc_deprecated = False
if os.path.islink(self.object_path):
# This module has an alias, which we can tell as it's a symlink
# Rather than checking for `module: $filename` we need to check against the true filename
self._validate_docs_schema(
doc,
doc_schema(
os.readlink(self.object_path).split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
),
'DOCUMENTATION',
'invalid-documentation',
)
else:
# This is the normal case
self._validate_docs_schema(
doc,
doc_schema(
self.object_name.split('.')[0],
for_collection=bool(self.collection),
deprecated_module=deprecated,
),
'DOCUMENTATION',
'invalid-documentation',
)
if not self.collection:
existing_doc = self._check_for_new_args(doc)
self._check_version_added(doc, existing_doc)
if not bool(doc_info['EXAMPLES']['value']):
self.reporter.error(
path=self.object_path,
code='missing-examples',
msg='No EXAMPLES provided'
)
else:
_doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
doc_info['EXAMPLES']['lineno'],
self.name, 'EXAMPLES', load_all=True)
for error in errors:
self.reporter.error(
path=self.object_path,
code='invalid-examples',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
if not bool(doc_info['RETURN']['value']):
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code='missing-return',
msg='No RETURN provided'
)
else:
self.reporter.warning(
path=self.object_path,
code='missing-return-legacy',
msg='No RETURN provided'
)
else:
data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
doc_info['RETURN']['lineno'],
self.name, 'RETURN')
if data:
add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
'RETURN', 'return-syntax-error')
for error in errors:
self.reporter.error(
path=self.object_path,
code='return-syntax-error',
**error
)
for trace in traces:
self.reporter.trace(
path=self.object_path,
tracebk=trace
)
# Check for mismatched deprecation
if not self.collection:
mismatched_deprecation = True
if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
mismatched_deprecation = False
else:
if (filename_deprecated_or_removed and deprecated and doc_deprecated):
mismatched_deprecation = False
if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
mismatched_deprecation = False
if mismatched_deprecation:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
                    msg='Module deprecation/removed must agree in documentation, by prepending filename with'
' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
' documentation for removed'
)
else:
# We are testing a collection
if self.object_name.startswith('_'):
self.reporter.error(
path=self.object_path,
code='collections-no-underscore-on-deprecation',
msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
)
if not (doc_deprecated == routing_says_deprecated):
# DOCUMENTATION.deprecated and meta/runtime.yml disagree
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
)
elif routing_says_deprecated:
# Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
# Make sure they give the same version or date.
routing_date = routing_deprecation.get('removal_date')
routing_version = routing_deprecation.get('removal_version')
# The versions and dates in the module documentation are auto-tagged, so remove the tag
# to make comparison possible and to avoid confusing the user.
documentation_date = doc_deprecation.get('removed_at_date')
documentation_version = doc_deprecation.get('removed_in')
if not compare_dates(routing_date, documentation_date):
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
routing_date, documentation_date)
)
if routing_version != documentation_version:
self.reporter.error(
path=self.object_path,
code='deprecation-mismatch',
msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
routing_version, documentation_version)
)
# In the future we should error if ANSIBLE_METADATA exists in a collection
return doc_info, doc
def _check_version_added(self, doc, existing_doc):
version_added_raw = doc.get('version_added')
try:
collection_name = doc.get('version_added_collection')
version_added = self._create_strict_version(
str(version_added_raw or '0.0'),
collection_name=collection_name)
except ValueError as e:
version_added = version_added_raw or '0.0'
if self._is_new_module() or version_added != 'historical':
# already reported during schema validation, except:
if version_added == 'historical':
self.reporter.error(
path=self.object_path,
code='module-invalid-version-added',
msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
)
return
if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
)
if not self._is_new_module():
return
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
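        # The paired `<` comparisons below are effectively
        # `version_added != strict_ansible_version`: a new module must carry
        # the current ansible X.Y as its version_added.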
if (version_added < strict_ansible_version or
strict_ansible_version < version_added):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
)
def _validate_ansible_module_call(self, docs):
try:
spec, args, kwargs = get_argument_spec(self.path, self.collection)
except AnsibleModuleNotInitialized:
self.reporter.error(
path=self.object_path,
code='ansible-module-not-initialized',
msg="Execution of the module did not result in initialization of AnsibleModule",
)
return
except AnsibleModuleImportError as e:
self.reporter.error(
path=self.object_path,
code='import-error',
msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
)
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)),
'AnsibleModule', 'invalid-ansiblemodule-schema')
self._validate_argument_spec(docs, spec, kwargs)
def _validate_list_of_module_args(self, name, terms, spec, context):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
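        # Example shape being validated (illustration):
        #   mutually_exclusive=[['path', 'content'], ['name', 'id']]
        # Each inner list/tuple must hold unique strings that all exist in spec.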
for check in terms:
if not isinstance(check, (list, tuple)):
# This is already reported by schema checking
continue
bad_term = False
for term in check:
if not isinstance(term, string_types):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must contain strings in the lists or tuples; found value %r" % (term, )
self.reporter.error(
path=self.object_path,
code=name + '-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(check)) != len(check):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code=name + '-collision',
msg=msg,
)
if not set(check) <= set(spec):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code=name + '-unknown',
msg=msg,
)
def _validate_required_if(self, terms, spec, context, module):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
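        # Example shape (illustration):
        #   required_if=[('state', 'present', ('path', 'content'), True)]
        # i.e. (key, value, requirements[, is_one_of]) with an optional fourth
        # boolean entry.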
for check in terms:
if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
# This is already reported by schema checking
continue
if len(check) == 4 and not isinstance(check[3], bool):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
self.reporter.error(
path=self.object_path,
code='required_if-is_one_of-type',
msg=msg,
)
requirements = check[2]
if not isinstance(requirements, (list, tuple)):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
continue
bad_term = False
for term in requirements:
if not isinstance(term, string_types):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have only strings in third value (requirements); got %r" % (term, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(requirements)) != len(requirements):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms in requirements"
self.reporter.error(
path=self.object_path,
code='required_if-requirements-collision',
msg=msg,
)
if not set(requirements) <= set(spec):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_if-requirements-unknown',
msg=msg,
)
key = check[0]
if key not in spec:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have its key %s in argument_spec" % key
self.reporter.error(
path=self.object_path,
code='required_if-unknown-key',
msg=msg,
)
continue
if key in requirements:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains its key %s in requirements" % key
self.reporter.error(
path=self.object_path,
code='required_if-key-in-requirements',
msg=msg,
)
value = check[1]
if value is not None:
_type = spec[key].get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
try:
with CaptureStd():
dummy = _type_checker(value)
except (Exception, SystemExit):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
self.reporter.error(
path=self.object_path,
code='required_if-value-type',
msg=msg,
)
def _validate_required_by(self, terms, spec, context):
if terms is None:
return
if not isinstance(terms, Mapping):
# This is already reported by schema checking
return
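        # Example shape (illustration):
        #   required_by={'content': ['dest'], 'backup': 'dest'}
        # String values are normalized to single-element lists just below.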
for key, value in terms.items():
if isinstance(value, string_types):
value = [value]
if not isinstance(value, (list, tuple)):
# This is already reported by schema checking
continue
for term in value:
if not isinstance(term, string_types):
# This is already reported by schema checking
continue
if len(set(value)) != len(value) or key in value:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code='required_by-collision',
msg=msg,
)
if not set(value) <= set(spec) or key not in spec:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_by-unknown',
msg=msg,
)
def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
if not self.analyze_arg_spec:
return
if docs is None:
docs = {}
if context is None:
context = []
if last_context_spec is None:
last_context_spec = kwargs
try:
if not context:
add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True)
except Exception:
# Cannot merge fragments
return
# Use this to access type checkers later
module = NoArgsAnsibleModule({})
self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
self._validate_required_by(last_context_spec.get('required_by'), spec, context)
provider_args = set()
args_from_argspec = set()
deprecated_args_from_argspec = set()
doc_options = docs.get('options', {})
if doc_options is None:
doc_options = {}
for arg, data in spec.items():
restricted_argument_names = ('message', 'syslog_facility')
if arg.lower() in restricted_argument_names:
msg = "Argument '%s' in argument_spec " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if 'aliases' in data:
for al in data['aliases']:
if al.lower() in restricted_argument_names:
msg = "Argument alias '%s' in argument_spec " % al
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if not isinstance(data, dict):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must be a dictionary/hash when used"
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec',
msg=msg,
)
continue
removed_at_date = data.get('removed_at_date', None)
if removed_at_date is not None:
try:
if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a removed_at_date '%s' before today" % removed_at_date
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when removed_at_date is not in ISO format. Since schema
# validation already reported this as an error, don't report it a second time.
pass
deprecated_aliases = data.get('deprecated_aliases', None)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'date' in deprecated_alias:
try:
date = deprecated_alias['date']
if parse_isodate(date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
deprecated_alias['name'], deprecated_alias['date'])
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when deprecated_alias['date'] is not in ISO format. Since
# schema validation already reported this as an error, don't report it a second
# time.
pass
has_version = False
if self.collection and self.collection_version is not None:
compare_version = self.collection_version
version_of_what = "this collection (%s)" % self.collection_version_str
code_prefix = 'collection'
has_version = True
elif not self.collection:
compare_version = LOOSE_ANSIBLE_VERSION
version_of_what = "Ansible (%s)" % ansible_version
code_prefix = 'ansible'
has_version = True
removed_in_version = data.get('removed_in_version', None)
if removed_in_version is not None:
try:
collection_name = data.get('removed_from_collection')
removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= removed_in:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a deprecated removed_in_version %r," % removed_in_version
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'version' in deprecated_alias:
try:
collection_name = deprecated_alias.get('collection_name')
version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= version:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal in version %r," % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
deprecated_alias['name'], deprecated_alias['version'], e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
aliases = data.get('aliases', [])
if arg in aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is specified as its own alias"
self.reporter.error(
path=self.object_path,
code='parameter-alias-self',
msg=msg
)
if len(aliases) > len(set(aliases)):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has at least one alias specified multiple times in aliases"
self.reporter.error(
path=self.object_path,
code='parameter-alias-repeated',
msg=msg
)
if not context and arg == 'state':
bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
for bad_state in bad_states:
self.reporter.error(
path=self.object_path,
code='parameter-state-invalid-choice',
msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
args_from_argspec.add(arg)
args_from_argspec.update(aliases)
else:
deprecated_args_from_argspec.add(arg)
deprecated_args_from_argspec.update(aliases)
if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec-options',
msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
)
elif data.get('options'):
# Record provider options from network modules, for later comparison
for provider_arg, provider_data in data.get('options', {}).items():
provider_args.add(provider_arg)
provider_args.update(provider_data.get('aliases', []))
if data.get('required') and data.get('default', object) != object:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is marked as required but specifies a default. Arguments with a" \
" default should not be marked as required"
self.reporter.error(
path=self.object_path,
code='no-default-for-required-parameter',
msg=msg
)
if arg in provider_args:
# Provider args are being removed from network module top level
# don't validate docs<->arg_spec checks below
continue
_type = data.get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
_elements = data.get('elements')
if (_type == 'list') and not _elements:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as list but elements is not defined"
self.reporter.error(
path=self.object_path,
code='parameter-list-no-elements',
msg=msg
)
if _elements:
if not callable(_elements):
module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements)
if _type != 'list':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
self.reporter.error(
path=self.object_path,
code='parameter-invalid-elements',
msg=msg
)
arg_default = None
if 'default' in data and not is_empty(data['default']):
try:
with CaptureStd():
arg_default = _type_checker(data['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
self.reporter.error(
path=self.object_path,
code='incompatible-default-type',
msg=msg
)
continue
elif data.get('default') is None and _type == 'bool' and 'options' not in data:
arg_default = False
doc_options_args = []
for alias in sorted(set([arg] + list(aliases))):
if alias in doc_options:
doc_options_args.append(alias)
if len(doc_options_args) == 0:
# Undocumented arguments will be handled later (search for undocumented-parameter)
doc_options_arg = {}
else:
doc_options_arg = doc_options[doc_options_args[0]]
if len(doc_options_args) > 1:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " with aliases %s is documented multiple times, namely as %s" % (
", ".join([("'%s'" % alias) for alias in aliases]),
", ".join([("'%s'" % alias) for alias in doc_options_args])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-multiple-times',
msg=msg
)
try:
doc_default = None
if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
with CaptureStd():
doc_default = _type_checker(doc_options_arg['default'])
elif doc_options_arg.get('default') is None and _type == 'bool' and 'suboptions' not in doc_options_arg:
doc_default = False
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
self.reporter.error(
path=self.object_path,
code='doc-default-incompatible-type',
msg=msg
)
continue
if arg_default != doc_default:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
self.reporter.error(
path=self.object_path,
code='doc-default-does-not-match-spec',
msg=msg
)
doc_type = doc_options_arg.get('type')
if 'type' in data and data['type'] is not None:
if doc_type is None:
if not arg.startswith('_'): # hidden parameter, for example _raw_params
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation doesn't define type" % (data['type'])
self.reporter.error(
path=self.object_path,
code='parameter-type-not-in-doc',
msg=msg
)
elif data['type'] != doc_type:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
self.reporter.error(
path=self.object_path,
code='doc-type-does-not-match-spec',
msg=msg
)
else:
if doc_type is None:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " uses default type ('str') but documentation doesn't define type"
self.reporter.error(
path=self.object_path,
code='doc-missing-type',
msg=msg
)
elif doc_type != 'str':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " implies type as 'str' but documentation defines as %r" % doc_type
self.reporter.error(
path=self.object_path,
code='implied-parameter-type-mismatch',
msg=msg
)
doc_choices = []
try:
for choice in doc_options_arg.get('choices', []):
try:
with CaptureStd():
doc_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='doc-choices-incompatible-type',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
arg_choices = []
try:
for choice in data.get('choices', []):
try:
with CaptureStd():
arg_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='incompatible-choices',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
if not compare_unordered_lists(arg_choices, doc_choices):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
self.reporter.error(
path=self.object_path,
code='doc-choices-do-not-match-spec',
msg=msg
)
doc_required = doc_options_arg.get('required', False)
data_required = data.get('required', False)
if (doc_required or data_required) and not (doc_required and data_required):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if doc_required:
msg += " is not required, but is documented as being required"
else:
msg += " is required, but is not documented as being required"
self.reporter.error(
path=self.object_path,
code='doc-required-mismatch',
msg=msg
)
doc_elements = doc_options_arg.get('elements', None)
doc_type = doc_options_arg.get('type', 'str')
data_elements = data.get('elements', None)
if (doc_elements and not doc_type == 'list'):
msg = "Argument '%s' " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements
self.reporter.error(
path=self.object_path,
code='doc-elements-invalid',
msg=msg
)
if (doc_elements or data_elements) and not (doc_elements == data_elements):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if data_elements:
msg += " specifies elements as %s," % data_elements
else:
msg += " does not specify elements,"
if doc_elements:
msg += "but elements is documented as being %s" % doc_elements
else:
msg += "but elements is not documented"
self.reporter.error(
path=self.object_path,
code='doc-elements-mismatch',
msg=msg
)
spec_suboptions = data.get('options')
doc_suboptions = doc_options_arg.get('suboptions', {})
if spec_suboptions:
if not doc_suboptions:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has sub-options but documentation does not define it"
self.reporter.error(
path=self.object_path,
code='missing-suboption-docs',
msg=msg
)
self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
context=context + [arg], last_context_spec=data)
for arg in args_from_argspec:
if not str(arg).isidentifier():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is not a valid python identifier"
self.reporter.error(
path=self.object_path,
code='parameter-invalid',
msg=msg
)
if docs:
args_from_docs = set()
for arg, data in doc_options.items():
args_from_docs.add(arg)
args_from_docs.update(data.get('aliases', []))
args_missing_from_docs = args_from_argspec.difference(args_from_docs)
docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
for arg in args_missing_from_docs:
if arg in provider_args:
# Provider args are being removed from network module top level
# So they are likely not documented on purpose
continue
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in the argument_spec, but not documented in the module documentation"
self.reporter.error(
path=self.object_path,
code='undocumented-parameter',
msg=msg
)
for arg in docs_missing_from_args:
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
self.reporter.error(
path=self.object_path,
code='nonexistent-parameter-documented',
msg=msg
)
def _check_for_new_args(self, doc):
if not self.base_branch or self._is_new_module():
return
with CaptureStd():
try:
existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True)
existing_options = existing_doc.get('options', {}) or {}
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.warning(
path=self.object_path,
code='missing-existing-doc-fragment',
msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
)
return
except Exception as e:
self.reporter.warning_trace(
path=self.object_path,
tracebk=e
)
self.reporter.warning(
path=self.object_path,
code='unknown-doc-fragment',
                msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updating')
)
return
try:
mod_collection_name = existing_doc.get('version_added_collection')
mod_version_added = self._create_strict_version(
str(existing_doc.get('version_added', '0.0')),
collection_name=mod_collection_name)
except ValueError:
mod_collection_name = self.collection_name
mod_version_added = self._create_strict_version('0.0')
options = doc.get('options', {}) or {}
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
for option, details in options.items():
try:
names = [option] + details.get('aliases', [])
except (TypeError, AttributeError):
# Reporting of this syntax error will be handled by schema validation.
continue
if any(name in existing_options for name in names):
# The option already existed. Make sure version_added didn't change.
for name in names:
existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
existing_version = existing_options.get(name, {}).get('version_added')
if existing_version:
break
current_collection_name = details.get('version_added_collection')
current_version = details.get('version_added')
if current_collection_name != existing_collection_name:
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added-collection',
msg=('version_added for existing option (%s) should '
'belong to collection %r. Currently belongs to %r' %
(option, current_collection_name, existing_collection_name))
)
elif str(current_version) != str(existing_version):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for existing option (%s) should '
'be %r. Currently %r' %
(option, existing_version, current_version))
)
continue
try:
collection_name = details.get('version_added_collection')
version_added = self._create_strict_version(
str(details.get('version_added', '0.0')),
collection_name=collection_name)
except ValueError as e:
# already reported during schema validation
continue
if collection_name != self.collection_name:
continue
if (strict_ansible_version != mod_version_added and
(version_added < strict_ansible_version or
strict_ansible_version < version_added)):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for new option (%s) should '
'be %r. Currently %r' %
(option, should_be, version_added))
)
return existing_doc
@staticmethod
def is_blacklisted(path):
base_name = os.path.basename(path)
file_name = os.path.splitext(base_name)[0]
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST):
return True
for pat in ModuleValidator.REJECTLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
def validate(self):
super(ModuleValidator, self).validate()
if not self._python_module() and not self._powershell_module():
self.reporter.error(
path=self.object_path,
code='invalid-extension',
msg=('Official Ansible modules must have a .py '
'extension for python modules or a .ps1 '
'for powershell modules')
)
self._python_module_override = True
if self._python_module() and self.ast is None:
self.reporter.error(
path=self.object_path,
code='python-syntax-error',
msg='Python SyntaxError while parsing module'
)
try:
compile(self.text, self.path, 'exec')
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
end_of_deprecation_should_be_removed_only = False
if self._python_module():
doc_info, docs = self._validate_docs()
# See if current version => deprecated.removed_in, ie, should be docs only
if docs and docs.get('deprecated', False):
if 'removed_in' in docs['deprecated']:
removed_in = None
collection_name = docs['deprecated'].get('removed_from_collection')
version = docs['deprecated']['removed_in']
if collection_name != self.collection_name:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-source',
msg=('The deprecation version for a module must be added in this collection')
)
else:
try:
removed_in = self._create_strict_version(str(version), collection_name=collection_name)
except ValueError as e:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-version',
msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
)
if removed_in:
if not self.collection:
strict_ansible_version = self._create_strict_version(
'.'.join(ansible_version.split('.')[:2]), self.collection_name)
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
elif self.collection_version:
strict_ansible_version = self.collection_version
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
# handle deprecation by date
if 'removed_at_date' in docs['deprecated']:
try:
removed_at_date = docs['deprecated']['removed_at_date']
if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
except ValueError:
# This happens if the date cannot be parsed. This is already checked by the schema.
pass
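        # Illustrative DOCUMENTATION fragment (added; keys taken from the
        # checks above; a real module would normally use only one of the two
        # removal keys):
        #
        #   deprecated:
        #     removed_in: "4.0.0"
        #     removed_at_date: "2023-01-01"
        #     removed_from_collection: "community.general"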
if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
self._validate_ansible_module_call(docs)
self._check_for_sys_exit()
self._find_blacklist_imports()
main = self._find_main_call()
self._find_module_utils(main)
self._find_has_import()
first_callable = self._get_first_callable()
self._ensure_imports_below_docs(doc_info, first_callable)
self._check_for_subprocess()
self._check_for_os_call()
if self._powershell_module():
if self.basename in self.PS_DOC_REJECTLIST:
return
self._validate_ps_replacers()
docs_path = self._find_ps_docs_py_file()
# We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST:
with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
docs = docs_mv._validate_docs()[1]
self._validate_ansible_module_call(docs)
self._check_gpl3_header()
if not self._just_docs() and not end_of_deprecation_should_be_removed_only:
self._check_interpreter(powershell=self._powershell_module())
self._check_type_instead_of_isinstance(
powershell=self._powershell_module()
)
if end_of_deprecation_should_be_removed_only:
            # Ensure that `if __name__ == '__main__':` calls `removed_module()`, which ensures that the module has no code in it
main = self._find_main_call('removed_module')
# FIXME: Ensure that the version in the call to removed_module is less than +2.
# Otherwise it's time to remove the file (This may need to be done in another test to
# avoid breaking whenever the Ansible version bumps)
class PythonPackageValidator(Validator):
REJECTLIST_FILES = frozenset(('__pycache__',))
def __init__(self, path, reporter=None):
super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(path)
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def validate(self):
super(PythonPackageValidator, self).validate()
if self.basename in self.REJECTLIST_FILES:
return
init_file = os.path.join(self.path, '__init__.py')
if not os.path.exists(init_file):
self.reporter.error(
path=self.object_path,
code='subdirectory-missing-init',
msg='Ansible module subdirectories must contain an __init__.py'
)
def setup_collection_loader():
collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
_AnsibleCollectionFinder(collections_paths)
def re_compile(value):
"""
    Argparse expects invalid arguments to raise TypeError, but re.compile
    raises an re.error exception.
    This function is a shorthand to convert the re.error exception into a
    TypeError.
"""
try:
return re.compile(value)
except re.error as e:
raise TypeError(e)
def run():
parser = argparse.ArgumentParser(prog="validate-modules")
parser.add_argument('modules', nargs='+',
help='Path to module or module directory')
parser.add_argument('-w', '--warnings', help='Show warnings',
action='store_true')
parser.add_argument('--exclude', help='RegEx exclusion pattern',
type=re_compile)
parser.add_argument('--arg-spec', help='Analyze module argument spec',
action='store_true', default=False)
parser.add_argument('--base-branch', default=None,
help='Used in determining if new options were added')
parser.add_argument('--format', choices=['json', 'plain'], default='plain',
help='Output format. Default: "%(default)s"')
parser.add_argument('--output', default='-',
help='Output location, use "-" for stdout. '
'Default "%(default)s"')
parser.add_argument('--collection',
help='Specifies the path to the collection, when '
'validating files within a collection. Ensure '
'that ANSIBLE_COLLECTIONS_PATH is set so the '
'contents of the collection can be located')
parser.add_argument('--collection-version',
help='The collection\'s version number used to check '
'deprecations')
args = parser.parse_args()
args.modules = [m.rstrip('/') for m in args.modules]
reporter = Reporter()
git_cache = GitCache(args.base_branch)
check_dirs = set()
routing = None
if args.collection:
setup_collection_loader()
routing_file = 'meta/runtime.yml'
# Load meta/runtime.yml if it exists, as it may contain deprecation information
if os.path.isfile(routing_file):
try:
with open(routing_file) as f:
routing = yaml.safe_load(f)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
for module in args.modules:
if os.path.isfile(module):
path = module
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
git_cache=git_cache, reporter=reporter, routing=routing) as mv1:
mv1.validate()
check_dirs.add(os.path.dirname(path))
for root, dirs, files in os.walk(module):
basedir = root[len(module) + 1:].split('/', 1)[0]
if basedir in REJECTLIST_DIRS:
continue
for dirname in dirs:
if root == module and dirname in REJECTLIST_DIRS:
continue
path = os.path.join(root, dirname)
if args.exclude and args.exclude.search(path):
continue
check_dirs.add(path)
for filename in files:
path = os.path.join(root, filename)
if args.exclude and args.exclude.search(path):
continue
if ModuleValidator.is_blacklisted(path):
continue
with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
git_cache=git_cache, reporter=reporter, routing=routing) as mv2:
mv2.validate()
if not args.collection:
for path in sorted(check_dirs):
pv = PythonPackageValidator(path, reporter=reporter)
pv.validate()
if args.format == 'plain':
sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
else:
sys.exit(reporter.json(warnings=args.warnings, output=args.output))
class GitCache:
def __init__(self, base_branch):
self.base_branch = base_branch
if self.base_branch:
self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
else:
self.base_tree = []
try:
self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
except GitError as ex:
if ex.status == 128:
# fallback when there is no .git directory
self.head_tree = self._get_module_files()
else:
raise
except OSError as ex:
if ex.errno == errno.ENOENT:
# fallback when git is not installed
self.head_tree = self._get_module_files()
else:
raise
self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
self.base_module_paths.pop('__init__.py', None)
self.head_aliased_modules = set()
for path in self.head_tree:
filename = os.path.basename(path)
if filename.startswith('_') and filename != '__init__.py':
if os.path.islink(path):
self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
@staticmethod
def _get_module_files():
module_files = []
for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
for file_name in file_names:
module_files.append(os.path.join(dir_path, file_name))
return module_files
@staticmethod
def _git(args):
cmd = ['git'] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise GitError(stderr, p.returncode)
return stdout.decode('utf-8').splitlines()
class GitError(Exception):
def __init__(self, message, status):
super(GitError, self).__init__(message)
self.status = status
def main():
try:
run()
except KeyboardInterrupt:
pass
| gpl-3.0 |
googleapis/python-texttospeech | samples/snippets/audio_profile_test.py | 1 | 1085 | # Copyright 2018, Google, LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import audio_profile
TEXT = "hello"
OUTPUT = "output.mp3"
EFFECTS_PROFILE_ID = "telephony-class-application"
def test_audio_profile(capsys):
if os.path.exists(OUTPUT):
os.remove(OUTPUT)
assert not os.path.exists(OUTPUT)
audio_profile.synthesize_text_with_audio_profile(TEXT, OUTPUT, EFFECTS_PROFILE_ID)
out, err = capsys.readouterr()
assert ('Audio content written to file "%s"' % OUTPUT) in out
assert os.path.exists(OUTPUT)
os.remove(OUTPUT)
| apache-2.0 |
tcheehow/MissionPlanner | Lib/httplib.py | 50 | 49485 | """HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import os
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
        included in the returned list.  If a non-header line ends the headers
        (which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
# The caller won't be using any sock.recv() calls, so buffering
# is fine and recommended for performance.
self.fp = sock.makefile('rb')
else:
# The buffer size is specified as zero, because the headers of
# the response are read with readline(). If the reads were
# buffered the readline() calls could consume some of the
            # response, which may be read via a recv() on the underlying
# socket.
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline()
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
    def _check_close(self):
        conn = self.msg.getheader('connection')
        if self.version == 11:
            # An HTTP/1.1 proxy is assumed to stay open unless
            # explicitly closed.
            if conn and "close" in conn.lower():
                return True
            return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.getheader('keep-alive'):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.getheader('proxy-connection')
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
s = self._safe_read(self.length)
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
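    # Background note (added): a chunked body on the wire looks like, e.g.
    #
    #     4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n
    #
    # i.e. a hex chunk-size line (any ";extension" suffix is stripped), the
    # chunk data, and a CRLF after each chunk, terminated by a zero-size
    # chunk.  _read_chunked() below parses the size line, reads the data with
    # _safe_read(), and discards each trailing CRLF.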
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(''.join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
elif amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ''.join(value)
elif amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return ''.join(value)
else:
value.append(self._safe_read(chunk_left))
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return ''.join(value)
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
# NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
# return less than x bytes unless EOF is encountered. It now handles
# signal interruptions (socket.error EINTR) internally. This code
# never caught that exception anyways. It seems largely pointless.
# self.fp.read(amt) will work fine.
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ''.join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
self._set_hostport(host, port)
if strict is not None:
self.strict = strict
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
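    # Illustrative proxy-tunnelling sketch (added; host names are made up):
    #
    #     conn = HTTPSConnection('proxy.example.com', 8080)
    #     conn.set_tunnel('www.python.org', 443,
    #                     headers={'User-Agent': 'example'})
    #     conn.request('GET', '/')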
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
self._set_hostport(self._tunnel_host, self._tunnel_port)
self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
response = self.response_class(self.sock, strict = self.strict,
method = self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if line == '\r\n': break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket.create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
else:
self.sock.sendall(data)
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy, use
                # the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
try:
host_enc = self.host.encode("ascii")
except UnicodeEncodeError:
host_enc = self.host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
if self.port == self.default_port:
self.putheader('Host', host_enc)
else:
self.putheader('Host', "%s:%s" % (host_enc, self.port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
hdr = '%s: %s' % (header, '\r\n\t'.join([str(v) for v in values]))
self._output(hdr)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError, te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body)
def getresponse(self, buffering=False):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
args = (self.sock,)
kwds = {"strict":self.strict, "method":self._method}
if self.debuglevel > 0:
args += (self.debuglevel,)
if buffering:
#only add this keyword if non-default, for compatibility with
#other response_classes.
kwds["buffering"] = True;
response = self.response_class(*args, **kwds)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
self._conn._set_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except BadStatusLine, e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
HTTPConnection.__init__(self, host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
__all__.append("HTTPSConnection")
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None):
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
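    # Illustrative scenario (added): if an HTTP/0.9 server replies
    # "hello\r\nworld\r\n", _read_status() has already consumed "hello\r\n"
    # while looking for a status line; wrapping the file with this class hands
    # that line back, so response.read() still returns the complete body.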
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
def test():
"""Test this module.
A hodge podge of tests collected here, because they have too many
external dependencies for the regular test suite.
"""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'd')
dl = 0
for o, a in opts:
if o == '-d': dl = dl + 1
host = 'www.python.org'
selector = '/'
if args[0:]: host = args[0]
if args[1:]: selector = args[1]
h = HTTP()
h.set_debuglevel(dl)
h.connect(host)
h.putrequest('GET', selector)
h.endheaders()
status, reason, headers = h.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(h.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
# minimal test that code to extract host from url works
class HTTP11(HTTP):
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
h = HTTP11('www.python.org')
h.putrequest('GET', 'http://www.python.org/~jeremy/')
h.endheaders()
h.getreply()
h.close()
try:
import ssl
except ImportError:
pass
else:
for host, selector in (('sourceforge.net', '/projects/python'),
):
print "https://%s%s" % (host, selector)
hs = HTTPS()
hs.set_debuglevel(dl)
hs.connect(host)
hs.putrequest('GET', selector)
hs.endheaders()
status, reason, headers = hs.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(hs.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
if __name__ == '__main__':
test()
| gpl-3.0 |
timvdm/Helium | test/python/bitvec.py | 2 | 2947 | import helium
import unittest
class TestBitvec(unittest.TestCase):
def test_Fingerprint(self):
fp = helium.Fingerprint(8)
self.assertEqual(8, fp.numWords)
fp = helium.Fingerprint(1)
self.assertEqual('0000000000000000', fp.hex())
self.assertEqual('0000000000000000 0000000000000000 0000000000000000 0000000000000000', fp.bin())
self.assertEqual('0000000000000000000000000000000000000000000000000000000000000000', fp.bin(False))
fp = helium.bitvec_from_binary('0101010101010101010010101010')
self.assertEqual('0101010101010101010010101010000000000000000000000000000000000000', fp.bin(False))
self.assertEqual(1, fp.numWords)
fp = helium.bitvec_from_hex('0123456789abcdefABCDEF')
self.assertEqual('0123456789abcdefabcdef0000000000', fp.hex())
self.assertEqual(2, fp.numWords)
fp = helium.Fingerprint(1)
fp[4] = 1
self.assertEqual(1, fp[4])
self.assertEqual(1, fp.count())
fp[4] = 0
self.assertEqual(0, fp.count())
fp[4] = 1
fp[8] = 1
fp[12] = 1
fp[40] = 1
self.assertEqual(4, fp.count())
self.assertEqual(2, fp.count(6, 30))
fp.zero()
self.assertEqual(0, fp.count())
def test_bitvec(self):
self.assertEqual(64, helium.BitsPerWord)
self.assertEqual(1, helium.bitvec_num_words_for_bits(42))
self.assertEqual(2, helium.bitvec_num_words_for_bits(70))
fp = helium.Fingerprint(1)
fp2 = helium.bitvec_copy(fp)
helium.bitvec_set(4, fp2)
self.assertTrue(helium.bitvec_get(4, fp2))
self.assertFalse(helium.bitvec_get(4, fp))
helium.bitvec_zero(fp2)
self.assertFalse(helium.bitvec_get(4, fp2))
helium.bitvec_set(4, fp)
helium.bitvec_reset(4, fp)
self.assertFalse(helium.bitvec_get(4, fp))
helium.bitvec_set(4, fp)
helium.bitvec_set(8, fp)
helium.bitvec_set(4, fp2)
helium.bitvec_set(8, fp2)
helium.bitvec_set(12, fp2)
self.assertTrue(helium.bitvec_is_subset_superset(fp, fp2))
self.assertFalse(helium.bitvec_is_subset_superset(fp2, fp))
self.assertEqual(2, helium.bitvec_count(fp))
self.assertEqual(3, helium.bitvec_count(fp2))
helium.bitvec_set(40, fp2)
self.assertEqual(3, helium.bitvec_count(fp2, 0, 32))
self.assertEqual(1, helium.bitvec_count(fp2, 32, 64))
helium.bitvec_union_count(fp, fp2)
helium.bitvec_tanimoto(fp, fp2)
helium.bitvec_tanimoto(fp, fp2, 0, 1)
helium.bitvec_cosine(fp, fp2)
helium.bitvec_cosine(fp, fp2, 0, 1)
helium.bitvec_hamming(fp, fp2)
helium.bitvec_hamming(fp, fp2, 0, 1)
helium.bitvec_russell_rao(fp, fp2)
helium.bitvec_forbes(fp, fp2)
helium.bitvec_forbes(fp, fp2, 0, 1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Alt90/Student_progress_bar | progress_bar/settings.py | 1 | 3133 | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG') is None
ALLOWED_HOSTS = [os.environ.get('HOST', ''),]
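# Deployment note (added): with the expressions above, DEBUG is True whenever
# the DEBUG environment variable is unset, so a production deployment must
# export DEBUG (any value) to disable it, alongside SECRET_KEY and HOST, e.g.
#   export SECRET_KEY='change-me'   # illustrative values only
#   export DEBUG=1
#   export HOST='example.com'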
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'achievement',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'progress_bar.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'progress_bar.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_name', ''),
'USER': os.environ.get('DB_user', ''),
'PASSWORD': os.environ.get('DB_password', ''),
'HOST': os.environ.get('DB_server', ''),
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
| mit |
shishengjia/OnlineCourses | apps/organisation/adminx.py | 1 | 1321 | # -*- encoding: utf-8 -*-
from .models import CityDict, CourseOrg, Teacher
import xadmin
__author__ = 'shishengjia'
__date__ = '05/01/2017 13:36'
class CityDictAdmin(object):
list_display = ['name', 'desc', 'add_time']
search_fields = ['name', 'desc']
list_filter = ['name', 'desc', 'add_time']
class CourseOrgAdmin(object):
list_display = ['name', 'get_course_nums', 'desc', 'category', 'click_num', 'fav_num', 'image', 'address', 'add_time', 'city']
search_fields = ['name', 'desc', 'category', 'click_num', 'fav_num', 'image', 'address', 'city']
list_filter = ['name', 'desc', 'category', 'click_num', 'fav_num', 'image', 'address', 'add_time', 'city__name']
    # When another table uses CourseOrg as a foreign key, the admin form shows
    # a search box for selecting an organisation instead of loading all of
    # them at once.
relfield_style = 'fk-ajax'
class TeacherAdmin(object):
list_display = ['org', 'name', 'work_years', 'work_company', 'click_num', 'fav_num', 'add_time']
search_fields = ['org', 'name', 'work_years', 'work_company', 'click_num', 'fav_num']
list_filter = ['org__name', 'name', 'work_years', 'work_company', 'click_num', 'fav_num', 'add_time']
xadmin.site.register(CityDict, CityDictAdmin)
xadmin.site.register(CourseOrg, CourseOrgAdmin)
xadmin.site.register(Teacher, TeacherAdmin)
| apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/gluon/contrib/redis_session.py | 11 | 9611 | """
Developed by niphlod@gmail.com
"""
import redis
from gluon import current
from gluon.storage import Storage
import time
import logging
import thread
logger = logging.getLogger("web2py.session.redis")
locker = thread.allocate_lock()
def RedisSession(*args, **vars):
"""
Usage example: put in models
from gluon.contrib.redis_session import RedisSession
sessiondb = RedisSession('localhost:6379',db=0, session_expiry=False, password=None)
session.connect(request, response, db = sessiondb)
Simple slip-in storage for session
"""
locker.acquire()
try:
instance_name = 'redis_instance_' + current.request.application
if not hasattr(RedisSession, instance_name):
setattr(RedisSession, instance_name, RedisClient(*args, **vars))
return getattr(RedisSession, instance_name)
finally:
locker.release()
class RedisClient(object):
meta_storage = {}
MAX_RETRIES = 5
RETRIES = 0
_release_script = None
def __init__(self, server='localhost:6379', db=None, debug=False,
session_expiry=False, with_lock=False, password=None):
"""session_expiry can be an integer, in seconds, to set the default expiration
of sessions. The corresponding record will be deleted from the redis instance,
and there's virtually no need to run sessions2trash.py
"""
self.server = server
self.password = password
self.db = db or 0
host, port = (self.server.split(':') + ['6379'])[:2]
port = int(port)
self.debug = debug
if current and current.request:
self.app = current.request.application
else:
self.app = ''
self.r_server = redis.Redis(host=host, port=port, db=self.db, password=self.password)
if with_lock:
RedisClient._release_script = self.r_server.register_script(_LUA_RELEASE_LOCK)
self.tablename = None
self.session_expiry = session_expiry
self.with_lock = with_lock
def get(self, what, default):
return self.tablename
def Field(self, fieldname, type='string', length=None, default=None,
required=False, requires=None):
return None
def define_table(self, tablename, *fields, **args):
if not self.tablename:
self.tablename = MockTable(
self, self.r_server, tablename, self.session_expiry,
self.with_lock)
return self.tablename
def __getitem__(self, key):
return self.tablename
def __call__(self, where=''):
q = self.tablename.query
return q
def commit(self):
# this is only called by session2trash.py
pass
class MockTable(object):
def __init__(self, db, r_server, tablename, session_expiry, with_lock=False):
self.db = db
self.r_server = r_server
self.tablename = tablename
# set the namespace for sessions of this app
self.keyprefix = 'w2p:sess:%s' % tablename.replace(
'web2py_session_', '')
# fast auto-increment id (needed for session handling)
self.serial = "%s:serial" % self.keyprefix
# index of all the session keys of this app
self.id_idx = "%s:id_idx" % self.keyprefix
# remember the session_expiry setting
self.session_expiry = session_expiry
self.with_lock = with_lock
def __call__(self, record_id, unique_key=None):
# Support DAL shortcut query: table(record_id)
# This will call the __getattr__ below
# returning a MockQuery
q = self.id
# Instructs MockQuery, to behave as db(table.id == record_id)
q.op = 'eq'
q.value = record_id
q.unique_key = unique_key
row = q.select()
return row[0] if row else Storage()
def __getattr__(self, key):
if key == 'id':
# return a fake query. We need to query it just by id for normal operations
self.query = MockQuery(
field='id', db=self.r_server,
prefix=self.keyprefix, session_expiry=self.session_expiry,
with_lock=self.with_lock, unique_key=self.unique_key
)
return self.query
elif key == '_db':
# needed because of the calls in sessions2trash.py and globals.py
return self.db
def insert(self, **kwargs):
# usually kwargs would be a Storage with several keys:
# 'locked', 'client_ip','created_datetime','modified_datetime'
# 'unique_key', 'session_data'
# retrieve a new key
newid = str(self.r_server.incr(self.serial))
key = self.keyprefix + ':' + newid
if self.with_lock:
key_lock = key + ':lock'
acquire_lock(self.r_server, key_lock, newid)
with self.r_server.pipeline() as pipe:
# add it to the index
pipe.sadd(self.id_idx, key)
# set a hash key with the Storage
pipe.hmset(key, kwargs)
if self.session_expiry:
pipe.expire(key, self.session_expiry)
pipe.execute()
if self.with_lock:
release_lock(self.r_server, key_lock, newid)
return newid
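# Resulting Redis layout after one insert() above (key names illustrative,
# assuming tablename 'web2py_session_welcome' and a serial value of 42):
#   w2p:sess:welcome:serial  -> INCR counter that hands out session ids
#   w2p:sess:welcome:id_idx  -> SET holding every session hash key of this app
#   w2p:sess:welcome:42      -> HASH with locked/client_ip/.../session_data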
class MockQuery(object):
"""a fake Query object that supports querying by id
and listing all keys. No other operation is supported
"""
def __init__(self, field=None, db=None, prefix=None, session_expiry=False,
with_lock=False, unique_key=None):
self.field = field
self.value = None
self.db = db
self.keyprefix = prefix
self.op = None
self.session_expiry = session_expiry
self.with_lock = with_lock
self.unique_key = unique_key
def __eq__(self, value, op='eq'):
self.value = value
self.op = op
def __gt__(self, value, op='ge'):
self.value = value
self.op = op
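# Illustration (hypothetical table): the overloads above let MockQuery mimic
# the DAL query syntax, e.g. with t = db.define_table('web2py_session_welcome'):
#   t.id == 3   records op='eq', value=3  (fetch one session)
#   t.id > 0    records op='ge', value=0  (list all sessions, as in sessions2trash.py)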
def select(self):
if self.op == 'eq' and self.field == 'id' and self.value:
# means that someone wants to retrieve the key self.value
key = self.keyprefix + ':' + str(self.value)
if self.with_lock:
acquire_lock(self.db, key + ':lock', self.value)
rtn = self.db.hgetall(key)
if rtn:
if self.unique_key:
# make sure the id and unique_key are correct
if rtn['unique_key'] == self.unique_key:
rtn['update_record'] = self.update # update record support
else:
rtn = None
return [Storage(rtn)] if rtn else []
elif self.op == 'ge' and self.field == 'id' and self.value == 0:
# means that someone wants the complete list
rtn = []
id_idx = "%s:id_idx" % self.keyprefix
# find all session keys of this app
allkeys = self.db.smembers(id_idx)
for sess in allkeys:
val = self.db.hgetall(sess)
if not val:
if self.session_expiry:
# clean up the idx, because the key expired
self.db.srem(id_idx, sess)
continue
val = Storage(val)
# add a delete_record method (necessary for sessions2trash.py)
val.delete_record = RecordDeleter(
self.db, sess, self.keyprefix)
rtn.append(val)
return rtn
else:
raise Exception("Operation not supported")
def update(self, **kwargs):
# means that the session has been found and needs an update
if self.op == 'eq' and self.field == 'id' and self.value:
key = self.keyprefix + ':' + str(self.value)
if not self.db.exists(key):
return None
with self.db.pipeline() as pipe:
pipe.hmset(key, kwargs)
if self.session_expiry:
pipe.expire(key, self.session_expiry)
rtn = pipe.execute()[0]
if self.with_lock:
release_lock(self.db, key + ':lock', self.value)
return rtn
def delete(self, **kwargs):
# means that we want this session to be deleted
if self.op == 'eq' and self.field == 'id' and self.value:
id_idx = "%s:id_idx" % self.keyprefix
key = self.keyprefix + ':' + str(self.value)
with self.db.pipeline() as pipe:
pipe.delete(key)
pipe.srem(id_idx, key)
rtn = pipe.execute()
return rtn[1]
class RecordDeleter(object):
"""Dumb record deleter to support sessions2trash.py"""
def __init__(self, db, key, keyprefix):
self.db, self.key, self.keyprefix = db, key, keyprefix
def __call__(self):
id_idx = "%s:id_idx" % self.keyprefix
# remove from the index
self.db.srem(id_idx, self.key)
# remove the key itself
self.db.delete(self.key)
def acquire_lock(conn, lockname, identifier, ltime=10):
while True:
if conn.set(lockname, identifier, ex=ltime, nx=True):
return identifier
time.sleep(.01)
_LUA_RELEASE_LOCK = """
if redis.call("get", KEYS[1]) == ARGV[1]
then
return redis.call("del", KEYS[1])
else
return 0
end
"""
def release_lock(conn, lockname, identifier):
return RedisClient._release_script(
keys=[lockname], args=[identifier],
client=conn)
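# A minimal sketch of how the lock helpers above compose (key and identifier
# values are hypothetical; release_lock requires the Lua script registered by
# RedisClient(..., with_lock=True)):
#   r = redis.Redis()
#   ident = acquire_lock(r, 'w2p:sess:welcome:42:lock', '42')  # spins on SET NX EX
#   try:
#       pass  # read/modify the session hash here
#   finally:
#       release_lock(r, 'w2p:sess:welcome:42:lock', ident)  # deletes only if we still own it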
| gpl-3.0 |
rlugojr/kubernetes | hack/update_owners.py | 94 | 7705 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import re
import json
import os
import random
import subprocess
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
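# Illustrative inputs/outputs for normalize() (test names are made up):
#   '[k8s.io] Pods {Serial} should be restarted' -> 'Pods should be restarted'
#   '  spaced   out name '                       -> 'spaced out name'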
def get_test_history(days_ago):
url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
time.gmtime(time.time() - days_ago * 24 * 60 * 60))
resp = urllib2.urlopen(url)
content = resp.read()
if resp.headers.get('content-encoding') == 'gzip':
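# 15 | 16 == 31: 16 + max window bits tells zlib to expect a gzip header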
content = zlib.decompress(content, 15 | 16)
return json.loads(content)
def get_test_names_from_test_history():
test_names = set()
for days_ago in range(4):
test_history = get_test_history(days_ago)
test_names.update(normalize(name) for name in test_history['test_names'])
return test_names
def get_test_names_from_local_files():
tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
tests = json.loads(tests_json)
return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
for t in tests}
def load_owners(fname):
owners = {}
with open(fname) as f:
for n, cols in enumerate(csv.reader(f)):
if n == 0:
continue # header
if len(cols) == 3:
# migrate from previous version without sig
(name, owner, random_assignment), sig = cols, ""
else:
(name, owner, random_assignment, sig) = cols
owners[normalize(name)] = (owner, int(random_assignment), sig)
return owners
def write_owners(fname, owners):
with open(fname, 'w') as f:
out = csv.writer(f, lineterminator='\n')
out.writerow(['name', 'owner', 'auto-assigned', 'sig'])
sort_key = lambda (k, v): (k != 'DEFAULT', k) # put 'DEFAULT' first.
items = sorted(owners.items(), key=sort_key)
for name, (owner, random_assignment, sig) in items:
out.writerow([name, owner, int(random_assignment), sig])
def get_maintainers():
# Github doesn't seem to support team membership listing without a key with
# org admin privileges. Instead, we do it manually:
# Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
# Run this in the js console:
# [].slice.call(document.querySelectorAll('.team-member-username a')).map(
# e => e.textContent.trim())
ret = {"alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
"bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
"david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
"eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
"gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
"jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
"justinsb", "kargakis", "Kashomon", "kevin-wangzefeng", "krousey",
"lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
"mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
"nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
"Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
"soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
"timothysc", "timstclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
"yujuhong", "zmerlynn"}
return sorted(ret - SKIP_MAINTAINERS)
def detect_github_username():
origin_url = subprocess.check_output(['git', 'config', 'remote.origin.url'])
m = re.search(r'github.com[:/](.*)/', origin_url)
if m and m.group(1) != 'kubernetes':
return m.group(1)
raise ValueError('unable to determine GitHub user from '
'`git config remote.origin.url` output, run with --user instead')
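# For illustration, both common remote formats yield the account name
# ('alice' is a hypothetical user):
#   git@github.com:alice/kubernetes.git  -> 'alice'
#   https://github.com/alice/kubernetes  -> 'alice'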
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
options = parser.parse_args()
if options.history:
test_names = get_test_names_from_test_history()
else:
test_names = get_test_names_from_local_files()
test_names.add('DEFAULT')
test_names = sorted(test_names)
owners = load_owners(OWNERS_PATH)
outdated_tests = sorted(set(owners) - set(test_names))
new_tests = sorted(set(test_names) - set(owners))
maintainers = get_maintainers()
print '# OUTDATED TESTS (%d):' % len(outdated_tests)
print '\n'.join('%s -- %s%s' %
(t, owners[t][0], ['', ' (random)'][owners[t][1]])
for t in outdated_tests)
print '# NEW TESTS (%d):' % len(new_tests)
print '\n'.join(new_tests)
if options.check:
if new_tests or outdated_tests:
print
print 'ERROR: the test list has changed'
sys.exit(1)
sys.exit(0)
if not options.user:
options.user = detect_github_username()
for name in outdated_tests:
owners.pop(name)
if not options.addonly:
print '# UNEXPECTED MAINTAINERS ',
print '(randomly assigned, but not in kubernetes-maintainers)'
for name, (owner, random_assignment, _) in sorted(owners.iteritems()):
if random_assignment and owner not in maintainers:
print '%-16s %s' % (owner, name)
owners.pop(name)
print
owner_counts = collections.Counter(
owner for name, (owner, random, sig) in owners.iteritems()
if owner in maintainers)
for test_name in set(test_names) - set(owners):
random_assignment = True
if options.user.lower() == 'random':
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
else:
new_owner = options.user
random_assignment = False
owner_counts[new_owner] += 1
owners[test_name] = (new_owner, random_assignment, "")
if options.user.lower() == 'random':
print '# Tests per maintainer:'
for owner, count in owner_counts.most_common():
print '%-20s %3d' % (owner, count)
write_owners(OWNERS_PATH, owners)
if __name__ == '__main__':
main()
| apache-2.0 |
nicolaspanel/djangularjs | server/settings/base.py | 1 | 3798 | """
Django settings for server project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
NOTE: __generator-djangularjs__ may automatically modified this file.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json, yaml
SETTINGS_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(SETTINGS_DIR)
PROJECT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
with open(os.path.join(SETTINGS_DIR, '.secrets.yml')) as conf_file:
# We use Ansible and Ansible Vault to keep sensitive information secret
sensible_cfg = yaml.safe_load(conf_file)
SECRET_KEY = sensible_cfg['SECRET_KEY']
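# Minimal sketch of the expected .secrets.yml shape, inferred from the keys
# read in this module (all values below are placeholders, not real settings):
#   SECRET_KEY: "change-me"
#   DATABASES:
#     default:
#       ENGINE: django.db.backends.postgresql_psycopg2
#       NAME: mydb
#       USER: myuser
#       PASSWORD: change-me
#       HOST: localhost
#       PORT: 5432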
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_su',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'compressor',
'server.core',
'server.authentication',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'server.urls'
AUTHENTICATION_BACKENDS = (
# Standard Django auth
"django.contrib.auth.backends.ModelBackend",
# django-su auth
"django_su.backends.SuBackend",
"server.authentication.backends.EmailBackend",
)
WSGI_APPLICATION = 'server.wsgi.application'
# Database
DATABASES = sensible_cfg['DATABASES'] # see provisioning/group_vars/dev and/or server/settings/conf.json
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
# 'django.contrib.messages.context_processors.messages',
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
],
},
},
]
# Static files (CSS, JavaScript, Images)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder', # TODO: check if necessary
'compressor.finders.CompressorFinder',
)
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static/')
with open(os.path.join(PROJECT_DIR, 'assets.json')) as assets_file:
ASSETS = json.load(assets_file)
STATIC_URL = ASSETS['staticUrl']
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'public/'),
os.path.join(PROJECT_DIR, 'dist/'),
)
COMPRESS_JS_FILTERS = [] # disable JS minification (done with uglifyjs)
| mit |
dhimmel/networkx | networkx/algorithms/bipartite/tests/test_project.py | 31 | 13836 | #!/usr/bin/env python
from nose.tools import assert_equal
import networkx as nx
from networkx.algorithms import bipartite
from networkx.testing import assert_edges_equal, assert_nodes_equal
class TestBipartiteProject:
def test_path_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.projected_graph(G, [1, 3])
assert_nodes_equal(P.nodes(), [1, 3])
assert_edges_equal(P.edges(), [(1, 3)])
P=bipartite.projected_graph(G, [0, 2])
assert_nodes_equal(P.nodes(), [0, 2])
assert_edges_equal(P.edges(), [(0, 2)])
def test_path_projected_properties_graph(self):
G=nx.path_graph(4)
G.add_node(1,name='one')
G.add_node(2,name='two')
P=bipartite.projected_graph(G,[1,3])
assert_nodes_equal(P.nodes(),[1,3])
assert_edges_equal(P.edges(),[(1,3)])
assert_equal(P.node[1]['name'],G.node[1]['name'])
P=bipartite.projected_graph(G,[0,2])
assert_nodes_equal(P.nodes(),[0,2])
assert_edges_equal(P.edges(),[(0,2)])
assert_equal(P.node[2]['name'],G.node[2]['name'])
def test_path_collaboration_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(P.nodes(),[1,3])
assert_edges_equal(P.edges(),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(P.nodes(),[0,2])
assert_edges_equal(P.edges(),[(0,2)])
P[0][2]['weight']=1
def test_directed_path_collaboration_projected_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(P.nodes(),[1,3])
assert_edges_equal(P.edges(),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(P.nodes(),[0,2])
assert_edges_equal(P.edges(),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(P.nodes(),[1,3])
assert_edges_equal(P.edges(),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(P.nodes(),[0,2])
assert_edges_equal(P.edges(),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_directed_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(P.nodes(),[1,3])
assert_edges_equal(P.edges(),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(P.nodes(),[0,2])
assert_edges_equal(P.edges(),[(0,2)])
P[0][2]['weight']=1
def test_star_projected_graph(self):
G=nx.star_graph(3)
P=bipartite.projected_graph(G,[1,2,3])
assert_nodes_equal(P.nodes(),[1,2,3])
assert_edges_equal(P.edges(),[(1,2),(1,3),(2,3)])
P=bipartite.weighted_projected_graph(G,[1,2,3])
assert_nodes_equal(P.nodes(),[1,2,3])
assert_edges_equal(P.edges(),[(1,2),(1,3),(2,3)])
P=bipartite.projected_graph(G,[0])
assert_nodes_equal(P.nodes(),[0])
assert_edges_equal(P.edges(),[])
def test_project_multigraph(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('a',2)
G.add_edge('b',2)
P=bipartite.projected_graph(G,'ab')
assert_edges_equal(P.edges(),[('a','b')])
P=bipartite.weighted_projected_graph(G,'ab')
assert_edges_equal(P.edges(),[('a','b')])
P=bipartite.projected_graph(G,'ab',multigraph=True)
assert_edges_equal(P.edges(),[('a','b'),('a','b')])
def test_project_collaboration(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('b',2)
G.add_edge('c',2)
G.add_edge('c',3)
G.add_edge('c',4)
G.add_edge('b',4)
P=bipartite.collaboration_weighted_projected_graph(G,'abc')
assert_equal(P['a']['b']['weight'],1)
assert_equal(P['b']['c']['weight'],2)
def test_directed_projection(self):
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge('B',2)
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(P.edges(),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(P.edges(),[('A','B')])
assert_equal(P['A']['B']['weight'],1)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(P.edges(),[('A','B')])
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge(2,'B')
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(P.edges(),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(P.edges(),[('A','B')])
assert_equal(P['A']['B']['weight'],2)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(P.edges(),[('A','B'),('A','B')])
class TestBipartiteWeightedProjection:
def setUp(self):
# Tore Opsahl's example
# http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
self.G=nx.Graph()
self.G.add_edge('A',1)
self.G.add_edge('A',2)
self.G.add_edge('B',1)
self.G.add_edge('B',2)
self.G.add_edge('B',3)
self.G.add_edge('B',4)
self.G.add_edge('B',5)
self.G.add_edge('C',1)
self.G.add_edge('D',3)
self.G.add_edge('E',4)
self.G.add_edge('E',5)
self.G.add_edge('E',6)
self.G.add_edge('F',6)
# Graph based on figure 6 from Newman (2001)
self.N=nx.Graph()
self.N.add_edge('A',1)
self.N.add_edge('A',2)
self.N.add_edge('A',3)
self.N.add_edge('B',1)
self.N.add_edge('B',2)
self.N.add_edge('B',3)
self.N.add_edge('C',1)
self.N.add_edge('D',1)
self.N.add_edge('E',3)
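# For reference, the weight functions exercised by the tests below, with
# B = shared bottom-node neighbours of u and v and deg(k) = degree of k:
#   weighted_projected_graph:                w = |B| (with ratio=True, divides
#                                            by the number of bottom nodes instead)
#   collaboration_weighted_projected_graph:  w = sum(1 / (deg(k) - 1) for k in B)
#   overlap_weighted_projected_graph:        w = |B| / |N(u) ∪ N(v)| if jaccard,
#                                            else |B| / min(|N(u)|, |N(v)|)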
def test_project_weighted_shared(self):
edges=[('A','B',2),
('A','C',1),
('B','C',1),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3),
('A','E',1),
('A','C',1),
('A','D',1),
('B','E',1),
('B','C',1),
('B','D',1),
('C','D',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_newman(self):
edges=[('A','B',1.5),
('A','C',0.5),
('B','C',0.5),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',11/6.0),
('A','E',1/2.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/2.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_ratio(self):
edges=[('A','B',2/6.0),
('A','C',1/6.0),
('B','C',1/6.0),
('B','D',1/6.0),
('B','E',2/6.0),
('E','F',1/6.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G, 'ABCDEF', ratio=True)
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N, 'ABCDE', ratio=True)
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_overlap(self):
edges=[('A','B',2/2.0),
('A','C',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('B','E',2/3.0),
('E','F',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF', jaccard=False)
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/1.0),
('A','C',1/1.0),
('A','D',1/1.0),
('B','E',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE', jaccard=False)
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_jaccard(self):
edges=[('A','B',2/5.0),
('A','C',1/2.0),
('B','C',1/5.0),
('B','D',1/5.0),
('B','E',2/6.0),
('E','F',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(P.edges(),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_generic_weighted_projected_graph_simple(self):
def shared(G, u, v):
return len(set(G[u]) & set(G[v]))
B = nx.path_graph(5)
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)
assert_nodes_equal(G.nodes(), [0, 2, 4])
assert_edges_equal(G.edges(data=True),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(G.nodes(), [0, 2, 4])
assert_edges_equal(G.edges(data=True),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
B = nx.DiGraph()
B.add_path(list(range(5)))
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(G.nodes(), [0, 2, 4])
assert_edges_equal(G.edges(data=True),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
def test_generic_weighted_projected_graph_custom(self):
def jaccard(G, u, v):
unbrs = set(G[u])
vnbrs = set(G[v])
return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
def my_weight(G, u, v, weight='weight'):
w = 0
for nbr in set(G[u]) & set(G[v]):
w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
return w
B = nx.bipartite.complete_bipartite_graph(2, 2)
for i,(u,v) in enumerate(B.edges()):
B.edge[u][v]['weight'] = i + 1
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=jaccard)
assert_edges_equal(G.edges(data=True), [(0, 1, {'weight': 1.0})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=my_weight)
assert_edges_equal(G.edges(data=True), [(0, 1, {'weight': 10})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1])
assert_edges_equal(G.edges(data=True), [(0, 1, {'weight': 2})])
| bsd-3-clause |
ol-loginov/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geometries.py | 90 | 33343 | import re
wkt_regex = re.compile(r'^(?P<type>[A-Z]+) ?\(')
class TestGeom:
"The Test Geometry class container."
def __init__(self, wkt, **kwargs):
self.wkt = wkt
self.bad = kwargs.pop('bad', False)
if not self.bad:
m = wkt_regex.match(wkt)
if not m:
raise Exception('Improper WKT: "%s"' % wkt)
self.geo_type = m.group('type')
for key, value in kwargs.items():
setattr(self, key, value)
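# Illustration (values hypothetical): extra keyword arguments become instance
# attributes and the geometry type is parsed from the WKT prefix:
#   g = TestGeom('POINT (5 23)', n_p=1)
#   g.geo_type  # 'POINT'
#   g.n_p       # 1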
# For the old tests
swig_geoms = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))', ncoords=5),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10) ))', ncoords=10),
)
# Testing WKT & HEX
hex_wkt = (TestGeom('POINT(0 1)', hex='01010000000000000000000000000000000000F03F'),
TestGeom('LINESTRING(0 1, 2 3, 4 5)', hex='0102000000030000000000000000000000000000000000F03F0000000000000040000000000000084000000000000010400000000000001440'),
TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))', hex='010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000'),
TestGeom('MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0)', hex='010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
TestGeom('MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20))', hex='01050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440'),
TestGeom('MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25)))', hex='010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A40000000000000394000000000000039400000000000003940'),
TestGeom('GEOMETRYCOLLECTION(MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0)),((20 20, 20 30, 30 30, 30 20, 20 20),(25 25, 25 26, 26 26, 26 25, 25 25))),MULTILINESTRING((0 0, 10 0, 10 10, 0 10),(20 20, 30 20)),MULTIPOINT(0 0, 10 0, 10 10, 0 10, 0 0))', hex='010700000003000000010600000002000000010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000010300000002000000050000000000000000003440000000000000344000000000000034400000000000003E400000000000003E400000000000003E400000000000003E40000000000000344000000000000034400000000000003440050000000000000000003940000000000000394000000000000039400000000000003A400000000000003A400000000000003A400000000000003A4000000000000039400000000000003940000000000000394001050000000200000001020000000400000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440010200000002000000000000000000344000000000000034400000000000003E400000000000003440010400000005000000010100000000000000000000000000000000000000010100000000000000000024400000000000000000010100000000000000000024400000000000002440010100000000000000000000000000000000002440010100000000000000000000000000000000000000'),
)
# WKT, GML, KML output
wkt_out = (TestGeom('POINT (110 130)', ewkt='POINT (110.0000000000000000 130.0000000000000000)', kml='<Point><coordinates>110.0,130.0,0</coordinates></Point>', gml='<gml:Point><gml:coordinates>110,130</gml:coordinates></gml:Point>'),
TestGeom('LINESTRING (40 40,50 130,130 130)', ewkt='LINESTRING (40.0000000000000000 40.0000000000000000, 50.0000000000000000 130.0000000000000000, 130.0000000000000000 130.0000000000000000)', kml='<LineString><coordinates>40.0,40.0,0 50.0,130.0,0 130.0,130.0,0</coordinates></LineString>', gml='<gml:LineString><gml:coordinates>40,40 50,130 130,130</gml:coordinates></gml:LineString>'),
TestGeom('POLYGON ((150 150,410 150,280 20,20 20,150 150),(170 120,330 120,260 50,100 50,170 120))', ewkt='POLYGON ((150.0000000000000000 150.0000000000000000, 410.0000000000000000 150.0000000000000000, 280.0000000000000000 20.0000000000000000, 20.0000000000000000 20.0000000000000000, 150.0000000000000000 150.0000000000000000), (170.0000000000000000 120.0000000000000000, 330.0000000000000000 120.0000000000000000, 260.0000000000000000 50.0000000000000000, 100.0000000000000000 50.0000000000000000, 170.0000000000000000 120.0000000000000000))', kml='<Polygon><outerBoundaryIs><LinearRing><coordinates>150.0,150.0,0 410.0,150.0,0 280.0,20.0,0 20.0,20.0,0 150.0,150.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>170.0,120.0,0 330.0,120.0,0 260.0,50.0,0 100.0,50.0,0 170.0,120.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon>', gml='<gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>150,150 410,150 280,20 20,20 150,150</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>170,120 330,120 260,50 100,50 170,120</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon>'),
TestGeom('MULTIPOINT (10 80,110 170,110 120)', ewkt='MULTIPOINT (10.0000000000000000 80.0000000000000000, 110.0000000000000000 170.0000000000000000, 110.0000000000000000 120.0000000000000000)', kml='<MultiGeometry><Point><coordinates>10.0,80.0,0</coordinates></Point><Point><coordinates>110.0,170.0,0</coordinates></Point><Point><coordinates>110.0,120.0,0</coordinates></Point></MultiGeometry>', gml='<gml:MultiPoint><gml:pointMember><gml:Point><gml:coordinates>10,80</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,170</gml:coordinates></gml:Point></gml:pointMember><gml:pointMember><gml:Point><gml:coordinates>110,120</gml:coordinates></gml:Point></gml:pointMember></gml:MultiPoint>'),
TestGeom('MULTILINESTRING ((110 100,40 30,180 30),(170 30,110 90,50 30))', ewkt='MULTILINESTRING ((110.0000000000000000 100.0000000000000000, 40.0000000000000000 30.0000000000000000, 180.0000000000000000 30.0000000000000000), (170.0000000000000000 30.0000000000000000, 110.0000000000000000 90.0000000000000000, 50.0000000000000000 30.0000000000000000))', kml='<MultiGeometry><LineString><coordinates>110.0,100.0,0 40.0,30.0,0 180.0,30.0,0</coordinates></LineString><LineString><coordinates>170.0,30.0,0 110.0,90.0,0 50.0,30.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:MultiLineString><gml:lineStringMember><gml:LineString><gml:coordinates>110,100 40,30 180,30</gml:coordinates></gml:LineString></gml:lineStringMember><gml:lineStringMember><gml:LineString><gml:coordinates>170,30 110,90 50,30</gml:coordinates></gml:LineString></gml:lineStringMember></gml:MultiLineString>'),
TestGeom('MULTIPOLYGON (((110 110,70 200,150 200,110 110),(110 110,100 180,120 180,110 110)),((110 110,150 20,70 20,110 110),(110 110,120 40,100 40,110 110)))', ewkt='MULTIPOLYGON (((110.0000000000000000 110.0000000000000000, 70.0000000000000000 200.0000000000000000, 150.0000000000000000 200.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 100.0000000000000000 180.0000000000000000, 120.0000000000000000 180.0000000000000000, 110.0000000000000000 110.0000000000000000)), ((110.0000000000000000 110.0000000000000000, 150.0000000000000000 20.0000000000000000, 70.0000000000000000 20.0000000000000000, 110.0000000000000000 110.0000000000000000), (110.0000000000000000 110.0000000000000000, 120.0000000000000000 40.0000000000000000, 100.0000000000000000 40.0000000000000000, 110.0000000000000000 110.0000000000000000)))', kml='<MultiGeometry><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 70.0,200.0,0 150.0,200.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 100.0,180.0,0 120.0,180.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 150.0,20.0,0 70.0,20.0,0 110.0,110.0,0</coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>110.0,110.0,0 120.0,40.0,0 100.0,40.0,0 110.0,110.0,0</coordinates></LinearRing></innerBoundaryIs></Polygon></MultiGeometry>', gml='<gml:MultiPolygon><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 70,200 150,200 110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 100,180 120,180 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember><gml:polygonMember><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 150,20 70,20 110,110</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs><gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>110,110 120,40 100,40 110,110</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs></gml:Polygon></gml:polygonMember></gml:MultiPolygon>'),
TestGeom('GEOMETRYCOLLECTION (POINT (110 260),LINESTRING (110 0,110 60))', ewkt='GEOMETRYCOLLECTION (POINT (110.0000000000000000 260.0000000000000000), LINESTRING (110.0000000000000000 0.0000000000000000, 110.0000000000000000 60.0000000000000000))', kml='<MultiGeometry><Point><coordinates>110.0,260.0,0</coordinates></Point><LineString><coordinates>110.0,0.0,0 110.0,60.0,0</coordinates></LineString></MultiGeometry>', gml='<gml:GeometryCollection><gml:geometryMember><gml:Point><gml:coordinates>110,260</gml:coordinates></gml:Point></gml:geometryMember><gml:geometryMember><gml:LineString><gml:coordinates>110,0 110,60</gml:coordinates></gml:LineString></gml:geometryMember></gml:GeometryCollection>'),
)
# Errors
errors = (TestGeom('GEOMETR##!@#%#............a32515', bad=True, hex=False),
TestGeom('Foo.Bar', bad=True, hex=False),
TestGeom('POINT (5, 23)', bad=True, hex=False),
TestGeom('AAABBBDDDAAD##@#1113511111-098111111111111111533333333333333', bad=True, hex=True),
TestGeom('FFFFFFFFFFFFFFFFF1355555555555555555565111', bad=True, hex=True),
TestGeom('', bad=True, hex=False),
)
# Polygons
polygons = (TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 90, 90 90, 90 10, 10 10))',
n_i=1, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=10, area=3600.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10), (80 80, 80 90, 90 90, 90 80, 80 80))',
n_i=2, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=15, area=9800.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((0 0, 0 100, 100 100, 100 0, 0 0))',
n_i=0, ext_ring_cs=((0, 0), (0, 100), (100, 100), (100, 0), (0, 0)), n_p=5, area=10000.0, centroid=(50., 50.),
),
TestGeom('POLYGON ((-95.3848703124799471 29.7056021479768511, -95.3851905195191847 29.7046588196500281, -95.3859356966379011 29.7025053545605502, -95.3860723000647539 29.7020963367038391, -95.3871517697222089 29.6989779021280995, -95.3865578518265522 29.6990856888057202, -95.3862634205175226 29.6999471753441782, -95.3861991779541967 29.6999591988978615, -95.3856773799358137 29.6998323107113578, -95.3856209915427229 29.6998005235473741, -95.3855833545501639 29.6996619391729801, -95.3855776331865002 29.6996232659570047, -95.3850162731712885 29.6997236706530536, -95.3831047357410284 29.7000847603095082, -95.3829800724914776 29.7000676365023502, -95.3828084594470909 29.6999969684031200, -95.3828131504821499 29.6999090511531065, -95.3828022942979601 29.6998152117366025, -95.3827893930918833 29.6997790953076759, -95.3825174668099862 29.6998267772748825, -95.3823521544804862 29.7000451723151606, -95.3820491918785223 29.6999682034582335, -95.3817932841505893 29.6999640407204772, -95.3815438924600443 29.7005983712500630, -95.3807812390843424 29.7007538492921590, -95.3778578936435935 29.7012966201172048, -95.3770817300034679 29.7010555145969093, -95.3772763716395957 29.7004995005932031, -95.3769891024414420 29.7005797730360186, -95.3759855007185990 29.7007754783987821, -95.3759516423090474 29.7007305400669388, -95.3765252155960042 29.6989549173240874, -95.3766842746727832 29.6985134987163164, -95.3768510987262914 29.6980530300744938, -95.3769198676258014 29.6977137204527573, -95.3769616670751930 29.6973351617272172, -95.3770309229297766 29.6969821084304186, -95.3772352596880637 29.6959751305871613, -95.3776232419333354 29.6945439060847463, -95.3776849628727064 29.6943364710766069, -95.3779699491714723 29.6926548349458947, -95.3781945479573494 29.6920088336742545, -95.3785807118394189 29.6908279316076005, -95.3787441368896651 29.6908846275832197, -95.3787903214163890 29.6907152912461640, -95.3791765069353659 29.6893335376821526, -95.3794935959513026 29.6884781789101595, -95.3796592071232112 29.6880066681407619, -95.3799788182090111 29.6873687353035081, -95.3801545516183893 29.6868782380716993, -95.3801258908302145 29.6867756621337762, -95.3801104284899566 29.6867229678809572, -95.3803803523746154 29.6863753372986459, -95.3821028558287622 29.6837392961470421, -95.3827289584682205 29.6828097375216160, -95.3827494698109035 29.6790739156259278, -95.3826022014838486 29.6776502228345507, -95.3825047356438063 29.6765773006280753, -95.3823473035336917 29.6750405250369127, -95.3824540163482055 29.6750076408228587, -95.3838984230304305 29.6745679207378679, -95.3916547074937426 29.6722459226508377, -95.3926154662749468 29.6719609085105489, -95.3967246645118081 29.6707316485589736, -95.3974588054406780 29.6705065336410989, -95.3978523748756828 29.6703795547846845, -95.3988598162279970 29.6700874981900853, -95.3995628600665952 29.6698505300412414, -95.4134721665944170 29.6656841279906232, -95.4143262068232616 29.6654291174019278, -95.4159685142480214 29.6649750989232288, -95.4180067396277565 29.6643253024318021, -95.4185886692196590 29.6641482768691063, -95.4234155309609662 29.6626925393704788, -95.4287785503196346 29.6611023620959706, -95.4310287312749352 29.6604222580752648, -95.4320295629628959 29.6603361318136720, -95.4332899683975739 29.6600560661713608, -95.4342675748811047 29.6598454934599900, -95.4343110414310871 29.6598411486215490, -95.4345576779282538 29.6598147020668499, -95.4348823041721630 29.6597875803673112, -95.4352827715209457 29.6597762346946681, -95.4355290431309982 29.6597827926562374, -95.4359197997999331 29.6598014511782715, -95.4361907884752156 29.6598444333523368, -95.4364608955807228 29.6598901433108217, -95.4367250147512323 29.6599494499910712, -95.4364898759758091 29.6601880616540186, -95.4354501111810691 29.6616378572201107, -95.4381459623171224 29.6631265631655126, -95.4367852490863129 29.6642266600024023, -95.4370040894557263 29.6643425389568769, -95.4367078350812648 29.6645492592343238, -95.4366081749871285 29.6646291473027297, -95.4358539359938192 29.6652308742342932, -95.4350327668927889 29.6658995989314462, -95.4350580905272921 29.6678812477895271, -95.4349710541447536 29.6680054925936965, -95.4349500440473548 29.6671410080890006, -95.4341492724148850 29.6678790545191688, -95.4340248868274728 29.6680353198492135, -95.4333227845797438 29.6689245624945990, -95.4331325652123326 29.6691616138940901, -95.4321314741096955 29.6704473333237253, -95.4320435792664341 29.6702578985411982, -95.4320147929883547 29.6701800936425109, -95.4319764538662980 29.6683246590817085, -95.4317490976340679 29.6684974372577166, -95.4305958185342718 29.6694049049170374, -95.4296600735653016 29.6701723430938493, -95.4284928989940937 29.6710931793380972, -95.4274630532378580 29.6719378813640091, -95.4273056811974811 29.6720684984625791, -95.4260554084574864 29.6730668861566969, -95.4253558063699643 29.6736342467365724, -95.4249278826026028 29.6739557343648919, -95.4248648873821423 29.6745400910786152, -95.4260016131471929 29.6750987014005858, -95.4258567183010911 29.6753452063069929, -95.4260238081486847 29.6754322077221353, -95.4258707374502393 29.6756647377294307, -95.4257951755816691 29.6756407098663360, -95.4257701599566985 29.6761077719536068, -95.4257726684792260 29.6761711204603955, -95.4257980187195614 29.6770219651929423, -95.4252712669032519 29.6770161558853758, -95.4249234392992065 29.6770068683962300, -95.4249574272905789 29.6779707498635759, -95.4244725881033702 29.6779825646764159, -95.4222269476429545 29.6780711474441716, -95.4223032371999267 29.6796029391538809, -95.4239133706588945 29.6795331493690355, -95.4224579084327331 29.6813706893847780, -95.4224290108823965 29.6821953228763924, -95.4230916478977349 29.6822130268724109, -95.4222928279595521 29.6832041816675343, -95.4228763710016352 29.6832087677714505, -95.4223401691637179 29.6838987872753748, -95.4211655906087088 29.6838784024852984, -95.4201984153205558 29.6851319258758082, -95.4206156387716362 29.6851623398125319, -95.4213438084897660 29.6851763011334739, -95.4212071118618752 29.6853679931624974, -95.4202651399651245 29.6865313962980508, -95.4172061157659783 29.6865816431043932, -95.4182217951255183 29.6872251197301544, -95.4178664826439160 29.6876750901471631, -95.4180678442928780 29.6877960336377207, -95.4188763472917572 29.6882826379510938, -95.4185374500596311 29.6887137897831934, -95.4182121713132290 29.6885097429738813, -95.4179857231741551 29.6888118367840086, -95.4183106010563620 29.6890048676118212, -95.4179489865331334 29.6894546700979056, -95.4175581746284820 29.6892323606815438, -95.4173439957341571 29.6894990139807007, -95.4177411199311081 29.6897435034738422, -95.4175789200209721 29.6899207529979208, -95.4170598559864800 29.6896042165807508, -95.4166733682539814 29.6900891174451367, -95.4165941362704331 29.6900347214235047, -95.4163537218065301 29.6903529467753238, -95.4126843270708775 29.6881086357212780, -95.4126604121378392 29.6880942378803496, -95.4126672298953338 29.6885951670109982, -95.4126680884821923 29.6887052446594275, -95.4158080137241882 29.6906382377959339, -95.4152061403821961 29.6910871045531586, -95.4155842583188161 29.6917382915894308, -95.4157426793520358 29.6920726941677096, -95.4154520563662203 29.6922052332446427, -95.4151389936167078 29.6923261661269571, -95.4148649784384872 29.6924343866430256, -95.4144051352401590 29.6925623927348106, -95.4146792019416665 29.6926770338507744, -95.4148824479948985 29.6928117893696388, -95.4149851734360226 29.6929823719519774, -95.4140436551925291 29.6929626643100946, -95.4140465993023241 29.6926545917254892, -95.4137269186733334 29.6927395764256090, -95.4137372859685513 29.6935432485666624, -95.4135702836218655 29.6933186678088283, -95.4133925235973237 29.6930415229852152, -95.4133017035615580 29.6928685062036166, -95.4129588921634593 29.6929391128977862, -95.4125107395559695 29.6930481664661485, -95.4102647423187307 29.6935850183258019, -95.4081931340840157 29.6940907430947760, -95.4078783596459772 29.6941703429951609, -95.4049213975000043 29.6948723732981961, -95.4045944244127071 29.6949626434239207, -95.4045865139788134 29.6954109019001358, -95.4045953345484037 29.6956972800496963, -95.4038879332535146 29.6958296089365490, -95.4040366394459340 29.6964389004769842, -95.4032774779020798 29.6965643341263892, -95.4026066501239853 29.6966646227683881, -95.4024991226393837 29.6961389766619703, -95.4011781398631911 29.6963566063186377, -95.4011524097636112 29.6962596176762190, -95.4018184046368276 29.6961399466727336, -95.4016995838361908 29.6956442609415099, -95.4007100753964608 29.6958900524002978, -95.4008032469935188 29.6962639900781404, -95.3995660267125487 29.6965636449370329, -95.3996140564775601 29.6967877962763644, -95.3996364430014410 29.6968901984825280, -95.3984003269631842 29.6968679634805746, -95.3981442026887265 29.6983660679730335, -95.3980178461957706 29.6990890276252415, -95.3977097967130163 29.7008526152273049, -95.3962347157626027 29.7009697553607630, -95.3951949050136250 29.7004740386619019, -95.3957564950617183 29.6990281830553187, -95.3965927101519924 29.6968771129030706, -95.3957496517238184 29.6970800358387095, -95.3957720559467361 29.6972264611230727, -95.3957391586571788 29.6973548894558732, -95.3956286413405365 29.6974949857280883, -95.3955111053256957 29.6975661086270186, -95.3953215342724121 29.6976022763384790, -95.3951795558443365 29.6975846977491038, -95.3950369632041060 29.6975175779330200, -95.3949401089966500 29.6974269267953304, -95.3948740281415581 29.6972903308506346, -95.3946650813866910 29.6973397326847923, -95.3947654059391112 29.6974882560192022, -95.3949627316619768 29.6980355864961858, -95.3933200807862249 29.6984590863712796, -95.3932606497523494 29.6984464798710839, -95.3932983699113350 29.6983154306484352, -95.3933058014696655 29.6982165816983610, -95.3932946347785133 29.6981089778195759, -95.3931780601756287 29.6977068906794841, -95.3929928222970602 29.6977541771878180, -95.3930873169846478 29.6980676264932946, -95.3932743746374570 29.6981249406449663, -95.3929512584706316 29.6989526513922222, -95.3919850280655197 29.7014358632108646, -95.3918950918929056 29.7014169320765724, -95.3916928317890296 29.7019232352846423, -95.3915424614970959 29.7022988712928289, -95.3901530441668939 29.7058519502930061, -95.3899656322116698 29.7059156823562418, -95.3897628748670883 29.7059900058266777, -95.3896062677805787 29.7060738276384946, -95.3893941800512266 29.7061891695242046, -95.3892150365492455 29.7062641292949436, -95.3890502563035199 29.7063339729630940, -95.3888717930715586 29.7063896908080736, -95.3886925428988945 29.7064453871994978, -95.3885376849411983 29.7064797304524149, -95.3883284158984139 29.7065153575050189, -95.3881046767627794 29.7065368368267357, -95.3878809284696132 29.7065363048447537, -95.3876046356120924 29.7065288525102424, -95.3873060894974714 29.7064822806001452, -95.3869851943158409 29.7063993367575350, -95.3865967896568065 29.7062870572919202, -95.3861785624983156 29.7061492099008184, -95.3857375009733488 29.7059887337478798, -95.3854573290902152 29.7058683664514618, -95.3848703124799471 29.7056021479768511))',
n_i=0, ext_ring_cs=False, n_p=264, area=0.00129917360654, centroid=(-95.403569179437341, 29.681772571690402),
),
)
# MultiPolygons
multipolygons = (TestGeom('MULTIPOLYGON (((100 20, 180 20, 180 100, 100 100, 100 20)), ((20 100, 100 100, 100 180, 20 180, 20 100)), ((100 180, 180 180, 180 260, 100 260, 100 180)), ((180 100, 260 100, 260 180, 180 180, 180 100)))', valid=True, num_geom=4, n_p=20),
TestGeom('MULTIPOLYGON (((60 300, 320 220, 260 60, 60 100, 60 300)), ((60 300, 320 220, 260 60, 60 100, 60 300)))', valid=False),
TestGeom('MULTIPOLYGON (((180 60, 240 160, 300 60, 180 60)), ((80 80, 180 60, 160 140, 240 160, 360 140, 300 60, 420 100, 320 280, 120 260, 80 80)))', valid=True, num_geom=2, n_p=14),
)
# Points
points = (TestGeom('POINT (5 23)', x=5.0, y=23.0, centroid=(5.0, 23.0)),
TestGeom('POINT (-95.338492 29.723893)', x=-95.338492, y=29.723893, centroid=(-95.338492, 29.723893)),
TestGeom('POINT(1.234 5.678)', x=1.234, y=5.678, centroid=(1.234, 5.678)),
TestGeom('POINT(4.321 8.765)', x=4.321, y=8.765, centroid=(4.321, 8.765)),
TestGeom('POINT(10 10)', x=10, y=10, centroid=(10., 10.)),
TestGeom('POINT (5 23 8)', x=5.0, y=23.0, z=8.0, centroid=(5.0, 23.0)),
)
# MultiPoints
multipoints = (TestGeom('MULTIPOINT(10 10, 20 20 )', n_p=2, points=((10., 10.), (20., 20.)), centroid=(15., 15.)),
TestGeom('MULTIPOINT(10 10, 20 20, 10 20, 20 10)',
n_p=4, points=((10., 10.), (20., 20.), (10., 20.), (20., 10.)),
centroid=(15., 15.)),
)
# LineStrings
linestrings = (TestGeom('LINESTRING (60 180, 120 100, 180 180)', n_p=3, centroid=(120, 140), tup=((60, 180), (120, 100), (180, 180))),
TestGeom('LINESTRING (0 0, 5 5, 10 5, 10 10)', n_p=4, centroid=(6.1611652351681556, 4.6966991411008934), tup=((0, 0), (5, 5), (10, 5), (10, 10)),),
)
# Linear Rings
linearrings = (TestGeom('LINEARRING (649899.3065171393100172 4176512.3807915160432458, 649902.7294133581453934 4176512.7834989596158266, 649906.5550170192727819 4176514.3942507002502680, 649910.5820134161040187 4176516.0050024418160319, 649914.4076170771149918 4176518.0184616246260703, 649917.2264131171396002 4176519.4278986593708396, 649920.0452871860470623 4176521.6427505780011415, 649922.0587463703704998 4176522.8507948759943247, 649924.2735982896992937 4176524.4616246484220028, 649926.2870574744883925 4176525.4683542405255139, 649927.8978092158213258 4176526.8777912775985897, 649929.3072462501004338 4176528.0858355751261115, 649930.1126611357321963 4176529.4952726080082357, 649927.4951798024121672 4176506.9444361114874482, 649899.3065171393100172 4176512.3807915160432458)', n_p=15),
)
# MultiLineStrings
multilinestrings = (TestGeom('MULTILINESTRING ((0 0, 0 100), (100 0, 100 100))', n_p=4, centroid=(50, 50), tup=(((0, 0), (0, 100)), ((100, 0), (100, 100)))),
TestGeom('MULTILINESTRING ((20 20, 60 60), (20 -20, 60 -60), (-20 -20, -60 -60), (-20 20, -60 60), (-80 0, 0 80, 80 0, 0 -80, -80 0), (-40 20, -40 -20), (-20 40, 20 40), (40 20, 40 -20), (20 -40, -20 -40))',
n_p=21, centroid=(0, 0), tup=(((20., 20.), (60., 60.)), ((20., -20.), (60., -60.)), ((-20., -20.), (-60., -60.)), ((-20., 20.), (-60., 60.)), ((-80., 0.), (0., 80.), (80., 0.), (0., -80.), (-80., 0.)), ((-40., 20.), (-40., -20.)), ((-20., 40.), (20., 40.)), ((40., 20.), (40., -20.)), ((20., -40.), (-20., -40.))))
)
# ====================================================
# Topology Operations
topology_geoms = ( (TestGeom('POLYGON ((-5.0 0.0, -5.0 10.0, 5.0 10.0, 5.0 0.0, -5.0 0.0))'),
TestGeom('POLYGON ((0.0 -5.0, 0.0 5.0, 10.0 5.0, 10.0 -5.0, 0.0 -5.0))')
),
(TestGeom('POLYGON ((2 0, 18 0, 18 15, 2 15, 2 0))'),
TestGeom('POLYGON ((10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
),
)
intersect_geoms = ( TestGeom('POLYGON ((5 5,5 0,0 0,0 5,5 5))'),
TestGeom('POLYGON ((10 1, 9 3, 7 4, 5 6, 4 8, 4 10, 5 12, 7 13, 9 12, 10 10, 11 12, 13 13, 15 12, 16 10, 16 8, 15 6, 13 4, 11 3, 10 1))'),
)
union_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,10 5,10 -5,0 -5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0))'),
)
diff_geoms = ( TestGeom('POLYGON ((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
sdiff_geoms = ( TestGeom('MULTIPOLYGON (((-5 0,-5 10,5 10,5 5,0 5,0 0,-5 0)),((0 0,5 0,5 5,10 5,10 -5,0 -5,0 0)))'),
TestGeom('POLYGON ((2 0, 2 15, 18 15, 18 0, 2 0), (10 1, 11 3, 13 4, 15 6, 16 8, 16 10, 15 12, 13 13, 11 12, 10 10, 9 12, 7 13, 5 12, 4 10, 4 8, 5 6, 7 4, 9 3, 10 1))'),
)
relate_geoms = ( (TestGeom('MULTIPOINT(80 70, 20 20, 200 170, 140 120)'),
TestGeom('MULTIPOINT(80 170, 140 120, 200 80, 80 70)'),
'0F0FFF0F2', True,),
(TestGeom('POINT(20 20)'), TestGeom('POINT(40 60)'),
'FF0FFF0F2', True,),
(TestGeom('POINT(110 110)'), TestGeom('LINESTRING(200 200, 110 110, 200 20, 20 20, 110 110, 20 200, 200 200)'),
'0FFFFF1F2', True,),
(TestGeom('MULTILINESTRING((20 20, 90 20, 170 20), (90 20, 90 80, 90 140))'),
TestGeom('MULTILINESTRING((90 20, 170 100, 170 140), (130 140, 130 60, 90 20, 20 90, 90 20))'),
'FF10F0102', True,),
)
buffer_geoms = ( (TestGeom('POINT(0 0)'),
TestGeom('POLYGON ((5 0,4.903926402016153 -0.97545161008064,4.619397662556435 -1.913417161825447,4.157348061512728 -2.777851165098009,3.53553390593274 -3.535533905932735,2.777851165098015 -4.157348061512724,1.913417161825454 -4.619397662556431,0.975451610080648 -4.903926402016151,0.000000000000008 -5.0,-0.975451610080632 -4.903926402016154,-1.913417161825439 -4.619397662556437,-2.777851165098002 -4.157348061512732,-3.53553390593273 -3.535533905932746,-4.157348061512719 -2.777851165098022,-4.619397662556429 -1.913417161825462,-4.903926402016149 -0.975451610080656,-5.0 -0.000000000000016,-4.903926402016156 0.975451610080624,-4.619397662556441 1.913417161825432,-4.157348061512737 2.777851165097995,-3.535533905932752 3.535533905932723,-2.777851165098029 4.157348061512714,-1.913417161825468 4.619397662556426,-0.975451610080661 4.903926402016149,-0.000000000000019 5.0,0.975451610080624 4.903926402016156,1.913417161825434 4.61939766255644,2.777851165097998 4.157348061512735,3.535533905932727 3.535533905932748,4.157348061512719 2.777851165098022,4.619397662556429 1.91341716182546,4.90392640201615 0.975451610080652,5 0))'),
5.0, 8),
(TestGeom('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'),
TestGeom('POLYGON ((-2 0,-2 10,-1.961570560806461 10.390180644032258,-1.847759065022573 10.765366864730179,-1.662939224605091 11.111140466039204,-1.414213562373095 11.414213562373096,-1.111140466039204 11.662939224605092,-0.765366864730179 11.847759065022574,-0.390180644032256 11.961570560806461,0 12,10 12,10.390180644032256 11.961570560806461,10.765366864730179 11.847759065022574,11.111140466039204 11.66293922460509,11.414213562373096 11.414213562373096,11.66293922460509 11.111140466039204,11.847759065022574 10.765366864730179,11.961570560806461 10.390180644032256,12 10,12 0,11.961570560806461 -0.390180644032256,11.847759065022574 -0.76536686473018,11.66293922460509 -1.111140466039204,11.414213562373096 -1.414213562373095,11.111140466039204 -1.66293922460509,10.765366864730179 -1.847759065022573,10.390180644032256 -1.961570560806461,10 -2,0.0 -2.0,-0.390180644032255 -1.961570560806461,-0.765366864730177 -1.847759065022575,-1.1111404660392 -1.662939224605093,-1.41421356237309 -1.4142135623731,-1.662939224605086 -1.111140466039211,-1.84775906502257 -0.765366864730189,-1.961570560806459 -0.390180644032268,-2 0))'),
2.0, 8),
)
json_geoms = (TestGeom('POINT(100 0)', json='{ "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }'),
TestGeom('POLYGON((0 0, -10 0, -10 -10, 0 -10, 0 0))', json='{ "type": "Polygon", "coordinates": [ [ [ 0.000000, 0.000000 ], [ -10.000000, 0.000000 ], [ -10.000000, -10.000000 ], [ 0.000000, -10.000000 ], [ 0.000000, 0.000000 ] ] ] }'),
TestGeom('MULTIPOLYGON(((102 2, 103 2, 103 3, 102 3, 102 2)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))', json='{ "type": "MultiPolygon", "coordinates": [ [ [ [ 102.000000, 2.000000 ], [ 103.000000, 2.000000 ], [ 103.000000, 3.000000 ], [ 102.000000, 3.000000 ], [ 102.000000, 2.000000 ] ] ], [ [ [ 100.000000, 0.000000 ], [ 101.000000, 0.000000 ], [ 101.000000, 1.000000 ], [ 100.000000, 1.000000 ], [ 100.000000, 0.000000 ] ], [ [ 100.200000, 0.200000 ], [ 100.800000, 0.200000 ], [ 100.800000, 0.800000 ], [ 100.200000, 0.800000 ], [ 100.200000, 0.200000 ] ] ] ] }'),
TestGeom('GEOMETRYCOLLECTION(POINT(100 0),LINESTRING(101.0 0.0, 102.0 1.0))',
json='{ "type": "GeometryCollection", "geometries": [ { "type": "Point", "coordinates": [ 100.000000, 0.000000 ] }, { "type": "LineString", "coordinates": [ [ 101.000000, 0.000000 ], [ 102.000000, 1.000000 ] ] } ] }',
),
TestGeom('MULTILINESTRING((100.0 0.0, 101.0 1.0),(102.0 2.0, 103.0 3.0))',
json="""
{ "type": "MultiLineString",
"coordinates": [
[ [100.0, 0.0], [101.0, 1.0] ],
[ [102.0, 2.0], [103.0, 3.0] ]
]
}
""",
not_equal=True,
),
)
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
| apache-2.0 |
fx19880617/helix | contributors/py-helix-admin/helix/zkfunctions.py | 7 | 22919 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""library to handle helix commands"""
# XXX: Explore using zookeeper transactions for some of the write operations into zookeeper.
# Currently, it looks like ensure_path and create with the make_path argument are not supported in transactions so it isn't usable out of the box.
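# A hedged sketch (not wired in) of what a transactional write could look like with
# kazoo, assuming every parent znode already exists -- transaction operations cannot
# create intermediate nodes, which is the limitation noted above. Paths and payloads
# are illustrative only:
#
#     txn = zk.transaction()
#     txn.create("/helix/myCluster/CONFIGS/CLUSTER/myCluster", json.dumps(data))
#     txn.set_data("/helix/myCluster/IDEALSTATES/myDB", json.dumps(ideal_state))
#     results = txn.commit()  # all-or-nothing; returns a list of per-operation results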
import json
from ordereddict import OrderedDict
from kazoo.client import KazooClient
from statemodeldefs import STATE_DEF_MAP
from helixexceptions import HelixException
from helixexceptions import HelixAlreadyExistsException
from helixexceptions import HelixDoesNotExistException
from kazoo.exceptions import NodeExistsError
RESOURCE_MODES = ["FULL_AUTO", "CUSTOMIZED", "SEMI_AUTO", "USER_DEFINED"]
IDEAL_STATE_PATH = "/{clusterName}/IDEALSTATES"
RESOURCE_IDEAL_STATE_PATH = "/{clusterName}/IDEALSTATES/{resourceName}"
EXTERNAL_VIEW_STATE_PATH = "/{clusterName}/EXTERNALVIEW/{resourceName}"
INSTANCE_PATH = "/{clusterName}/INSTANCES"
PARTICIPANT_CONFIG_PATH = "/{clusterName}/CONFIGS/PARTICIPANT/{instanceName}"
PARTICIPANTS_CONFIG_PATH = "/{clusterName}/CONFIGS/PARTICIPANT"
CLUSTER_CONFIG_PATH = "/{clusterName}/CONFIGS/CLUSTER/{clusterName}"
CONFIG_PATH = "/{clusterName}/CONFIGS/{configName}/{entityName}"
STATE_MODEL_DEF_PATH = "/{clusterName}/STATEMODELDEFS/{stateModelName}"
LIVE_INSTANCE_PATH = "/{clusterName}/LIVEINSTANCES/{instanceName}"
HELIX_ZOOKEEPER_PATHS = {
"cluster": [
"/{clusterName}/CONFIGS",
"/{clusterName}/CONFIGS/RESOURCE",
"/{clusterName}/CONFIGS/CLUSTER",
PARTICIPANTS_CONFIG_PATH,
"/{clusterName}/LIVEINSTANCES",
INSTANCE_PATH,
IDEAL_STATE_PATH,
#"/{clusterName}/RESOURCEASSIGNMENTS",
"/{clusterName}/EXTERNALVIEW",
"/{clusterName}/STATEMODELDEFS",
"/{clusterName}/CONTROLLER",
"/{clusterName}/CONTROLLER/HISTORY",
"/{clusterName}/CONTROLLER/ERRORS",
"/{clusterName}/CONTROLLER/MESSAGES",
"/{clusterName}/CONTROLLER/STATUSUPDATES",
"/{clusterName}/PROPERTYSTORE",
],
"resource": [
RESOURCE_IDEAL_STATE_PATH,
"/{clusterName}/RESOURCEASSIGNMENTS/{resourceName}",
EXTERNAL_VIEW_STATE_PATH
],
"instance": [
#"/{clusterName}/LIVEINSTANCES/{instanceName}",
"/{clusterName}/INSTANCES/{instanceName}",
"/{clusterName}/INSTANCES/{instanceName}/CURRENTSTATES",
"/{clusterName}/INSTANCES/{instanceName}/ERRORS",
"/{clusterName}/INSTANCES/{instanceName}/STATUSUPDATES",
"/{clusterName}/INSTANCES/{instanceName}/MESSAGES"
],
"statemodel": [
STATE_MODEL_DEF_PATH
]
}
CLUSTER_CONFIG_TEMPLATE = OrderedDict()
CLUSTER_CONFIG_TEMPLATE["id"] = "{clusterName}"
CLUSTER_CONFIG_TEMPLATE["mapFields"] = {}
CLUSTER_CONFIG_TEMPLATE["listFields"] = {}
CLUSTER_CONFIG_TEMPLATE["simpleFields"] = {"allowParticipantAutoJoin": "true"}
class ZookeeperHelixFunctions(object):
"""Zookeeper based client to manage helix clusters"""
def __init__(self, zookeeper_connect_string, zk_root):
"""Constructor."""
self.zk = KazooClient(hosts=zookeeper_connect_string)
self.zk.start()
self.zk_root = zk_root
def _list_path(self, path):
"""List a zookeeper path."""
return self.zk.get_children(path)
def _is_valid_cluster(self, cluster):
"""Validate cluster configuration."""
for path in HELIX_ZOOKEEPER_PATHS.get("cluster"):
full_path = self._build_path(path.format(clusterName=cluster))
if not self.zk.exists(full_path):
return False
return True
def _build_path(self, path):
"""Construct zookeeper path."""
return "".join([self.zk_root, path])
@classmethod
def _build_instance_entry(cls, instance, enabled="true"):
"""Create the data entry for an instance."""
host, port = instance.split(":")
instance_data = OrderedDict()
instance_data["id"] = "{host}_{port}".format(host=host, port=port)
instance_data["listFields"] = {}
instance_data["mapFields"] = {}
instance_data["simpleFields"] = OrderedDict()
instance_data["simpleFields"]["HELIX_ENABLED"] = enabled
instance_data["simpleFields"]["HELIX_HOST"] = host
instance_data["simpleFields"]["HELIX_PORT"] = port
return instance_data
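# For example, with the illustrative instance "host1:12345" this returns:
#
#     {"id": "host1_12345",
#      "listFields": {},
#      "mapFields": {},
#      "simpleFields": {"HELIX_ENABLED": "true",
#                       "HELIX_HOST": "host1",
#                       "HELIX_PORT": "12345"}}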
def create_root(self):
"""Initialize zookeeper root"""
path = self._build_path("")
if not self.zk.exists(path):
self.zk.create(path)
return True
def get_clusters(self):
""" querys helix cluster for all clusters """
if self.zk.exists(self.zk_root):
return [ cluster for cluster in self._list_path(self.zk_root) if self._is_valid_cluster(cluster) ]
else:
return []
def get_resource_groups(self, cluster):
""" querys helix cluster for resources groups of the current cluster"""
return self._list_path(self._build_path(IDEAL_STATE_PATH.format(clusterName=cluster)))
def get_resource_tags(self, cluster):
"""returns a dict of resource tags for a cluster"""
resource_tags = {}
for resource in self.get_resource_groups(cluster):
resource_data, resource_meta = self._get_resource_group(cluster, resource)
tag = resource_data.get("INSTANCE_GROUP_TAG")
if tag:
resource_tags[tag] = [resource]
return resource_tags
def _get_resource_group(self, cluster, resource):
""" gets the ideal state of the specified resource group of the
current cluster"""
if resource not in self.get_resource_groups(cluster):
raise HelixException(
"{resource} is not a resource group of {cluster}".format(resource=resource, cluster=cluster))
data, stat = self.zk.get(self._build_path(RESOURCE_IDEAL_STATE_PATH.format(clusterName=cluster, resourceName=resource)))
return (json.loads(data), stat)
def get_resource_group(self, cluster, resource):
""" COMPAT: gets the ideal state of the specified resource group of the
current cluster"""
return self._get_resource_group(cluster, resource)[0]
def _get_ideal_state(self, cluster, resource):
""" gets the ideal state of the specified resource group of the
current cluster"""
if resource not in self.get_resource_groups(cluster):
raise HelixException(
"{0} is not a resource group of {1}".format(resource, cluster))
# _get_resource_group returns (data, stat); keep the stat for versioned writers.
data, stat = self._get_resource_group(cluster, resource)
return (data["mapFields"], stat)
def get_ideal_state(self, cluster, resource):
""" COMPAT: gets the ideal state of the specified resource group of the
current cluster"""
return self._get_ideal_state(cluster, resource)[0]
def _get_external_view(self, cluster, resource):
"""return the external view for a given cluster and resource"""
if resource not in self.get_resource_groups(cluster):
raise HelixException(
"{0} is not a resource group of {1}".format(resource, cluster))
data, stat = self.zk.get(self._build_path(EXTERNAL_VIEW_STATE_PATH.format(clusterName=cluster, resourceName=resource)))
return (json.loads(data)["mapFields"], stat)
def get_external_view(self, cluster, resource):
""" COMPAT: return the external view for a given cluster and resource"""
return self._get_external_view(cluster, resource)[0]
def get_instances(self, cluster):
"""get list of instances registered to the cluster"""
if not cluster:
raise HelixException("Cluster must be set before "
"calling this function")
instances = []
for instance in self._list_path(self._build_path(PARTICIPANTS_CONFIG_PATH.format(clusterName=cluster))):
instance_data = json.loads(self.zk.get(self._build_path(PARTICIPANT_CONFIG_PATH.format(clusterName=cluster, instanceName=instance)))[0])
if self.zk.exists(self._build_path(LIVE_INSTANCE_PATH.format(clusterName=cluster, instanceName=instance))):
instance_data["simpleFields"]["Alive"] = "true"
else:
instance_data["simpleFields"]["Alive"] = "false"
instances.append(instance_data)
return instances
def _get_instance_detail(self, cluster, instance):
"""get details of an instance"""
data, stat = self.zk.get(self._build_path(PARTICIPANT_CONFIG_PATH.format(clusterName=cluster, instanceName=instance)))
return (json.loads(data), stat)
def get_instance_detail(self, cluster, instance):
""" COMPAT: get details of an instance"""
return self._get_instance_detail(cluster, instance)[0]
def _get_config(self, cluster, config, entity):
"""get requested config"""
data, stat = self.zk.get(self._build_path(CONFIG_PATH.format(clusterName=cluster, configName=config, entityName=entity)))
return (json.loads(data), stat)
def get_config(self, cluster, config, entity):
""" COMPAT: get requested config"""
return self._get_config(cluster, config, entity)[0]
def add_cluster(self, cluster):
"""add a cluster to helix"""
if cluster in self.get_clusters():
raise HelixAlreadyExistsException(
"Cluster {0} already exists".format(cluster))
for path in HELIX_ZOOKEEPER_PATHS.get("cluster"):
self.zk.ensure_path(self._build_path(path.format(clusterName=cluster)))
data = CLUSTER_CONFIG_TEMPLATE
data["id"] = cluster
try:
self.zk.create(self._build_path(CLUSTER_CONFIG_PATH.format(clusterName=cluster)), json.dumps(data))
except NodeExistsError:
# Ignore existing cluster
pass
# Insert state defs if they don't exist
for state_def in STATE_DEF_MAP:
if not self.zk.exists(self._build_path(STATE_MODEL_DEF_PATH.format(clusterName=cluster, stateModelName=state_def))):
self.zk.create(self._build_path(STATE_MODEL_DEF_PATH.format(clusterName=cluster, stateModelName=state_def)), json.dumps(STATE_DEF_MAP[state_def]))
return True
def add_instance(self, cluster, instances, port):
"""add a list of instances to a cluster"""
if cluster not in self.get_clusters():
raise HelixDoesNotExistException(
"Cluster {0} does not exist".format(cluster))
if not isinstance(instances, list):
instances = [instances]
instances = ["{instance}:{port}".format(instance=instance, port=port) for instance in instances]
try:
newinstances = set(instances)
oldinstances = set(
[x["id"].replace('_', ':') for x in self.get_instances(cluster)])
instances = list(newinstances - oldinstances)
except HelixException:
# get_instances() raises this when there are no instances yet,
# which is expected when we are first populating the cluster
pass
if instances:
for instance in instances:
data = self._build_instance_entry(instance)
self.zk.create(self._build_path(PARTICIPANT_CONFIG_PATH.format(clusterName=cluster, instanceName=instance.replace(':', '_'))), json.dumps(data))
for path in HELIX_ZOOKEEPER_PATHS.get("instance"):
self.zk.ensure_path(self._build_path(path.format(clusterName=cluster, instanceName=instance.replace(':', '_'))))
return True
else:
raise HelixAlreadyExistsException(
"All instances given already exist in cluster")
def rebalance(self, cluster, resource, replicas, key=""):
"""rebalance the given resource group"""
if resource not in self.get_resource_groups(cluster):
raise HelixException(
"{0} is not a resource group of {1}".format(resource, cluster))
# TODO: key usage is currently not supported.
if not key == "":
raise NotImplementedError
resource_data, resource_meta = self._get_resource_group(cluster, resource)
resource_data["simpleFields"]["REPLICAS"] = replicas
self.zk.set(self._build_path(RESOURCE_IDEAL_STATE_PATH.format(clusterName=cluster, resourceName=resource)), json.dumps(resource_data))
return True
def activate_cluster(self, cluster, grand_cluster, enabled=True):
"""activate the cluster with the grand cluster"""
if grand_cluster not in self.get_clusters():
raise HelixException(
"grand cluster {0} does not exist".format(grand_cluster))
raise NotImplementedError
def deactivate_cluster(self, cluster, grand_cluster):
"""deactivate the cluster with the grand cluster"""
return self.activate_cluster(cluster, grand_cluster, enabled=False)
def add_resource(self, cluster, resource, partitions,
state_model_def, mode="", state_model_factory_name="DEFAULT"):
"""Add given resource group"""
if resource in self.get_resource_groups(cluster):
raise HelixAlreadyExistsException(
"ResourceGroup {0} already exists".format(resource))
data = {"id": resource,
"mapFields": {},
"listFields": {},
"simpleFields": {
"IDEAL_STATE_MODE": "AUTO",
"NUM_PARTITIONS": partitions,
"REBALANCE_MODE": mode,
"REPLICAS": "0",
"STATE_MODEL_DEF_REF": state_model_def,
"STATE_MODEL_FACTORY_NAME": state_model_factory_name
}
}
# REBALANCE_MODE in simpleFields above already carries the mode; here we only validate it.
if mode and mode not in RESOURCE_MODES:
raise ValueError("Invalid mode ({mode})".format(mode=mode))
self.zk.create(self._build_path(RESOURCE_IDEAL_STATE_PATH.format(clusterName=cluster, resourceName=resource)), json.dumps(data))
return True
def enable_resource(self, cluster, resource, enabled=True):
"""enable or disable specified resource"""
raise NotImplementedError
def disable_resource(self, cluster, resource):
"""function for disabling resources"""
return self.enable_resource(cluster, resource, enabled=False)
def alter_ideal_state(self, cluster, resource, newstate):
"""alter ideal state"""
raise NotImplementedError
def enable_instance(self, cluster, instance, enabled=True):
"""enable instance within cluster"""
raise NotImplementedError
def disable_instance(self, cluster, instance):
"""wrapper for ease of use for disabling an instance"""
return self.enable_instance(cluster, instance, enabled=False)
def swap_instance(self, cluster, old, new):
"""swap instance"""
raise NotImplementedError
def enable_partition(self, cluster, resource, partition, instance,
enabled=True):
"""enable Partition """
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
raise NotImplementedError
def disable_partition(self, cluster, resource, partitions, instance):
"""disable Partition """
return self.enable_partition(cluster, resource, partitions, instance,
enabled=False)
def reset_partition(self, cluster, resource, partitions, instance):
"""reset partition"""
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
raise NotImplementedError
def reset_resource(self, cluster, resource):
"""reset resource"""
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
raise NotImplementedError
def reset_instance(self, cluster, instance):
"""reset instance"""
if instance not in self.get_instances(cluster):
raise HelixDoesNotExistException(
"Instance {0} does not exist".format(instance))
raise NotImplementedError
def add_instance_tag(self, cluster, instance, tag):
"""add tag to an instance"""
instance_data, instance_meta = self._get_instance_detail(cluster, instance)
instance_tags = instance_data.get("listFields").get("TAG_LIST", [])
if tag in instance_tags:
raise HelixAlreadyExistsException(
"Tag ({tag}) already exists for instance ({instance}).".format(tag=tag, instance=instance))
instance_tags.append(tag)
instance_data["listFields"]["TAG_LIST"] = instance_tags
# XXX: Apply some retry logic here
self.zk.set(self._build_path(PARTICIPANT_CONFIG_PATH.format(clusterName=cluster, instanceName=instance)), json.dumps(instance_data), version=instance_meta.version)
return True
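# A hedged sketch of the retry loop the XXX above alludes to (not wired in):
# re-read the znode and re-apply the change whenever the versioned set() loses a
# race with a concurrent writer. `path` stands for the participant config path.
#
#     from kazoo.exceptions import BadVersionError
#     while True:
#         data, meta = self._get_instance_detail(cluster, instance)
#         data["listFields"].setdefault("TAG_LIST", []).append(tag)
#         try:
#             self.zk.set(path, json.dumps(data), version=meta.version)
#             break
#         except BadVersionError:
#             continue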
def del_instance_tag(self, cluster, instance, tag):
"""remove tag from instance"""
if instance not in [x["id"] for x in self.get_instances(cluster)]:
raise HelixDoesNotExistException(
"Instance {0} does not exist.".format(instance))
def add_resource_tag(self, cluster, resource, tag):
"""add tag to resource group"""
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
resource_data, resource_stat = self._get_resource_group(cluster, resource)
resource_data["simpleFields"]["INSTANCE_GROUP_TAG"] = tag
self.zk.set(self._build_path(RESOURCE_IDEAL_STATE_PATH.format(clusterName=cluster, resourceName=resource)), json.dumps(resource_data), version=resource_stat.version)
return True
def del_resource_tag(self, cluster, resource, tag):
"""Delete resource tag."""
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
raise NotImplementedError
def get_instance_taginfo(self, cluster):
"""Get resource tag info."""
instance_tags = {}
for instance in self.get_instances(cluster):
list_fields = instance.get("listFields")
if "TAG_LIST" in list_fields:
for tag in list_fields.get("TAG_LIST"):
if tag in instance_tags:
instance_tags[tag].append(instance.get("id"))
else:
instance_tags[tag] = [instance.get("id")]
return instance_tags
def expand_cluster(self, cluster):
"""expand cluster"""
raise NotImplementedError
def expand_resource(self, cluster, resource):
"""expand resource"""
raise NotImplementedError
def add_resource_property(self, cluster, resource, properties):
"""Add resource property. Properties must be a dictionary of properties."""
raise NotImplementedError
def set_config(self, cluster, configs, participant=None, resource=None):
"""sets config in helix"""
raise NotImplementedError
def remove_config(self, cluster, configs, participant=None, resource=None):
"""sets config in helix"""
raise NotImplementedError
def get_zk_path(self, path):
"""get zookeeper path"""
return self.zk.get(self._build_path(path))
def del_zk_path(self, path):
"""delete zookeeper path"""
return self.zk.delete(self._build_path(path))
def add_state_model(self, cluster, newstate):
"""add state model"""
raise NotImplementedError
def del_instance(self, cluster, instance):
"""delete instance"""
if cluster not in self.get_clusters():
raise HelixDoesNotExistException(
"Cluster {0} does not exist.".format(cluster))
if instance not in [x["id"] for x in self.get_instances(cluster)]:
raise HelixDoesNotExistException(
"Instance {0} does not exist.".format(instance))
self.zk.delete(self._build_path(PARTICIPANT_CONFIG_PATH.format(clusterName=cluster, instanceName=instance.replace(':', '_'))))
# Reverse zookeeper structure for destruction.
for path in HELIX_ZOOKEEPER_PATHS.get("instance")[::-1]:
self.zk.delete(self._build_path(path.format(clusterName=cluster, instanceName=instance.replace(':', '_'))))
return True
def del_resource(self, cluster, resource):
"""delete specified resource from cluster"""
if cluster not in self.get_clusters():
raise HelixDoesNotExistException(
"Cluster {0} does not exist.".format(cluster))
if resource not in self.get_resource_groups(cluster):
raise HelixDoesNotExistException(
"ResourceGroup {0} does not exist".format(resource))
self.zk.delete(self._build_path(RESOURCE_IDEAL_STATE_PATH.format(clusterName=cluster, resourceName=resource)))
return True
def del_cluster(self, cluster):
"""delete cluster"""
if cluster not in self.get_clusters():
raise HelixDoesNotExistException(
"Cluster {0} does not exist.".format(cluster))
self.zk.delete(self._build_path(CLUSTER_CONFIG_PATH.format(clusterName=cluster)))
for path in HELIX_ZOOKEEPER_PATHS.get("cluster")[::-1]:
self.zk.ensure_path(self._build_path(path.format(clusterName=cluster)))
return True
def send_message(self, cluster, path, **kwargs):
"""Send helix IPC message."""
raise NotImplementedError
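# A hedged usage sketch (illustrative ensemble address, chroot, and names;
# requires a live Zookeeper ensemble):
#
#     admin = ZookeeperHelixFunctions("localhost:2181", "/helix")
#     admin.create_root()
#     admin.add_cluster("test-cluster")
#     admin.add_instance("test-cluster", ["host1", "host2"], 12345)
#     admin.add_resource("test-cluster", "myDB", 8, "MasterSlave", mode="SEMI_AUTO")
#     admin.rebalance("test-cluster", "myDB", "2")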
| apache-2.0 |
rlucio/cinder-violin-driver-icehouse | cinder/volume/drivers/violin/vxg/vshare/snapshot.py | 1 | 23168 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.volume.drivers.violin.vxg.core.node import XGNode
from cinder.volume.drivers.violin.vxg.core.error import *
"""
Here's an example of how to extend the functionality in this module:
class SnapshotManager_3(SnapshotManager_2):
def __init__(self, basic):
super(SnapshotManager_3, self).__init__(basic)
def new_function(self, *args):
pass
"""
class SnapshotManager(object):
def __init__(self, basic):
self._basic = basic
def rollback_lun(self, container, lun, name):
"""Rollback a LUN to the specified snapshot.
Arguments:
container -- string
lun -- string
name -- string
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lun', 'string', lun))
nodes.append(XGNode('name', 'string', name))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapshot/rollback', nodes)
def rollback_lun_group(self, container, lungroup, name):
"""Rollback a LUN group to the specified snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lungroup', 'string', lungroup))
nodes.append(XGNode('name', 'string', name))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapgroup/rollback', nodes)
def create_lun_group_snapshot(self, container, lungroup, name,
description=None, readwrite=None,
snap_protect=None):
"""Creates a LUN group snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
description -- string (optional)
readwrite -- bool (optional)
snap_protect -- bool (optional)
Returns:
Action result as a dict.
"""
return self._snapgroup_create(container, lungroup, name, 'create',
description, readwrite, snap_protect)
def delete_lun_group_snapshot(self, container, lungroup, name):
"""Deletes a LUN group snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
Returns:
Action result as a dict.
"""
return self._snapgroup_create(container, lungroup, name, 'delete',
None, None, None)
def _snapgroup_create(self, container, lungroup, name, action,
description, readwrite, snap_protect):
"""Internal work function for:
create_lun_group_snapshot
delete_lun_group_snapshot
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lungroup', 'string', lungroup))
nodes.append(XGNode('name', 'string', name))
nodes.append(XGNode('action', 'string', action))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if snap_protect is not None:
nodes.append(XGNode('snap_protect', 'bool', snap_protect))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapgroup/create', nodes)
def export_lun_group_snapshot(self, container, lungroup, name,
initiators=None, ports=None):
"""Export a LUN group snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
initiators -- string (string or list)
ports -- string (string or list)
Returns:
Action result as a dict.
"""
return self._snapgroup_export(container, lungroup, name,
initiators, ports, False)
def unexport_lun_group_snapshot(self, container, lungroup, name,
initiators=None, ports=None):
"""Unexport a LUN group snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
initiators -- string (string or list)
ports -- string (string or list)
Returns:
Action result as a dict.
"""
return self._snapgroup_export(container, lungroup, name,
initiators, ports, True)
def _snapgroup_export(self, container, lungroup, name,
initiators, ports, unexport):
"""Internal work function for:
export_lun_group_snapshot
unexport_lun_group_snapshot
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lungroup', 'string', lungroup))
nodes.append(XGNode('name', 'string', name))
nodes.extend(XGNode.as_node_list('initiators/{0}', 'string',
initiators))
nodes.extend(XGNode.as_node_list('ports/{0}', 'string', ports))
nodes.append(XGNode('unexport', 'bool', unexport))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapgroup/export', nodes)
def set_lun_group_snapshot(self, container, lungroup, name,
new_name=None, description=None,
readwrite=None, snap_protect=None):
"""Update a LUN group snapshot.
Arguments:
container -- string
lungroup -- string
name -- string
new_name -- string (optional)
description -- string (optional)
readwrite -- bool (optional)
snap_protect -- bool (optional)
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lungroup', 'string', lungroup))
nodes.append(XGNode('name', 'string', name))
if new_name is not None:
nodes.append(XGNode('new_name', 'string', new_name))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if snap_protect is not None:
nodes.append(XGNode('snap_protect', 'bool', snap_protect))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapgroup/update', nodes)
def create_lun_snapshot(self, container, lun, name, description=None,
readwrite=None, snap_protect=None):
"""Create a LUN snapshot.
Arguments:
container -- string
lun -- string
name -- string
description -- string (optional)
readwrite -- bool (optional)
snap_protect -- bool (optional)
Returns:
Action result as a dict.
"""
return self._snapshot_create(container, lun, name, 'create',
description, readwrite, snap_protect)
def delete_lun_snapshot(self, container, lun, name):
"""Delete a LUN snapshot.
Arguments:
container -- string
lun -- string
name -- string
Returns:
Action result as a dict.
"""
return self._snapshot_create(container, lun, name,
'delete', None, None, None)
def _snapshot_create(self, container, lun, name, action,
description, readwrite, snap_protect):
"""Internal work function for:
create_lun_snapshot
delete_lun_snapshot
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lun', 'string', lun))
nodes.append(XGNode('name', 'string', name))
nodes.append(XGNode('action', 'string', action))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if snap_protect is not None:
nodes.append(XGNode('snap_protect', 'bool', snap_protect))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapshot/create', nodes)
def set_lun_snapshot(self, container, lun, name, new_name=None,
description=None, readwrite=None, snap_protect=None,
port_A=None, port_B=None):
"""Update a LUN snapshot.
Arguments:
container -- string
lun -- string
name -- string
new_name -- string (optional)
description -- string (optional)
readwrite -- bool (optional)
snap_protect -- bool (optional)
port_A -- bool (optional)
port_B -- bool (optional)
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lun', 'string', lun))
nodes.append(XGNode('name', 'string', name))
if new_name is not None:
nodes.append(XGNode('new_name', 'string', new_name))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if snap_protect is not None:
nodes.append(XGNode('snap_protect', 'bool', snap_protect))
if port_A is not None:
nodes.append(XGNode('port_A', 'bool', port_A))
if port_B is not None:
nodes.append(XGNode('port_B', 'bool', port_B))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapshot/set', nodes)
def export_lun_snapshot(self, container, lun, names, initiators,
ports, lun_id):
"""Export a snapshot.
Arguments:
container -- string
lun -- string
names -- string (string or list)
initiators -- string (string or list)
ports -- string (string or list)
lun_id -- string
Returns:
Action result as a dict.
"""
return self._snapshot_export(container, lun, names, initiators,
ports, lun_id, False, None)
def unexport_lun_snapshot(self, container, lun, names, initiators,
ports, lun_id, force):
"""Unexport a snapshot.
Arguments:
container -- string
lun -- string
names -- string (string or list)
initiators -- string (string or list)
ports -- string (string or list)
lun_id -- string
force -- bool (optional)
Returns:
Action result as a dict.
"""
return self._snapshot_export(container, lun, names, initiators,
ports, lun_id, True, force)
def _snapshot_export(self, container, lun, names, initiators,
ports, lun_id, unexport, force):
"""Internal work function for:
export_lun_snapshot
unexport_lun_snapshot
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('lun', 'string', lun))
nodes.extend(XGNode.as_node_list('names/{0}', 'string', names))
nodes.extend(XGNode.as_node_list('initiators/{0}', 'string',
initiators))
nodes.extend(XGNode.as_node_list('ports/{0}', 'string', ports))
nodes.append(XGNode('lun_id', 'string', lun_id))
nodes.append(XGNode('unexport', 'bool', unexport))
if force is not None:
nodes.append(XGNode('force', 'bool', force))
return self._basic.perform_action('/vshare/actions/vdm' +
'/snapshot/export', nodes)
def create_snapshot_schedule(self, container, schedule_name, type,
is_lungroup_schedule, lun_or_group_name,
description=None, date=None, time=None,
start_date=None, start_time=None,
end_date=None, end_time=None,
periodicity=None, days_of_week=None,
monthly_interval=None, day_of_month=None,
readwrite=None, protected=None,
max_keep=None, disabled=None):
"""Create a snapshot schedule.
Arguments:
container -- string
schedule_name -- string
type -- string
is_lungroup_schedule -- bool
lun_or_group_name -- string
description -- string (optional)
date -- date (string or datetime; optional)
time -- time_sec (string or datetime; optional)
start_date -- date (string or datetime; optional)
start_time -- time_sec (string or datetime; optional)
end_date -- date (string or datetime; optional)
end_time -- time_sec (string or datetime; optional)
periodicity -- duration_sec (string; optional)
days_of_week -- string (optional)
monthly_interval -- uint32 (optional)
day_of_month -- int32 (optional)
readwrite -- bool (optional)
protected -- bool (optional)
max_keep -- int32 (optional)
disabled -- bool (optional)
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('schedule_name', 'string', schedule_name))
nodes.append(XGNode('type', 'string', type))
nodes.append(XGNode('is_lungroup_schedule', 'bool',
is_lungroup_schedule))
nodes.append(XGNode('lun_or_group_name', 'string', lun_or_group_name))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if date is not None:
nodes.append(XGNode('date', 'date', date))
if time is not None:
nodes.append(XGNode('time', 'time_sec', time))
if start_date is not None:
nodes.append(XGNode('start_date', 'date', start_date))
if start_time is not None:
nodes.append(XGNode('start_time', 'time_sec', start_time))
if end_date is not None:
nodes.append(XGNode('end_date', 'date', end_date))
if end_time is not None:
nodes.append(XGNode('end_time', 'time_sec', end_time))
if periodicity is not None:
nodes.append(XGNode('periodicity', 'duration_sec', periodicity))
if days_of_week is not None:
nodes.append(XGNode('days_of_week', 'string', days_of_week))
if monthly_interval is not None:
nodes.append(XGNode('monthly_interval', 'uint32',
monthly_interval))
if day_of_month is not None:
nodes.append(XGNode('day_of_month', 'int32', day_of_month))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if protected is not None:
nodes.append(XGNode('protected', 'bool', protected))
if max_keep is not None:
nodes.append(XGNode('max_keep', 'int32', max_keep))
if disabled is not None:
nodes.append(XGNode('disabled', 'bool', disabled))
return self._basic.perform_action('/vshare/actions/vdm' +
'/schedule/snapshot/create', nodes)
def modify_snapshot_schedule(self, container, schedule_name, type,
is_lungroup_schedule, lun_or_group_name,
new_schedule_name=None, description=None,
date=None, time=None,
start_date=None, start_time=None,
end_date=None, end_time=None,
periodicity=None, days_of_week=None,
monthly_interval=None, day_of_month=None,
readwrite=None, protect=None,
max_keep=None, enable=None):
"""Modify a snapshot schedule.
Arguments:
container -- string
schedule_name -- string
type -- string
is_lungroup_schedule -- bool
lun_or_group_name -- string
new_schedule_name -- string (optional)
description -- string (optional)
date -- date (string or datetime; optional)
time -- time_sec (string or datetime; optional)
start_date -- date (string or datetime; optional)
start_time -- time_sec (string or datetime; optional)
end_date -- date (string or datetime; optional)
end_time -- time_sec (string or datetime; optional)
periodicity -- duration_sec (string; optional)
days_of_week -- string (optional)
monthly_interval -- uint32 (optional)
day_of_month -- int32 (optional)
readwrite -- bool (optional)
protect -- bool (optional)
max_keep -- int32 (optional)
enable -- bool (optional)
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
nodes.append(XGNode('schedule_name', 'string', schedule_name))
nodes.append(XGNode('type', 'string', type))
nodes.append(XGNode('is_lungroup_schedule', 'bool',
is_lungroup_schedule))
nodes.append(XGNode('lun_or_group_name', 'string', lun_or_group_name))
if new_schedule_name is not None:
nodes.append(XGNode('new_schedule_name', 'string',
new_schedule_name))
if description is not None:
nodes.append(XGNode('description', 'string', description))
if date is not None:
nodes.append(XGNode('date', 'date', date))
if time is not None:
nodes.append(XGNode('time', 'time_sec', time))
if start_date is not None:
nodes.append(XGNode('start_date', 'date', start_date))
if start_time is not None:
nodes.append(XGNode('start_time', 'time_sec', start_time))
if end_date is not None:
nodes.append(XGNode('end_date', 'date', end_date))
if end_time is not None:
nodes.append(XGNode('end_time', 'time_sec', end_time))
if periodicity is not None:
nodes.append(XGNode('periodicity', 'duration_sec', periodicity))
if days_of_week is not None:
nodes.append(XGNode('days_of_week', 'string', days_of_week))
if monthly_interval is not None:
nodes.append(XGNode('monthly_interval', 'uint32',
monthly_interval))
if day_of_month is not None:
nodes.append(XGNode('day_of_month', 'int32', day_of_month))
if readwrite is not None:
nodes.append(XGNode('readwrite', 'bool', readwrite))
if protect is not None:
nodes.append(XGNode('protect', 'bool', protect))
if max_keep is not None:
nodes.append(XGNode('max_keep', 'int32', max_keep))
if enable is not None:
nodes.append(XGNode('enable', 'bool', enable))
return self._basic.perform_action('/vshare/actions/vdm' +
'/schedule/snapshot/modify', nodes)
def delete_snapshot_schedule(self, container, lun=None, lungroup=None,
schedule_name=None, delete_snapshots=None):
"""Delete a snapshot schedule.
Arguments:
container -- string
lun -- string (optional)
lungroup -- string (optional)
schedule_name -- string (optional)
delete_snapshots -- bool (optional)
Returns:
Action result as a dict.
"""
nodes = []
nodes.append(XGNode('container', 'string', container))
if lun is not None:
nodes.append(XGNode('lun', 'string', lun))
if lungroup is not None:
nodes.append(XGNode('lungroup', 'string', lungroup))
if schedule_name is not None:
nodes.append(XGNode('schedule_name', 'string', schedule_name))
if delete_snapshots is not None:
nodes.append(XGNode('delete_snapshots', 'bool', delete_snapshots))
return self._basic.perform_action('/vshare/actions/vdm' +
'/schedule/snapshot/delete', nodes)
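# A hedged usage sketch; `session` stands in for whatever connection object this
# library layers on (anything exposing perform_action()), and the container, LUN,
# and snapshot names are illustrative only:
#
#     snaps = SnapshotManager(session)
#     snaps.create_lun_snapshot("myContainer", "lun0", "snap1", readwrite=True)
#     snaps.rollback_lun("myContainer", "lun0", "snap1")
#     snaps.delete_lun_snapshot("myContainer", "lun0", "snap1")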
| apache-2.0 |
huggingface/pytorch-transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | 1 | 44192 | # coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch SqueezeBert model. """
import math
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_squeezebert import SqueezeBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "squeezebert/squeezebert-uncased"
_CONFIG_FOR_DOC = "SqueezeBertConfig"
_TOKENIZER_FOR_DOC = "SqueezeBertTokenizer"
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"squeezebert/squeezebert-uncased",
"squeezebert/squeezebert-mnli",
"squeezebert/squeezebert-mnli-headless",
]
class SqueezeBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MatMulWrapper(torch.nn.Module):
"""
Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call
torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul.
"""
def __init__(self):
super().__init__()
def forward(self, mat1, mat2):
"""
:param mat1: torch tensor
:param mat2: torch tensor
:return: matmul of these tensors
Here are the typical dimensions found in BERT (the B is optional):
mat1.shape: [B, <optional extra dims>, M, K]
mat2.shape: [B, <optional extra dims>, K, N]
output shape: [B, <optional extra dims>, M, N]
"""
return torch.matmul(mat1, mat2)
class SqueezeBertLayerNorm(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
N = batch C = channels W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps}
def forward(self, x):
x = x.permute(0, 2, 1)
x = nn.LayerNorm.forward(self, x)
return x.permute(0, 2, 1)
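# Illustrative shapes: SqueezeBertLayerNorm keeps the NCW layout and normalizes
# over the channel dimension.
#
#     x = torch.randn(2, 768, 128)        # [batch, channels, seq_len]
#     y = SqueezeBertLayerNorm(768)(x)    # same shape; normalized over dim 1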
class ConvDropoutLayerNorm(nn.Module):
"""
ConvDropoutLayerNorm: Conv, Dropout, LayerNorm
"""
def __init__(self, cin, cout, groups, dropout_prob):
super().__init__()
self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
self.layernorm = SqueezeBertLayerNorm(cout)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, hidden_states, input_tensor):
x = self.conv1d(hidden_states)
x = self.dropout(x)
x = x + input_tensor
x = self.layernorm(x)
return x
class ConvActivation(nn.Module):
"""
ConvActivation: Conv, Activation
"""
def __init__(self, cin, cout, groups, act):
super().__init__()
self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
self.act = ACT2FN[act]
def forward(self, x):
output = self.conv1d(x)
return self.act(output)
class SqueezeBertSelfAttention(nn.Module):
def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1):
"""
config = used for some things; ignored for others (work in progress...)
cin = input channels = output channels
groups = number of groups to use in conv1d layers
"""
super().__init__()
if cin % config.num_attention_heads != 0:
raise ValueError(
f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(cin / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups)
self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups)
self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
self.matmul_qk = MatMulWrapper()
self.matmul_qkv = MatMulWrapper()
def transpose_for_scores(self, x):
"""
- input: [N, C, W]
- output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents
"""
new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
x = x.view(*new_x_shape)
return x.permute(0, 1, 3, 2) # [N, C1, C2, W] --> [N, C1, W, C2]
def transpose_key_for_scores(self, x):
"""
- input: [N, C, W]
- output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents
"""
new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
x = x.view(*new_x_shape)
# no `permute` needed
return x
def transpose_output(self, x):
"""
- input: [N, C1, W, C2]
- output: [N, C, W]
"""
x = x.permute(0, 1, 3, 2).contiguous() # [N, C1, C2, W]
new_x_shape = (x.size()[0], self.all_head_size, x.size()[3]) # [N, C, W]
x = x.view(*new_x_shape)
return x
def forward(self, hidden_states, attention_mask, output_attentions):
"""
expects hidden_states in [N, C, W] data layout.
The attention_mask data layout is [N, W], and it does not need to be transposed.
"""
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_score = self.matmul_qk(query_layer, key_layer)
attention_score = attention_score / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_score = attention_score + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = self.softmax(attention_score)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = self.matmul_qkv(attention_probs, value_layer)
context_layer = self.transpose_output(context_layer)
result = {"context_layer": context_layer}
if output_attentions:
result["attention_score"] = attention_score
return result
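# Shape walkthrough for one forward call (illustrative sizes; H = num heads,
# C = H * attention_head_size):
#
#     hidden_states [N, C, W] --(q/k/v convs)--> [N, C, W]
#     query, value --> [N, H, W, C/H];  key --> [N, H, C/H, W]
#     attention_score = q @ k / sqrt(C/H) --> [N, H, W, W]
#     context_layer = probs @ v --> [N, H, W, C/H] --(transpose_output)--> [N, C, W]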
class SqueezeBertModule(nn.Module):
def __init__(self, config):
"""
- hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for
the module
- intermediate_size = output chans for intermediate layer
- groups = number of groups for all layers in the BertModule. (eventually we could change the interface to
allow different groups for different layers)
"""
super().__init__()
c0 = config.hidden_size
c1 = config.hidden_size
c2 = config.intermediate_size
c3 = config.hidden_size
self.attention = SqueezeBertSelfAttention(
config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups
)
self.post_attention = ConvDropoutLayerNorm(
cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob
)
self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act)
self.output = ConvDropoutLayerNorm(
cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob
)
def forward(self, hidden_states, attention_mask, output_attentions):
att = self.attention(hidden_states, attention_mask, output_attentions)
attention_output = att["context_layer"]
post_attention_output = self.post_attention(attention_output, hidden_states)
intermediate_output = self.intermediate(post_attention_output)
layer_output = self.output(intermediate_output, post_attention_output)
output_dict = {"feature_map": layer_output}
if output_attentions:
output_dict["attention_score"] = att["attention_score"]
return output_dict
class SqueezeBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
assert config.embedding_size == config.hidden_size, (
"If you want embedding_size != intermediate hidden_size,"
"please insert a Conv1d layer to adjust the number of channels "
"before the first SqueezeBertModule."
)
self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers))
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
if head_mask is None:
head_mask_is_all_none = True
elif head_mask.count(None) == len(head_mask):
head_mask_is_all_none = True
else:
head_mask_is_all_none = False
assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation."
# [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length]
hidden_states = hidden_states.permute(0, 2, 1)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for layer in self.layers:
if output_hidden_states:
hidden_states = hidden_states.permute(0, 2, 1)
all_hidden_states += (hidden_states,)
hidden_states = hidden_states.permute(0, 2, 1)
layer_output = layer.forward(hidden_states, attention_mask, output_attentions)
hidden_states = layer_output["feature_map"]
if output_attentions:
all_attentions += (layer_output["attention_score"],)
# [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size]
hidden_states = hidden_states.permute(0, 2, 1)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class SqueezeBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class SqueezeBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class SqueezeBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = SqueezeBertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class SqueezeBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = SqueezeBertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class SqueezeBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = SqueezeBertConfig
base_model_prefix = "transformer"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv1d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, SqueezeBertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
SQUEEZEBERT_START_DOCSTRING = r"""
The SqueezeBERT model was proposed in `SqueezeBERT: What can computer vision teach NLP about efficient neural
networks? <https://arxiv.org/abs/2006.11316>`__ by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W.
Keutzer
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the
`squeezebert/squeezebert-mnli-headless` checkpoint as a starting point.
Parameters:
config (:class:`~transformers.SqueezeBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
Hierarchy::
Internal class hierarchy:
SqueezeBertModel
SqueezeBertEncoder
SqueezeBertModule
SqueezeBertSelfAttention
ConvActivation
ConvDropoutLayerNorm
Data layouts::
Input data is in [batch, sequence_length, hidden_size] format.
Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if :obj:`output_hidden_states
== True`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format.
The final output of the encoder is in [batch, sequence_length, hidden_size] format.
"""
SQUEEZEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.SqueezeBertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.",
SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertModel(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = SqueezeBertEmbeddings(config)
self.encoder = SqueezeBertEncoder(config)
self.pooler = SqueezeBertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
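# Editor's hedged usage sketch for SqueezeBertModel (not in the original source).
# Assumes SqueezeBertConfig is importable in this module; sizes are illustrative.
def _example_squeezebert_model_forward():
    import torch
    config = SqueezeBertConfig()
    model = SqueezeBertModel(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))  # (batch, seq_len)
    outputs = model(input_ids, return_dict=True)
    # last_hidden_state: (2, 8, hidden_size); pooler_output: (2, hidden_size)
    return outputs.last_hidden_state.shape, outputs.pooler_output.shape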
@add_start_docstrings("""SqueezeBERT Model with a `language modeling` head on top. """, SQUEEZEBERT_START_DOCSTRING)
class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.transformer = SqueezeBertModel(config)
self.cls = SqueezeBertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
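# Editor's sketch of the ``-100`` masking convention used in the loss above.
# Relies only on the fact that CrossEntropyLoss ignores index -100 by default.
def _example_masked_lm_labels():
    import torch
    logits = torch.randn(1, 4, 10)               # (batch, seq_len, vocab_size=10)
    labels = torch.tensor([[-100, 3, -100, 7]])  # only positions 1 and 3 contribute
    return torch.nn.CrossEntropyLoss()(logits.view(-1, 10), labels.view(-1))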
@add_start_docstrings(
"""
SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.transformer = SqueezeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
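# Editor's sketch of the ``problem_type`` inference implemented in the forward pass
# above (assumption: same precedence of the three branches).
def _example_infer_problem_type(num_labels, labels):
    import torch
    if num_labels == 1:
        return "regression"                   # MSELoss
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"  # CrossEntropyLoss
    return "multi_label_classification"       # BCEWithLogitsLoss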
@add_start_docstrings(
"""
SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = SqueezeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)")
)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. (see
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
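# Editor's sketch of the choice-flattening used above: (batch, num_choices, seq_len)
# tensors are folded to (batch * num_choices, seq_len) for the encoder, and the
# one-logit-per-choice scores are reshaped back for a softmax over choices.
def _example_flatten_choices():
    import torch
    input_ids = torch.randint(0, 100, (2, 4, 8))   # (batch=2, num_choices=4, seq_len=8)
    flat = input_ids.view(-1, input_ids.size(-1))  # (8, 8)
    logits = torch.randn(flat.size(0), 1)          # one score per flattened example
    return logits.view(-1, 4)                      # back to (batch, num_choices)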
@add_start_docstrings(
"""
SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = SqueezeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = SqueezeBertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
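# Editor's sketch of the position clamping above: span targets outside the sequence
# are clamped to ``sequence_length`` and then skipped via ``ignore_index``.
def _example_clamp_span_positions():
    import torch
    start_logits = torch.randn(2, 16)        # (batch, sequence_length)
    start_positions = torch.tensor([5, 99])  # 99 lies outside the sequence
    ignored_index = start_logits.size(1)     # 16
    start_positions = start_positions.clamp(0, ignored_index)
    loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
    return loss_fct(start_logits, start_positions)  # only the first example counts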
| apache-2.0 |
tklaus/ansible | lib/ansible/playbook/included_file.py | 34 | 5081 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.template import Templar
class IncludedFile:
def __init__(self, filename, args, task):
self._filename = filename
self._args = args
self._task = task
self._hosts = []
def add_host(self, host):
if host not in self._hosts:
self._hosts.append(host)
def __eq__(self, other):
return other._filename == self._filename and other._args == self._args
def __repr__(self):
return "%s (%s): %s" % (self._filename, self._args, self._hosts)
@staticmethod
def process_include_results(results, tqm, iterator, loader, variable_manager):
included_files = []
for res in results:
if res._host in tqm._failed_hosts:
                raise AnsibleError("host has failed, not including files")
if res._task.action == 'include':
if res._task.loop:
if 'results' not in res._result:
continue
include_results = res._result['results']
else:
include_results = [ res._result ]
for include_result in include_results:
# if the task result was skipped or failed, continue
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
continue
original_task = iterator.get_original_task(res._host, res._task)
task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task)
templar = Templar(loader=loader, variables=task_vars)
include_variables = include_result.get('include_variables', dict())
if 'item' in include_result:
task_vars['item'] = include_variables['item'] = include_result['item']
if original_task:
if original_task._task_include:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = original_task._task_include
while parent_include is not None:
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
include_target = templar.template(include_result['include'])
if original_task._role:
new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir)
include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target)
if os.path.exists(include_file):
break
else:
parent_include = parent_include._task_include
elif original_task._role:
include_target = templar.template(include_result['include'])
include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target)
else:
include_file = loader.path_dwim(res._task.args.get('_raw_params'))
else:
include_file = loader.path_dwim(res._task.args.get('_raw_params'))
include_file = templar.template(include_file)
inc_file = IncludedFile(include_file, include_variables, original_task)
try:
pos = included_files.index(inc_file)
inc_file = included_files[pos]
except ValueError:
included_files.append(inc_file)
inc_file.add_host(res._host)
return included_files
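# Editor's sketch (not in the original source) of the de-duplication performed above:
# IncludedFile equality ignores hosts, so equal filename/args pairs collapse into a
# single entry that accumulates hosts via add_host().
def _example_included_file_dedup():
    a = IncludedFile('tasks/common.yml', {}, task=None)
    b = IncludedFile('tasks/common.yml', {}, task=None)
    assert a == b          # equality compares only _filename and _args
    a.add_host('host1')
    a.add_host('host1')    # add_host() is idempotent per host
    return a._hosts        # ['host1']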
| gpl-3.0 |
CatMe0w/shadowsocks | shadowsocks/obfsplugin/http_simple.py | 11 | 13800 | #!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import datetime
import random
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
def create_http_obfs(method):
return http_simple(method)
def create_http2_obfs(method):
return http2_simple(method)
def create_tls_obfs(method):
return tls_simple(method)
def create_random_head_obfs(method):
return random_head(method)
obfs_map = {
'http_simple': (create_http_obfs,),
'http_simple_compatible': (create_http_obfs,),
'http2_simple': (create_http2_obfs,),
'http2_simple_compatible': (create_http2_obfs,),
'tls_simple': (create_tls_obfs,),
'tls_simple_compatible': (create_tls_obfs,),
'random_head': (create_random_head_obfs,),
'random_head_compatible': (create_random_head_obfs,),
}
def match_begin(str1, str2):
if len(str1) >= len(str2):
if str1[:len(str2)] == str2:
return True
return False
class http_simple(plain.plain):
def __init__(self, method):
self.method = method
self.has_sent_header = False
self.has_recv_header = False
self.host = None
self.port = 0
self.recv_buffer = b''
self.user_agent = [b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/44.0",
b"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36",
b"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0",
b"Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)",
b"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
b"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)",
b"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
b"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36",
b"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3",
b"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3"]
    def encode_head(self, buf):
        ret = b''
        for ch in buf:
            ret += b'%' + binascii.hexlify(ch)
        return ret
def client_encode(self, buf):
if self.has_sent_header:
return buf
if len(buf) > 64:
headlen = random.randint(1, 64)
else:
headlen = len(buf)
headdata = buf[:headlen]
buf = buf[headlen:]
port = b''
if self.server_info.port != 80:
port = b':' + common.to_bytes(str(self.server_info.port))
http_head = b"GET /" + self.encode_head(headdata) + b" HTTP/1.1\r\n"
http_head += b"Host: " + (self.server_info.param or self.server_info.host) + port + b"\r\n"
http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n"
http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nDNT: 1\r\nConnection: keep-alive\r\n\r\n"
self.has_sent_header = True
return http_head + buf
def client_decode(self, buf):
if self.has_recv_header:
return (buf, False)
pos = buf.find(b'\r\n\r\n')
if pos >= 0:
self.has_recv_header = True
return (buf[pos + 4:], False)
else:
return (b'', False)
def server_encode(self, buf):
if self.has_sent_header:
return buf
header = b'HTTP/1.1 200 OK\r\nServer: openresty\r\nDate: '
header += to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
header += b'\r\nContent-Type: text/plain; charset=utf-8\r\nTransfer-Encoding: chunked\r\nConnection: keep-alive\r\nKeep-Alive: timeout=20\r\nVary: Accept-Encoding\r\nContent-Encoding: gzip\r\n\r\n'
self.has_sent_header = True
return header + buf
def get_data_from_http_header(self, buf):
ret_buf = b''
lines = buf.split(b'\r\n')
if lines and len(lines) > 4:
hex_items = lines[0].split(b'%')
if hex_items and len(hex_items) > 1:
for index in range(1, len(hex_items)):
if len(hex_items[index]) != 2:
ret_buf += binascii.unhexlify(hex_items[index][:2])
break
ret_buf += binascii.unhexlify(hex_items[index])
return ret_buf
return b''
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http_simple':
return (b'E', False, False)
return (buf, True, False)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.recv_buffer += buf
buf = self.recv_buffer
if len(buf) > 10:
if match_begin(buf, b'GET /') or match_begin(buf, b'POST /'):
if len(buf) > 65536:
self.recv_buffer = None
logging.warn('http_simple: over size')
return self.not_match_return(buf)
else: #not http header, run on original protocol
self.recv_buffer = None
logging.debug('http_simple: not match begin')
return self.not_match_return(buf)
else:
return (b'', True, False)
datas = buf.split(b'\r\n\r\n', 1)
if datas and len(datas) > 1:
ret_buf = self.get_data_from_http_header(buf)
ret_buf += datas[1]
if len(ret_buf) >= 15:
self.has_recv_header = True
return (ret_buf, True, False)
return (b'', True, False)
else:
return (b'', True, False)
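# Editor's sketch (Python 2 semantics, matching this file) of the obfuscation round
# trip: encode_head() percent-hex-encodes each payload byte into the request path,
# and get_data_from_http_header() recovers it on the server side.
def _example_http_simple_roundtrip():
    obfs = http_simple('http_simple')
    head = obfs.encode_head(b'\x01\x02')  # -> '%01%02'
    fake_request = b'GET /' + head + b' HTTP/1.1\r\nHost: x\r\nA: b\r\nC: d\r\n\r\n'
    return obfs.get_data_from_http_header(fake_request)  # -> b'\x01\x02'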
class http2_simple(plain.plain):
    def __init__(self, method):
        self.method = method
        self.has_sent_header = False
        self.has_recv_header = False
        self.raw_trans_sent = False
        self.host = None
        self.port = 0
        self.recv_buffer = b''
        self.send_buffer = b''  # accumulated payload; flushed by client_encode() below
def client_encode(self, buf):
if self.raw_trans_sent:
return buf
self.send_buffer += buf
if not self.has_sent_header:
port = b''
if self.server_info.port != 80:
port = b':' + common.to_bytes(str(self.server_info.port))
self.has_sent_header = True
http_head = b"GET / HTTP/1.1\r\n"
http_head += b"Host: " + (self.server_info.param or self.server_info.host) + port + b"\r\n"
http_head += b"Connection: Upgrade, HTTP2-Settings\r\nUpgrade: h2c\r\n"
http_head += b"HTTP2-Settings: " + base64.urlsafe_b64encode(buf) + b"\r\n"
return http_head + b"\r\n"
if self.has_recv_header:
ret = self.send_buffer
self.send_buffer = b''
self.raw_trans_sent = True
return ret
return b''
def client_decode(self, buf):
if self.has_recv_header:
return (buf, False)
pos = buf.find(b'\r\n\r\n')
if pos >= 0:
self.has_recv_header = True
return (buf[pos + 4:], False)
else:
return (b'', False)
def server_encode(self, buf):
if self.has_sent_header:
return buf
header = b'HTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\nUpgrade: h2c\r\n\r\n'
self.has_sent_header = True
return header + buf
def not_match_return(self, buf):
self.has_sent_header = True
self.has_recv_header = True
if self.method == 'http2_simple':
return (b'E', False, False)
return (buf, True, False)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.recv_buffer += buf
buf = self.recv_buffer
if len(buf) > 10:
if match_begin(buf, b'GET /'):
pass
else: #not http header, run on original protocol
self.recv_buffer = None
return self.not_match_return(buf)
else:
return (b'', True, False)
datas = buf.split(b'\r\n\r\n', 1)
if datas and len(datas) > 1 and len(datas[0]) >= 4:
lines = buf.split(b'\r\n')
if lines and len(lines) >= 4:
if match_begin(lines[4], b'HTTP2-Settings: '):
ret_buf = base64.urlsafe_b64decode(lines[4][16:])
ret_buf += datas[1]
self.has_recv_header = True
return (ret_buf, True, False)
return (b'', True, False)
else:
return (b'', True, False)
return self.not_match_return(buf)
class tls_simple(plain.plain):
    def __init__(self, method):
        self.method = method
        self.has_sent_header = False
        self.has_recv_header = False
        self.raw_trans_sent = False
        self.send_buffer = b''  # accumulated payload; flushed by client_encode() below
def client_encode(self, buf):
if self.raw_trans_sent:
return buf
self.send_buffer += buf
if not self.has_sent_header:
self.has_sent_header = True
data = b"\x03\x03" + os.urandom(32) + binascii.unhexlify(b"000016c02bc02fc00ac009c013c01400330039002f0035000a0100006fff01000100000a00080006001700180019000b0002010000230000337400000010002900270568322d31360568322d31350568322d313402683208737064792f332e3108687474702f312e31000500050100000000000d001600140401050106010201040305030603020304020202")
data = b"\x01\x00" + struct.pack('>H', len(data)) + data
data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
return data
if self.has_recv_header:
ret = self.send_buffer
self.send_buffer = b''
self.raw_trans_sent = True
return ret
return b''
def client_decode(self, buf):
if self.has_recv_header:
return (buf, False)
self.has_recv_header = True
return (b'', True)
def server_encode(self, buf):
if self.has_sent_header:
return buf
self.has_sent_header = True
# TODO
#server_hello = b''
return b'\x16\x03\x01'
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.has_recv_header = True
if not match_begin(buf, b'\x16\x03\x01'):
self.has_sent_header = True
if self.method == 'tls_simple':
return (b'E', False, False)
return (buf, True, False)
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (b'', False, True)
class random_head(plain.plain):
def __init__(self, method):
self.method = method
self.has_sent_header = False
self.has_recv_header = False
self.raw_trans_sent = False
self.raw_trans_recv = False
self.send_buffer = b''
def client_encode(self, buf):
if self.raw_trans_sent:
return buf
self.send_buffer += buf
if not self.has_sent_header:
self.has_sent_header = True
data = os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4)
crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff
return data + struct.pack('<I', crc)
if self.raw_trans_recv:
ret = self.send_buffer
self.send_buffer = b''
self.raw_trans_sent = True
return ret
return b''
def client_decode(self, buf):
if self.raw_trans_recv:
return (buf, False)
self.raw_trans_recv = True
return (b'', True)
def server_encode(self, buf):
if self.has_sent_header:
return buf
self.has_sent_header = True
return os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4)
def server_decode(self, buf):
if self.has_recv_header:
return (buf, True, False)
self.has_recv_header = True
crc = binascii.crc32(buf) & 0xffffffff
if crc != 0xffffffff:
self.has_sent_header = True
if self.method == 'random_head':
return (b'E', False, False)
return (buf, True, False)
# (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
return (b'', False, True)
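# Editor's sketch of the checksum trick in client_encode above: appending the one's
# complement of crc32(data) (0xffffffff - crc == ~crc for 32-bit values) makes the
# crc32 of the whole packet the constant 0xffffffff that server_decode checks for.
def _example_random_head_crc():
    data = os.urandom(32)
    crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff
    packet = data + struct.pack('<I', crc)
    return (binascii.crc32(packet) & 0xffffffff) == 0xffffffff  # True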
| apache-2.0 |
40223225/2015-cdb_g3-40223225 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/domreg.py | 841 | 3402 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their names via registerDOMImplementation().
registered = {}
def registerDOMImplementation(name, factory):
"""registerDOMImplementation(name, factory)
Register the factory function with the name. The factory function
should return an object which implements the DOMImplementation
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
"""getDOMImplementation(name = None, features = ()) -> DOM implementation.
Return a suitable DOM implementation. The name is either
well-known, the module name of a DOM implementation, or None. If
it is not None, imports the corresponding module and returns
DOMImplementation object if the import succeeds.
If name is not given, consider the available implementations to
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
import os
creator = None
mod = well_known_implementations.get(name)
if mod:
mod = __import__(mod, {}, {}, ['getDOMImplementation'])
return mod.getDOMImplementation()
elif name:
return registered[name]()
elif "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
# order, returning the one that has the required features
if isinstance(features, str):
features = _parse_feature_string(features)
for creator in registered.values():
dom = creator()
if _good_enough(dom, features):
return dom
for creator in well_known_implementations.keys():
try:
dom = getDOMImplementation(name = creator)
except Exception: # typically ImportError, or AttributeError
continue
if _good_enough(dom, features):
return dom
raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError("bad feature name: %r" % (feature,))
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
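# Editor's sketch of the feature-string grammar parsed above: whitespace-separated
# feature names, each optionally followed by a version token starting with a digit.
def _example_parse_feature_string():
    return _parse_feature_string("core 2.0 traversal")
    # -> (('core', '2.0'), ('traversal', None))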
| gpl-3.0 |
DirtyUnicorns/android_external_chromium-org | third_party/protobuf/python/google/protobuf/service.py | 590 | 9131 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
"""Exception raised on failed blocking RPC method call."""
pass
class Service(object):
"""Abstract base interface for protocol-buffer-based RPC services.
Services themselves are abstract classes (implemented either by servers or as
stubs), but they subclass this base interface. The methods of this
interface can be used to call the methods of the service without knowing
its exact type at compile time (analogous to the Message interface).
"""
def GetDescriptor():
"""Retrieves this service's descriptor."""
raise NotImplementedError
def CallMethod(self, method_descriptor, rpc_controller,
request, done):
"""Calls a method of the service specified by method_descriptor.
If "done" is None then the call is blocking and the response
message will be returned directly. Otherwise the call is asynchronous
and "done" will later be called with the response value.
In the blocking case, RpcException will be raised on error.
Preconditions:
* method_descriptor.service == GetDescriptor
      * request is of the exact same class as returned by
        GetRequestClass(method).
* After the call has started, the request must not be modified.
* "rpc_controller" is of the correct type for the RPC implementation being
used by this Service. For stubs, the "correct type" depends on the
RpcChannel which the stub is using.
Postconditions:
* "done" will be called when the method is complete. This may be
before CallMethod() returns or it may be at some point in the future.
* If the RPC failed, the response value passed to "done" will be None.
Further details about the failure can be found by querying the
RpcController.
"""
raise NotImplementedError
def GetRequestClass(self, method_descriptor):
"""Returns the class of the request message for the specified method.
CallMethod() requires that the request is of a particular subclass of
Message. GetRequestClass() gets the default instance of this required
type.
Example:
method = service.GetDescriptor().FindMethodByName("Foo")
request = stub.GetRequestClass(method)()
request.ParseFromString(input)
service.CallMethod(method, request, callback)
"""
raise NotImplementedError
def GetResponseClass(self, method_descriptor):
"""Returns the class of the response message for the specified method.
This method isn't really needed, as the RpcChannel's CallMethod constructs
the response protocol message. It's provided anyway in case it is useful
for the caller to know the response type in advance.
"""
raise NotImplementedError
class RpcController(object):
"""An RpcController mediates a single method call.
The primary purpose of the controller is to provide a way to manipulate
settings specific to the RPC implementation and to find out about RPC-level
errors. The methods provided by the RpcController interface are intended
to be a "least common denominator" set of features which we expect all
implementations to support. Specific implementations may provide more
advanced features (e.g. deadline propagation).
"""
# Client-side methods below
def Reset(self):
"""Resets the RpcController to its initial state.
After the RpcController has been reset, it may be reused in
a new call. Must not be called while an RPC is in progress.
"""
raise NotImplementedError
def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError
def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError
def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError
# Server-side methods below
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
def IsCanceled(self):
"""Checks if the client cancelled the RPC.
If true, indicates that the client canceled the RPC, so the server may
as well give up on replying to it. The server should still call the
final "done" callback.
"""
raise NotImplementedError
def NotifyOnCancel(self, callback):
"""Sets a callback to invoke on cancel.
Asks that the given callback be called when the RPC is canceled. The
callback will always be called exactly once. If the RPC completes without
being canceled, the callback will be called after completion. If the RPC
has already been canceled when NotifyOnCancel() is called, the callback
will be called immediately.
NotifyOnCancel() must be called no more than once per request.
"""
raise NotImplementedError
class RpcChannel(object):
"""Abstract interface for an RPC channel.
An RpcChannel represents a communication line to a service which can be used
to call that service's methods. The service may be running on another
machine. Normally, you should not use an RpcChannel directly, but instead
construct a stub {@link Service} wrapping it. Example:
RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
RpcController controller = rpcImpl.Controller()
MyService service = MyService_Stub(channel)
service.MyMethod(controller, request, callback)
"""
def CallMethod(self, method_descriptor, rpc_controller,
request, response_class, done):
"""Calls the method identified by the descriptor.
Call the given method of the remote service. The signature of this
procedure looks the same as Service.CallMethod(), but the requirements
are less strict in one important way: the request object doesn't have to
be of any specific class as long as its descriptor is method.input_type.
"""
raise NotImplementedError
| bsd-3-clause |
Zanzibar82/streamondemand.test | channels/channels_sports/main.py | 1 | 5373 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# Channel for sports-main
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
from channels.channels_sports import sportstools as SPT
__channel__ = "sports-main"
__title__ = "Deportes"
__language__ = "ES"
def isGeneric():
return True
def mainlist(item):
logger.info("[sports-main.py] mainlist")
itemlist = []
itemlist.append( Item(channel="sports-lshunter", action="mainlist", title="LSHunter.tv", url="http://www.drakulastream.eu" ))
itemlist.append( Item(channel="sports-rojadirecta", action="mainlist", title="RojaDirecta.me", url="http://www.rojadirecta.me" ))
#itemlist.append( Item(channel="sports-firstrow", action="mainlist", title="FirstRowSports.eu", url="http://www.ifeed2all.eu/type/football.html" ))
itemlist.append( Item(channel="sports-lshunter", action="myteam", title=SPT.MIEQUIPO+" en LSHunter.tv", url="http://www.drakulastream.eu" ))
itemlist.append( Item(channel="sports-rojadirecta", action="myteam", title=SPT.MIEQUIPO+" en RojaDirecta.me", url="http://www.rojadirecta.me" ))
#itemlist.append( Item(channel="sports-firstrow", action="myteam", title=SPT.MIEQUIPO+" en FirstRowSports.eu", url="http://www.ifeed2all.eu/type/football.html" ))
itemlist.append( Item(channel="configuracion", action="mainlist" , title="Configurar mi equipo : [COLOR=green][B]"+SPT.MIEQUIPO+'[/B][/COLOR]' ))
return itemlist
# SEARCH FOR LINKS TO VIDEOS:
# ===========================
def corregir_url(url):
if url == 'http://tuttosportweb.com':
return 'http://tuttosportweb.com/update/ch1.php'
aux = scrapertools.find_single_match (url, "tuttosportweb.com/([\w]+.php)")
if aux != '':
return 'http://tuttosportweb.com/update/%s' % aux
url = url.replace("kasimirotv.net/canal", "kasimirotv.net/player")
return url
def play(item):
logger.info("[sports-main.py] play")
itemlist = []
item.url = corregir_url(item.url)
data = scrapertools.cachePage(item.url,headers=SPT.DEFAULT_HEADERS)
headers = SPT.DEFAULT_HEADERS[:]
headers.append(["Referer",item.url])
url = buscar_url_valida(data, headers)
if url == '':
        # Look for any iframe that could contain the video (width>=500 and height>=300 !?)
patron = '<iframe([^>]+)'
matches = re.compile(patron,re.DOTALL | re.IGNORECASE).findall(data)
for match in matches:
#logger.info("iframe match "+match) # marginheight="0" marginwidth="0" style="width: 700px; height: 450px" width="650" height="80"
            w = re.findall ('[^n]width\s*[:=][^\d]*(\d+)', match, re.DOTALL | re.IGNORECASE)
            h = re.findall ('[^n]height\s*[:=][^\d]*(\d+)', match, re.DOTALL | re.IGNORECASE)
            if w and h and int(w[0]) >= 500 and int(h[0]) >= 300:
url2 = scrapertools.find_single_match (match, 'src\s*=\s*["\']([^"\']+)')
logger.info("buscando en iframe "+url2)
headers = SPT.DEFAULT_HEADERS[:]
headers.append(["Referer",item.url])
try:
data = scrapertools.cachePage(url2,headers=headers)
except:
continue
if data == '':
continue
headers = SPT.DEFAULT_HEADERS[:]
headers.append(["Referer",url2])
url = buscar_url_valida(data, headers)
if url != '':
break
if url != '':
itemlist.append( Item(channel=__channel__, title=item.title , url=url, server='directo'))
else:
logger.info("NO DETECTADO SERVIDOR")
return itemlist
def buscar_url_valida(data, headers):
logger.info("[sports-main.py] buscar_url_valida")
if (SPT.DEBUG): logger.info("data="+data)
    # unescape possibly "hidden" javascript code
patronjs = "unescape\s*\(\s*['\"]([^'\"]+)"
matches = re.compile(patronjs,re.DOTALL).findall(data)
for ofuscado in matches:
data = data.replace(ofuscado, urllib.unquote(ofuscado))
#if (SPT.DEBUG): logger.info("datanoofus="+data)
    # Run find_url_play on each server until a url is found
for serverid in SPT.SPORTS_SERVERS:
try:
servers_module = __import__("servers_sports."+serverid)
server_module = getattr(servers_module,serverid)
url = server_module.find_url_play(data, headers)
if url != '':
return url
except ImportError:
logger.info("No existe conector para "+serverid)
except:
logger.info("Error en el conector "+serverid)
import traceback,sys
from pprint import pprint
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
for line in lines:
line_splits = line.split("\n")
for line_split in line_splits:
logger.error(line_split)
    return '' # not found
| gpl-3.0 |
keflavich/TurbuStat | turbustat/statistics/wavelets/wavelet_transform.py | 1 | 13165 | # Licensed under an MIT open source license - see LICENSE
import numpy as np
import warnings
from astropy.convolution import convolve_fft, MexicanHat2DKernel
import statsmodels.formula.api as sm
from pandas import Series, DataFrame
try:
from scipy.fftpack import fftn, ifftn, fftfreq
except ImportError:
from numpy.fft import fftn, ifftn, fftfreq
class Mexican_hat():
'''
Implements the Mexican hat wavelet class.
Code is from kPyWavelet.
'''
name = 'Mexican hat'
def __init__(self):
# Reconstruction factor $C_{\psi, \delta}$
self.cpsi = 1. # pi
def psi_ft(self, k, l):
'''
Fourier transform of the Mexican hat wavelet as in Wang and
Lu (2010), equation [15].
'''
K, L = np.meshgrid(k, l)
return (K ** 2. + L ** 2.) * np.exp(-0.5 * (K ** 2. + L ** 2.))
def psi(self, x, y):
'''
Mexican hat wavelet as in Wang and Lu (2010), equation [14].
'''
X, Y = np.meshgrid(x, y)
return (2. - (X ** 2. + Y ** 2.)) * np.exp(-0.5 * (X ** 2. + Y ** 2.))
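# Editor's sketch checking the closed forms above: psi peaks at 2 at the origin and
# psi_ft vanishes at zero frequency, i.e. the wavelet has zero mean (admissibility).
def _example_mexican_hat_values():
    wavelet = Mexican_hat()
    origin = wavelet.psi(np.array([0.0]), np.array([0.0]))      # [[2.0]]
    dc_term = wavelet.psi_ft(np.array([0.0]), np.array([0.0]))  # [[0.0]]
    return origin, dc_term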
class wt2D(object):
'''
Compute the wavelet transform of a 2D array.
Parameters
----------
array : numpy.ndarray
2D array.
header : FITS header
Header for the array.
scales : numpy.ndarray or list
The scales where the transform is calculated.
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
wavelet : wavelet class
The wavelet class to use.
'''
def __init__(self, array, header, scales=None, dx=0.25, dy=0.25,
wavelet=Mexican_hat(), num=50, ang_units=True):
super(wt2D, self).__init__()
self.array = array.astype("f8")
self.header = header
self.wavelet = wavelet
if scales is None:
a_min = round((5. / 3.), 3) # Smallest scale given by paper
self.scales = np.logspace(
np.log10(a_min), np.log10(min(self.array.shape)), num)
else:
self.scales = scales
        # NOTE: can't use NaN interpolation from astropy until the
        # normalization for sum-to-zero kernels is fixed!
        # NaNs are filled in first, so nan_flag below stays False and the
        # astropy convolution path is effectively disabled.
        self.array[np.isnan(self.array)] = np.nanmin(self.array)
        self.nan_flag = False
        if np.isnan(self.array).any():
            self.nan_flag = True
if ang_units:
try:
self.imgscale = np.abs(self.header["CDELT2"])
            except KeyError:
                warnings.warn("Header does not contain the"
                              " angular size. Reverting to pixel scales.")
                ang_units = False
if not ang_units:
self.imgscale = 1.0
a_min = 5 / 3. # Minimum scale size given by Gill and Henriksen (90)
self.dx = dx * a_min
self.dy = dy * a_min
self.Wf = None
self.iWf = None
def cwt2d(self, dx=None, dy=None):
'''
Bi-dimensional continuous wavelet transform of the signal at
specified scale a.
Parameters
----------
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
'''
        if dx is not None:
            assert isinstance(dx, float)
            self.dx = dx
        if dy is not None:
            assert isinstance(dy, float)
            self.dy = dy
# Determines the shape of the arrays and the discrete scales.
n0, m0 = self.array.shape
N, M = 2 ** int(np.ceil(np.log2(n0))), 2 ** int(np.ceil(np.log2(m0)))
if self.scales is None:
self.scales = 2 ** np.arange(int(np.floor(np.log2(min(n0, m0)))))
A = len(self.scales)
# Calculates the zonal and meridional wave numbers.
l, k = fftfreq(N, self.dy), fftfreq(M, self.dx)
# Calculates the Fourier transform of the input signal.
f_ft = fftn(self.array, shape=(N, M))
# Creates empty wavelet transform array and fills it for every discrete
# scale using the convolution theorem.
self.Wf = np.zeros((A, N, M), 'complex')
for i, an in enumerate(self.scales):
psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
self.Wf[i, :, :] = ifftn(f_ft * psi_ft_bar, shape=(N, M))
self.Wf = self.Wf[:, :n0, :m0]
return self
def astropy_cwt2d(self, dx=None, dy=None):
'''
Same as cwt2D except it uses astropy.convolve_fft's ability
to interpolate over NaNs.
Parameters
----------
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
'''
        if dx is not None:
            assert isinstance(dx, float)
            self.dx = dx
        if dy is not None:
            assert isinstance(dy, float)
            self.dy = dy
n0, m0 = self.array.shape
N, M = 2 ** int(np.ceil(np.log2(n0))), 2 ** int(np.ceil(np.log2(m0)))
if self.scales is None:
self.scales = 2 ** np.arange(int(np.floor(np.log2(min(n0, m0)))))
A = len(self.scales)
self.Wf = np.zeros((A, N, M), 'complex')
for i, an in enumerate(self.scales):
psi = MexicanHat2DKernel(an, x_size=n0, y_size=m0)
self.Wf[i, :, :] = convolve_fft(self.array, psi,
interpolate_nan=True,
normalize_kernel=True,
fftn=fftn, ifftn=ifftn)
self.Wf = self.Wf[:, :n0, :m0]
return self
def icwt2d(self, da=0.25):
'''
Inverse bi-dimensional continuous wavelet transform as in Wang and
Lu (2010), equation [5].
Parameters
----------
da : float, optional
Spacing in the frequency axis.
'''
if self.Wf is None:
raise TypeError("Run cwt2D before icwt2D")
m0, l0, k0 = self.Wf.shape
if m0 != self.scales.size:
raise Warning('Scale parameter array shape does not match\
wavelet transform array shape.')
        # Determines the padded array dimensions.
L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))
# Calculates the zonal and meridional wave numbers.
l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)
# Creates empty inverse wavelet transform array and fills it for every
# discrete scale using the convolution theorem.
self.iWf = np.zeros((m0, L, K), 'complex')
for i, an in enumerate(self.scales):
psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
            # Use the scipy.fftpack keyword ("shape"), matching cwt2d above.
            W_ft = fftn(self.Wf[i, :, :], shape=(L, K))
            self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, shape=(L, K)) *\
da / an ** 2.
self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi
return self
def make_1D_transform(self):
self.curve = transform((self.Wf, self.scales), self.imgscale)
def run(self):
'''
Compute the Wavelet transform.
'''
if self.nan_flag:
self.astropy_cwt2d()
else:
self.cwt2d()
self.make_1D_transform()
class Wavelet_Distance(object):
'''
Compute the distance between the two cubes using the Wavelet transform.
We fit a linear model to the two wavelet transforms. The distance is the
t-statistic of the interaction term describing the difference in the
slopes.
Parameters
----------
dataset1 : FITS hdu
2D image.
dataset2 : FITS hdu
2D image.
wavelet : class
Wavelet class. Only Mexican_hat() is implemented.
ang_units : bool, optional
Sets whether to use angular units.
scales : numpy.ndarray or list
The scales where the transform is calculated.
num : int
Number of scales to calculate the transform at.
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
fiducial_model : wt2D
Computed wt2D object. use to avoid recomputing.
'''
def __init__(self, dataset1, dataset2, wavelet=Mexican_hat(),
ang_units=True, scales=None, num=50, dx=0.25, dy=0.25,
fiducial_model=None):
super(Wavelet_Distance, self).__init__()
array1 = dataset1[0]
header1 = dataset1[1]
array2 = dataset2[0]
header2 = dataset2[1]
if fiducial_model is None:
self.wt1 = wt2D(array1, header1, scales=scales, wavelet=wavelet,
ang_units=ang_units)
self.wt1.run()
else:
self.wt1 = fiducial_model
self.wt2 = wt2D(array2, header2, scales=scales, wavelet=wavelet,
ang_units=ang_units)
self.wt2.run()
self.curve1 = self.wt1.curve
self.curve2 = self.wt2.curve
self.results = None
self.distance = None
def distance_metric(self, non_linear=True, verbose=False):
'''
Implements the distance metric for 2 wavelet transforms.
        We fit the linear portion of the transform to represent the power law.
Parameters
----------
non_linear : bool, optional
Enables clipping of non-linear portions of the transform.
verbose : bool, optional
Enables plotting.
'''
if non_linear:
self.curve1 = clip_to_linear(self.curve1)
self.curve2 = clip_to_linear(self.curve2)
dummy = [0] * len(self.curve1[0, :]) + [1] * len(self.curve2[0, :])
x = np.concatenate((self.curve1[0, :], self.curve2[0, :]))
regressor = x.T * dummy
log_T_g = np.concatenate((self.curve1[1, :], self.curve2[1, :]))
d = {"dummy": Series(dummy), "scales": Series(
x), "log_T_g": Series(log_T_g), "regressor": Series(regressor)}
df = DataFrame(d)
model = sm.ols(formula="log_T_g ~ dummy + scales + regressor", data=df)
self.results = model.fit()
self.distance = np.abs(self.results.tvalues["regressor"])
if verbose:
print self.results.summary()
import matplotlib.pyplot as p
p.plot(self.curve1[0, :], self.curve1[1, :], 'bD',
self.curve2[0, :], self.curve2[1, :], 'gD')
p.plot(self.curve1[0, :],
self.results.fittedvalues[:len(self.curve1[1, :])], "b",
self.curve2[0, :],
self.results.fittedvalues[-len(self.curve2[1, :]):], "g")
p.grid(True)
p.xlabel("log a")
p.ylabel(r"log $T_g$")
p.show()
return self
def clip_to_linear(data, threshold=1.0, kernel_width=0.1, ends_clipped=0.05):
'''
Takes the second derivative of the data with a ricker wavelet.
Data is clipped to the linear portion (2nd derivative ~ 0)
Parameters
----------
data : numpy.ndarray
x and y data.
threshold : float, optional
Acceptable value of the second derivative to be called linear.
kernel_width : float, optional
Kernel width set to this percentage of the data length
ends_clipped : float, optional
Percentage of data to clip off at the ends. End points have residual
effects from the convolution.
Returns
-------
data_clipped : numpy.ndarray
Linear portion of the data set returned.
'''
from scipy.signal import ricker
y = data[1, :]
x = data[0, :]
num_pts = len(y)
kernel = ricker(num_pts, num_pts * kernel_width)
sec_deriv = np.convolve(y, kernel, mode="same")
# Ends go back to being ~ linear, so clip them off
if ends_clipped > 0.0:
clipped_pts = int(num_pts * ends_clipped)
sec_deriv = sec_deriv[clipped_pts: num_pts - clipped_pts]
y = y[clipped_pts: num_pts - clipped_pts]
x = x[clipped_pts: num_pts - clipped_pts]
linear_pts = np.abs(sec_deriv) < threshold
data_clipped = np.empty((2, len(y[linear_pts])))
data_clipped[:, :] = x[linear_pts], y[linear_pts]
return data_clipped
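# Illustrative call on synthetic data (values are arbitrary; the input is a
# 2 x N array of x and y, as documented above):
#
# x = np.linspace(0, 5, 200)
# flat = clip_to_linear(np.vstack([x, 2 * x + 1]), threshold=1.0)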
def transform(data, imgscale):
'''
Reduce the output of the wavelet transform to the mean of its nonzero
components at each scale. This reduces the dataset to 1D.
Parameters
----------
data : tuple
Contains N arrays and scales from the transform.
imgscale : float
Conversion factor from pixel scales to physical scales.
Returns
-------
data_1D : numpy.ndarray
Scales in the first column and log <T_g> in the second.
'''
wav_arrays = data[0]
scales = data[1]
log_av_T_g = []
for i in range(len(scales)):
average_Tg_i = np.log10(np.abs(wav_arrays[i, :, :]
[wav_arrays[i, :, :] > 0]).mean())
log_av_T_g.append(average_Tg_i)
physical_scales = np.log10(scales * imgscale)
data_1D = np.array([physical_scales, log_av_T_g])
return data_1D
| mit |
andyxhadji/incubator-airflow | airflow/hooks/pig_hook.py | 13 | 3181 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import subprocess
from tempfile import NamedTemporaryFile
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.file import TemporaryDirectory
class PigCliHook(BaseHook):
"""
Simple wrapper around the pig CLI.
Note that you can also set default pig CLI properties using the
``pig_properties`` to be used in your connection as in
``{"pig_properties": "-Dpig.tmpfilecompression=true"}``
"""
def __init__(
self,
pig_cli_conn_id="pig_cli_default"):
conn = self.get_connection(pig_cli_conn_id)
self.pig_properties = conn.extra_dejson.get('pig_properties', '')
self.conn = conn
def run_cli(self, pig, verbose=True):
"""
Run a pig script using the pig cli
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;")
>>> ("hdfs://" in result)
True
"""
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info(" ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Pig job")
self.sp.kill()
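# A minimal usage sketch (assumes an Airflow connection named
# "pig_cli_default" is configured, mirroring the doctest above):
#
# hook = PigCliHook()
# stdout = hook.run_cli("ls /;", verbose=True)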
| apache-2.0 |
fhe-odoo/odoo | addons/project_issue_sheet/project_issue_sheet.py | 381 | 2875 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
from openerp.tools.translate import _
class project_issue(osv.osv):
_inherit = 'project.issue'
_description = 'project issue'
_columns = {
'timesheet_ids': fields.one2many('hr.analytic.timesheet', 'issue_id', 'Timesheets'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
}
def on_change_project(self, cr, uid, ids, project_id, context=None):
if not project_id:
return {}
result = super(project_issue, self).on_change_project(cr, uid, ids, project_id, context=context)
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if 'value' not in result:
result['value'] = {}
account = project.analytic_account_id
if account:
result['value']['analytic_account_id'] = account.id
return result
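# Illustrative onchange payload when the selected project carries an
# analytic account (the id is hypothetical):
#     {'value': {'analytic_account_id': 7}}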
def on_change_account_id(self, cr, uid, ids, account_id, context=None):
if not account_id:
return {}
account = self.pool.get('account.analytic.account').browse(cr, uid, account_id, context=context)
result = {}
if account and account.state == 'pending':
result = {'warning' : {'title' : _('Analytic Account'), 'message' : _('The Analytic Account is pending!')}}
return result
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'account analytic line'
_columns = {
'create_date' : fields.datetime('Create Date', readonly=True),
}
class hr_analytic_issue(osv.osv):
_inherit = 'hr.analytic.timesheet'
_description = 'hr analytic timesheet'
_columns = {
'issue_id' : fields.many2one('project.issue', 'Issue'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/pyasn1/codec/der/encoder.py | 160 | 1139 | # DER encoder
from pyasn1.type import univ
from pyasn1.codec.cer import encoder
from pyasn1 import error
class SetOfEncoder(encoder.SetOfEncoder):
def _cmpSetComponents(self, c1, c2):
tagSet1 = isinstance(c1, univ.Choice) and \
c1.getEffectiveTagSet() or c1.getTagSet()
tagSet2 = isinstance(c2, univ.Choice) and \
c2.getEffectiveTagSet() or c2.getTagSet()
return cmp(tagSet1, tagSet2)
tagMap = encoder.tagMap.copy()
tagMap.update({
# Overload CER encoders with BER ones (a bit hackerish XXX)
univ.BitString.tagSet: encoder.encoder.BitStringEncoder(),
univ.OctetString.tagSet: encoder.encoder.OctetStringEncoder(),
# Set & SetOf have same tags
univ.SetOf().tagSet: SetOfEncoder()
})
typeMap = encoder.typeMap
class Encoder(encoder.Encoder):
supportIndefLength = False
def __call__(self, client, defMode=True, maxChunkSize=0):
if not defMode:
raise error.PyAsn1Error('DER forbids indefinite length mode')
return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
encode = Encoder(tagMap, typeMap)
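# Minimal usage sketch (illustrative value; ``univ`` is imported at the top
# of this module):
#
# substrate = encode(univ.OctetString('abc'))
# # DER forbids indefinite-length encoding, so this raises PyAsn1Error:
# # encode(univ.OctetString('abc'), defMode=False)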
| mit |
GalaxyTab4/android_kernel_motorola_msm8226 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among the parents, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
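# Illustrative call (the trace line is hypothetical but matches the regex
# above):
#
# parseLine("<idle>-0     [001]  1035.327204: mwait_idle <-cpu_idle")
# -> ('1035.327204', 'mwait_idle', 'cpu_idle')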
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
Yelp/fullerite | src/diamond/collectors/userscripts/userscripts.py | 51 | 3524 | # coding=utf-8
"""
Runs third party scripts and collects their output.
Scripts need to be +x and should output metrics in the form of
```
metric.path.a 1
metric.path.b 2
metric.path.c 3
```
They are not passed any arguments, and if they return an error code,
no metrics are collected.
#### Dependencies
* [subprocess](http://docs.python.org/library/subprocess.html)
"""
import diamond.collector
import diamond.convertor
import os
import subprocess
class UserScriptsCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(UserScriptsCollector,
self).get_default_config_help()
config_help.update({
'scripts_path': "Path to find the scripts to run",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UserScriptsCollector, self).get_default_config()
config.update({
'path': '.',
'scripts_path': '/etc/diamond/user_scripts/',
'floatprecision': 4,
})
return config
def collect(self):
scripts_path = self.config['scripts_path']
if not os.access(scripts_path, os.R_OK):
return None
for script in os.listdir(scripts_path):
absolutescriptpath = os.path.join(scripts_path, script)
executable = os.access(absolutescriptpath, os.X_OK)
is_file = os.path.isfile(absolutescriptpath)
if is_file:
if not executable:
self.log.info("%s is not executable" % absolutescriptpath)
continue
else:
# Don't bother logging skipped non-file files (typically
# directories)
continue
out = None
self.log.debug("Executing %s" % absolutescriptpath)
try:
proc = subprocess.Popen([absolutescriptpath],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = proc.communicate()
# Popen raises OSError if the script cannot be launched.
except (OSError, subprocess.CalledProcessError), e:
self.log.error("%s error launching: %s; skipping" %
(absolutescriptpath, e))
continue
if proc.returncode:
self.log.error("%s return exit value %s; skipping" %
(absolutescriptpath, proc.returncode))
if not out:
self.log.info("%s return no output" % absolutescriptpath)
continue
if err:
self.log.error("%s return error output: %s" %
(absolutescriptpath, err))
# Use filter to remove empty lines of output
for line in filter(None, out.split('\n')):
# Ignore invalid lines
try:
name, value = line.split()
float(value)
except ValueError:
self.log.error("%s returned error output: %s" %
(absolutescriptpath, line))
continue
name, value = line.split()
floatprecision = 0
if "." in value:
floatprecision = self.config['floatprecision']
self.publish(name, value, precision=floatprecision)
| apache-2.0 |
ampax/edx-platform-backup | common/djangoapps/student/migrations/0004_add_email_index.py | 188 | 7971 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.execute("create unique index email on auth_user (email)")
def backwards(self, orm):
db.execute("drop index email on auth_user")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/irc/irc/functools.py | 22 | 1030 | from __future__ import absolute_import, print_function, unicode_literals
import functools
import collections
def save_method_args(method):
"""
Wrap a method such that when it is called, we save the args and
kwargs with which it was called.
>>> class MyClass(object):
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
| agpl-3.0 |
hnarayanan/django-rest-framework | tests/test_viewsets.py | 5 | 1240 | from django.test import TestCase
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework.viewsets import GenericViewSet
factory = APIRequestFactory()
class BasicViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
return Response({'ACTION': 'LIST'})
class InitializeViewSetsTestCase(TestCase):
def test_initialize_view_set_with_actions(self):
request = factory.get('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'ACTION': 'LIST'})
def test_initialize_view_set_with_empty_actions(self):
try:
BasicViewSet.as_view()
except TypeError as e:
self.assertEqual(str(e), "The `actions` argument must be provided "
"when calling `.as_view()` on a ViewSet. "
"For example `.as_view({'get': 'list'})`")
else:
self.fail("actions must not be empty.")
| bsd-2-clause |
40223136/w17test1 | static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_suite.py | 791 | 12066 | import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
,(unittest.TestSuite(), unittest.TestSuite([]))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
fail
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Obus/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
JonathanStein/odoo | addons/account/tests/test_account_move_closed_period.py | 127 | 1623 | from datetime import date
from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
class TestPeriodState(TransactionCase):
"""
Forbid creation of Journal Entries for a closed period.
"""
def setUp(self):
super(TestPeriodState, self).setUp()
cr, uid = self.cr, self.uid
self.wizard_period_close = self.registry('account.period.close')
self.wizard_period_close_id = self.wizard_period_close.create(cr, uid, {'sure': 1})
_, self.sale_journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "sales_journal")
_, self.period_9_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_9")
def test_period_state(self):
cr, uid = self.cr, self.uid
self.wizard_period_close.data_save(cr, uid, [self.wizard_period_close_id], {
'lang': 'en_US',
'active_model': 'account.period',
'active_ids': [self.period_9_id],
'tz': False,
'active_id': self.period_9_id
})
with self.assertRaises(except_orm):
self.registry('account.move').create(cr, uid, {
'name': '/',
'period_id': self.period_9_id,
'journal_id': self.sale_journal_id,
'date': date.today(),
'line_id': [(0, 0, {
'name': 'foo',
'debit': 10,
}), (0, 0, {
'name': 'bar',
'credit': 10,
})]
})
| agpl-3.0 |
geeknoid/api | python/istio_api/networking/v1alpha3/gateway_pb2.py | 1 | 19574 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1alpha3/gateway.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='networking/v1alpha3/gateway.proto',
package='istio.networking.v1alpha3',
syntax='proto3',
serialized_options=_b('Z istio.io/api/networking/v1alpha3'),
serialized_pb=_b('\n!networking/v1alpha3/gateway.proto\x12\x19istio.networking.v1alpha3\x1a\x1fgoogle/api/field_behavior.proto\"\xbc\x01\n\x07Gateway\x12\x37\n\x07servers\x18\x01 \x03(\x0b\x32!.istio.networking.v1alpha3.ServerB\x03\xe0\x41\x02\x12G\n\x08selector\x18\x02 \x03(\x0b\x32\x30.istio.networking.v1alpha3.Gateway.SelectorEntryB\x03\xe0\x41\x02\x1a/\n\rSelectorEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd2\x06\n\x06Server\x12\x32\n\x04port\x18\x01 \x01(\x0b\x32\x1f.istio.networking.v1alpha3.PortB\x03\xe0\x41\x02\x12\x0c\n\x04\x62ind\x18\x04 \x01(\t\x12\x12\n\x05hosts\x18\x02 \x03(\tB\x03\xe0\x41\x02\x12\x39\n\x03tls\x18\x03 \x01(\x0b\x32,.istio.networking.v1alpha3.Server.TLSOptions\x12\x18\n\x10\x64\x65\x66\x61ult_endpoint\x18\x05 \x01(\t\x1a\x9c\x05\n\nTLSOptions\x12\x16\n\x0ehttps_redirect\x18\x01 \x01(\x08\x12\x42\n\x04mode\x18\x02 \x01(\x0e\x32\x34.istio.networking.v1alpha3.Server.TLSOptions.TLSmode\x12\x1a\n\x12server_certificate\x18\x03 \x01(\t\x12\x13\n\x0bprivate_key\x18\x04 \x01(\t\x12\x17\n\x0f\x63\x61_certificates\x18\x05 \x01(\t\x12\x17\n\x0f\x63redential_name\x18\n \x01(\t\x12\x19\n\x11subject_alt_names\x18\x06 \x03(\t\x12\x1f\n\x17verify_certificate_spki\x18\x0b \x03(\t\x12\x1f\n\x17verify_certificate_hash\x18\x0c \x03(\t\x12V\n\x14min_protocol_version\x18\x07 \x01(\x0e\x32\x38.istio.networking.v1alpha3.Server.TLSOptions.TLSProtocol\x12V\n\x14max_protocol_version\x18\x08 \x01(\x0e\x32\x38.istio.networking.v1alpha3.Server.TLSOptions.TLSProtocol\x12\x15\n\rcipher_suites\x18\t \x03(\t\"Z\n\x07TLSmode\x12\x0f\n\x0bPASSTHROUGH\x10\x00\x12\n\n\x06SIMPLE\x10\x01\x12\n\n\x06MUTUAL\x10\x02\x12\x14\n\x10\x41UTO_PASSTHROUGH\x10\x03\x12\x10\n\x0cISTIO_MUTUAL\x10\x04\"O\n\x0bTLSProtocol\x12\x0c\n\x08TLS_AUTO\x10\x00\x12\x0b\n\x07TLSV1_0\x10\x01\x12\x0b\n\x07TLSV1_1\x10\x02\x12\x0b\n\x07TLSV1_2\x10\x03\x12\x0b\n\x07TLSV1_3\x10\x04\"@\n\x04Port\x12\x13\n\x06number\x18\x01 \x01(\rB\x03\xe0\x41\x02\x12\x15\n\x08protocol\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04name\x18\x03 \x01(\tB\"Z istio.io/api/networking/v1alpha3b\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,])
_SERVER_TLSOPTIONS_TLSMODE = _descriptor.EnumDescriptor(
name='TLSmode',
full_name='istio.networking.v1alpha3.Server.TLSOptions.TLSmode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PASSTHROUGH', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIMPLE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTUAL', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTO_PASSTHROUGH', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ISTIO_MUTUAL', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=968,
serialized_end=1058,
)
_sym_db.RegisterEnumDescriptor(_SERVER_TLSOPTIONS_TLSMODE)
_SERVER_TLSOPTIONS_TLSPROTOCOL = _descriptor.EnumDescriptor(
name='TLSProtocol',
full_name='istio.networking.v1alpha3.Server.TLSOptions.TLSProtocol',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TLS_AUTO', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_0', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_1', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_2', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TLSV1_3', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1060,
serialized_end=1139,
)
_sym_db.RegisterEnumDescriptor(_SERVER_TLSOPTIONS_TLSPROTOCOL)
_GATEWAY_SELECTORENTRY = _descriptor.Descriptor(
name='SelectorEntry',
full_name='istio.networking.v1alpha3.Gateway.SelectorEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.Gateway.SelectorEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.Gateway.SelectorEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=286,
)
_GATEWAY = _descriptor.Descriptor(
name='Gateway',
full_name='istio.networking.v1alpha3.Gateway',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='servers', full_name='istio.networking.v1alpha3.Gateway.servers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selector', full_name='istio.networking.v1alpha3.Gateway.selector', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GATEWAY_SELECTORENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=286,
)
_SERVER_TLSOPTIONS = _descriptor.Descriptor(
name='TLSOptions',
full_name='istio.networking.v1alpha3.Server.TLSOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='https_redirect', full_name='istio.networking.v1alpha3.Server.TLSOptions.https_redirect', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mode', full_name='istio.networking.v1alpha3.Server.TLSOptions.mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='server_certificate', full_name='istio.networking.v1alpha3.Server.TLSOptions.server_certificate', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='private_key', full_name='istio.networking.v1alpha3.Server.TLSOptions.private_key', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ca_certificates', full_name='istio.networking.v1alpha3.Server.TLSOptions.ca_certificates', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='credential_name', full_name='istio.networking.v1alpha3.Server.TLSOptions.credential_name', index=5,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject_alt_names', full_name='istio.networking.v1alpha3.Server.TLSOptions.subject_alt_names', index=6,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='verify_certificate_spki', full_name='istio.networking.v1alpha3.Server.TLSOptions.verify_certificate_spki', index=7,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='verify_certificate_hash', full_name='istio.networking.v1alpha3.Server.TLSOptions.verify_certificate_hash', index=8,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_protocol_version', full_name='istio.networking.v1alpha3.Server.TLSOptions.min_protocol_version', index=9,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_protocol_version', full_name='istio.networking.v1alpha3.Server.TLSOptions.max_protocol_version', index=10,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cipher_suites', full_name='istio.networking.v1alpha3.Server.TLSOptions.cipher_suites', index=11,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SERVER_TLSOPTIONS_TLSMODE,
_SERVER_TLSOPTIONS_TLSPROTOCOL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=471,
serialized_end=1139,
)
_SERVER = _descriptor.Descriptor(
name='Server',
full_name='istio.networking.v1alpha3.Server',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1alpha3.Server.port', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bind', full_name='istio.networking.v1alpha3.Server.bind', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='istio.networking.v1alpha3.Server.hosts', index=2,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls', full_name='istio.networking.v1alpha3.Server.tls', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_endpoint', full_name='istio.networking.v1alpha3.Server.default_endpoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVER_TLSOPTIONS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=289,
serialized_end=1139,
)
_PORT = _descriptor.Descriptor(
name='Port',
full_name='istio.networking.v1alpha3.Port',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='istio.networking.v1alpha3.Port.number', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol', full_name='istio.networking.v1alpha3.Port.protocol', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='istio.networking.v1alpha3.Port.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1141,
serialized_end=1205,
)
_GATEWAY_SELECTORENTRY.containing_type = _GATEWAY
_GATEWAY.fields_by_name['servers'].message_type = _SERVER
_GATEWAY.fields_by_name['selector'].message_type = _GATEWAY_SELECTORENTRY
_SERVER_TLSOPTIONS.fields_by_name['mode'].enum_type = _SERVER_TLSOPTIONS_TLSMODE
_SERVER_TLSOPTIONS.fields_by_name['min_protocol_version'].enum_type = _SERVER_TLSOPTIONS_TLSPROTOCOL
_SERVER_TLSOPTIONS.fields_by_name['max_protocol_version'].enum_type = _SERVER_TLSOPTIONS_TLSPROTOCOL
_SERVER_TLSOPTIONS.containing_type = _SERVER
_SERVER_TLSOPTIONS_TLSMODE.containing_type = _SERVER_TLSOPTIONS
_SERVER_TLSOPTIONS_TLSPROTOCOL.containing_type = _SERVER_TLSOPTIONS
_SERVER.fields_by_name['port'].message_type = _PORT
_SERVER.fields_by_name['tls'].message_type = _SERVER_TLSOPTIONS
DESCRIPTOR.message_types_by_name['Gateway'] = _GATEWAY
DESCRIPTOR.message_types_by_name['Server'] = _SERVER
DESCRIPTOR.message_types_by_name['Port'] = _PORT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Gateway = _reflection.GeneratedProtocolMessageType('Gateway', (_message.Message,), {
'SelectorEntry' : _reflection.GeneratedProtocolMessageType('SelectorEntry', (_message.Message,), {
'DESCRIPTOR' : _GATEWAY_SELECTORENTRY,
'__module__' : 'networking.v1alpha3.gateway_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Gateway.SelectorEntry)
})
,
'DESCRIPTOR' : _GATEWAY,
'__module__' : 'networking.v1alpha3.gateway_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Gateway)
})
_sym_db.RegisterMessage(Gateway)
_sym_db.RegisterMessage(Gateway.SelectorEntry)
Server = _reflection.GeneratedProtocolMessageType('Server', (_message.Message,), {
'TLSOptions' : _reflection.GeneratedProtocolMessageType('TLSOptions', (_message.Message,), {
'DESCRIPTOR' : _SERVER_TLSOPTIONS,
'__module__' : 'networking.v1alpha3.gateway_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Server.TLSOptions)
})
,
'DESCRIPTOR' : _SERVER,
'__module__' : 'networking.v1alpha3.gateway_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Server)
})
_sym_db.RegisterMessage(Server)
_sym_db.RegisterMessage(Server.TLSOptions)
Port = _reflection.GeneratedProtocolMessageType('Port', (_message.Message,), {
'DESCRIPTOR' : _PORT,
'__module__' : 'networking.v1alpha3.gateway_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Port)
})
_sym_db.RegisterMessage(Port)
DESCRIPTOR._options = None
_GATEWAY_SELECTORENTRY._options = None
_GATEWAY.fields_by_name['servers']._options = None
_GATEWAY.fields_by_name['selector']._options = None
_SERVER.fields_by_name['port']._options = None
_SERVER.fields_by_name['hosts']._options = None
_PORT.fields_by_name['number']._options = None
_PORT.fields_by_name['protocol']._options = None
# @@protoc_insertion_point(module_scope)
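# A minimal usage sketch of the generated messages (assumptions: this module
# imports as networking.v1alpha3.gateway_pb2, per the __module__ values above,
# and SIMPLE is one of the TLSMode enum values; the field values themselves
# are illustrative):
#
#     from networking.v1alpha3 import gateway_pb2
#
#     gw = gateway_pb2.Gateway()
#     gw.selector['istio'] = 'ingressgateway'
#     server = gw.servers.add()
#     server.port.number = 443
#     server.port.protocol = 'HTTPS'
#     server.hosts.append('example.com')
#     server.tls.mode = gateway_pb2.Server.TLSOptions.SIMPLE
#     wire_bytes = gw.SerializeToString()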
| apache-2.0 |
RichardLitt/wyrd-django-dev | django/contrib/localflavor/mx/mx_states.py | 110 | 1304 | # -*- coding: utf-8 -*-
"""
A list of Mexican states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
# All 31 states, plus the `Distrito Federal`.
STATE_CHOICES = (
('AGU', _('Aguascalientes')),
('BCN', _('Baja California')),
('BCS', _('Baja California Sur')),
('CAM', _('Campeche')),
('CHH', _('Chihuahua')),
('CHP', _('Chiapas')),
('COA', _('Coahuila')),
('COL', _('Colima')),
('DIF', _('Distrito Federal')),
('DUR', _('Durango')),
('GRO', _('Guerrero')),
('GUA', _('Guanajuato')),
('HID', _('Hidalgo')),
('JAL', _('Jalisco')),
('MEX', _('Estado de México')),
('MIC', _('Michoacán')),
('MOR', _('Morelos')),
('NAY', _('Nayarit')),
('NLE', _('Nuevo León')),
('OAX', _('Oaxaca')),
('PUE', _('Puebla')),
('QUE', _('Querétaro')),
('ROO', _('Quintana Roo')),
('SIN', _('Sinaloa')),
('SLP', _('San Luis Potosí')),
('SON', _('Sonora')),
('TAB', _('Tabasco')),
('TAM', _('Tamaulipas')),
('TLA', _('Tlaxcala')),
('VER', _('Veracruz')),
('YUC', _('Yucatán')),
('ZAC', _('Zacatecas')),
)
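# A minimal sketch of the intended use as form-field choices (the import path
# follows this file's location; the form itself is illustrative):
#
#     from django import forms
#     from django.contrib.localflavor.mx.mx_states import STATE_CHOICES
#
#     class AddressForm(forms.Form):
#         state = forms.ChoiceField(choices=STATE_CHOICES)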
| bsd-3-clause |
Benolds/kachMIT-myo-2015 | myo/utils/__init__.py | 9 | 1097 | # Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| mit |
ghandiosm/Test | addons/account/tests/account_test_users.py | 46 | 1657 | from openerp.addons.account.tests.account_test_classes import AccountingTestCase
class AccountTestUsers(AccountingTestCase):
"""Tests for diffrent type of user 'Accountant/Adviser' and added groups"""
def setUp(self):
super(AccountTestUsers, self).setUp()
self.res_user_model = self.env['res.users']
self.main_company = self.env.ref('base.main_company')
self.main_partner = self.env.ref('base.main_partner')
self.main_bank = self.env.ref('base.res_bank_1')
res_users_account_user = self.env.ref('account.group_account_user')
res_users_account_manager = self.env.ref('account.group_account_manager')
partner_manager = self.env.ref('base.group_partner_manager')
self.tax_model = self.env['account.tax']
self.account_model = self.env['account.account']
self.account_type_model = self.env['account.account.type']
self.currency_euro = self.env.ref('base.EUR')
self.account_user = self.res_user_model.with_context({'no_reset_password': True}).create(dict(
name="Accountant",
company_id=self.main_company.id,
login="acc",
email="accountuser@yourcompany.com",
groups_id=[(6, 0, [res_users_account_user.id, partner_manager.id])]
))
self.account_manager = self.res_user_model.with_context({'no_reset_password': True}).create(dict(
name="Adviser",
company_id=self.main_company.id,
login="fm",
email="accountmanager@yourcompany.com",
groups_id=[(6, 0, [res_users_account_manager.id, partner_manager.id])]
))
| gpl-3.0 |
darionyaphet/spark | python/pyspark/sql/tests/test_conf.py | 21 | 2361 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.testing.sqlutils import ReusedSQLTestCase
class ConfTests(ReusedSQLTestCase):
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_conf import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
spring-week-topos/cinder-week | cinder/tests/api/extensions/foxinsocks.py | 12 | 2929 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
class FoxInSocksServerControllerExtension(wsgi.Controller):
@wsgi.action('add_tweedle')
def _add_tweedle(self, req, id, body):
return "Tweedle Beetle Added."
@wsgi.action('delete_tweedle')
def _delete_tweedle(self, req, id, body):
return "Tweedle Beetle Deleted."
@wsgi.action('fail')
def _fail(self, req, id, body):
raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['big_bands'] = 'Pig Bands!'
class Foxinsocks(extensions.ExtensionDescriptor):
"""The Fox In Socks Extension."""
name = "Fox In Socks"
alias = "FOXNSOX"
namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
updated = "2011-01-22T13:25:27-06:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources
def get_controller_extensions(self):
extension_list = []
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
(FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
extension_list.append(ext)
return extension_list
| apache-2.0 |
robinro/ansible | lib/ansible/plugins/lookup/first_found.py | 18 | 6606 | # (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
# [file1, file2, file3], [path1, path2, path3]
# search order is:
# path1/file1
# path1/file2
# path1/file3
# path2/file1
# path2/file2
# path2/file3
# path3/file1
# path3/file2
# path3/file3
# first file found with os.path.exists() is returned
# no file matches raises ansibleerror
# EXAMPLES
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: foo ${inventory_hostname} bar
# paths: /tmp/production /tmp/staging
# that will look for files in this order:
# /tmp/production/foo
# ${inventory_hostname}
# bar
# /tmp/staging/foo
# ${inventory_hostname}
# bar
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: /some/place/foo ${inventory_hostname} /some/place/else
# that will look for files in this order:
# /some/place/foo
# $relative_path/${inventory_hostname}
# /some/place/else
# example - including tasks:
# tasks:
# - include: $item
# with_first_found:
# - files: generic
# paths: tasks/staging tasks/production
# this will include the tasks in the file generic where it is found first (staging or production)
# example simple file lists
# tasks:
# - name: first found file
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname} foo
# example skipping if no matched files
# First_found also offers the ability to control whether or not failing
# to find a file returns an error or not
#
# - name: first found file - or skip
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname}
# skip: true
# example a role with default configuration and configuration per host
# you can set multiple terms with their own files and paths to look through.
# consider a role that sets some configuration per host falling back on a default config.
#
# - name: some configuration template
# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
# with_first_found:
# - files:
# - ${inventory_hostname}/etc/file.cfg
# paths:
# - ../../../templates.overwrites
# - ../../../templates
# - files:
# - etc/file.cfg
# paths:
# - templates
# the above will return an empty list if the files cannot be found at all
# if skip is unspecified or if it is set to false then it will raise an
# error, which can be caught by ignore_errors: true for that action.
# finally - if you want, you can use with_first_found in place of first_available_file:
# you simply cannot use the files, paths or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
#
#
# - name: with_first_found like first_available_file
# action: copy src=$item dest=/tmp/faftest
# with_first_found:
# - ../files/foo
# - ../files/bar
# - ../files/baz
# ignore_errors: true
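# A standalone sketch of the search order described above (not part of the
# plugin; plain os.path.exists stands in for Ansible's path resolution):
#
#     import os
#
#     def first_found(files, paths):
#         # outer loop over paths, inner loop over files, as documented above
#         for path in paths:
#             for fn in files:
#                 candidate = os.path.join(path, fn)
#                 if os.path.exists(candidate):
#                     return candidate
#         return None
#
#     first_found(['foo', 'bar'], ['/tmp/production', '/tmp/staging'])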
import os
from jinja2.exceptions import UndefinedError
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleFileNotFound, AnsibleLookupError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = boolean(term.get('skip', False))
filelist = files
if isinstance(files, string_types):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, string_types):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = self._flatten(terms)
for fn in total_search:
try:
fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError):
continue
# get subdir if set by task executor, default to files otherwise
subdir = getattr(self, '_subdir', 'files')
path = None
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
if path is not None:
return [path]
else:
if skip:
return []
else:
raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
"files are found")
| gpl-3.0 |
Axiologue/DjangoRestMultipleModels | drf_multiple_model/mixins.py | 1 | 12380 | import warnings
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from rest_framework.response import Response
class BaseMultipleModelMixin(object):
"""
Base class that holds functions need for all MultipleModelMixins/Views
"""
querylist = None
# Keys required for every item in a querylist
required_keys = ['queryset', 'serializer_class']
# default pagination state. Gets overridden if pagination is active
is_paginated = False
def get_querylist(self):
assert self.querylist is not None, (
'{} should either include a `querylist` attribute, '
'or override the `get_querylist()` method.'.format(
self.__class__.__name__
)
)
return self.querylist
def check_query_data(self, query_data):
"""
All items in a `querylist` must have at least a `queryset` key and a
`serializer_class` key. Any querylist item lacking either of those keys
will raise a ValidationError
"""
for key in self.required_keys:
if key not in query_data:
raise ValidationError(
'All items in the {} querylist attribute should contain a '
'`{}` key'.format(self.__class__.__name__, key)
)
def load_queryset(self, query_data, request, *args, **kwargs):
"""
Fetches the queryset and runs any necessary filtering, both
built-in rest_framework filters and custom filters passed into
the querylist
"""
queryset = query_data.get('queryset', [])
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
# run rest_framework filters
queryset = self.filter_queryset(queryset)
# run custom filters
filter_fn = query_data.get('filter_fn', None)
if filter_fn is not None:
queryset = filter_fn(queryset, request, *args, **kwargs)
page = self.paginate_queryset(queryset)
self.is_paginated = page is not None
return page if page is not None else queryset
def get_empty_results(self):
"""
Because the base result type is different depending on the return structure
(e.g. list for flat, dict for object), `get_empty_results` initializes the
`results` variable to the proper type
"""
assert self.result_type is not None, (
'{} must specify a `result_type` value or overwrite the '
'`get_empty_results` method.'.format(self.__class__.__name__)
)
return self.result_type()
def add_to_results(self, data, label, results):
"""
responsible for updating the running `results` variable with the
data from this queryset/serializer combo
"""
raise NotImplementedError(
'{} must specify how to add data to the running results tally '
'by overriding the `add_to_results` method.'.format(
self.__class__.__name__
)
)
def format_results(self, results, request):
"""
hook for processing/formatting the entire returned data set, once
the querylist has been evaluated
"""
return results
def list(self, request, *args, **kwargs):
querylist = self.get_querylist()
results = self.get_empty_results()
for query_data in querylist:
self.check_query_data(query_data)
queryset = self.load_queryset(query_data, request, *args, **kwargs)
# Run the paired serializer
context = self.get_serializer_context()
data = query_data['serializer_class'](queryset, many=True, context=context).data
label = self.get_label(queryset, query_data)
# Add the serializer data to the running results tally
results = self.add_to_results(data, label, results)
formatted_results = self.format_results(results, request)
if self.is_paginated:
try:
formatted_results = self.paginator.format_response(formatted_results)
except AttributeError:
raise NotImplementedError(
"{} cannot use the regular Rest Framework or Django paginators as is. "
"Use one of the included paginators from `drf_multiple_models.pagination "
"or subclass a paginator to add the `format_response` method."
"".format(self.__class__.__name__)
)
return Response(formatted_results)
class FlatMultipleModelMixin(BaseMultipleModelMixin):
"""
Create a List of objects from multiple models/serializers.
Mixin is expecting the view will have a querylist variable, which is
a list/tuple of dicts containing, at minimum, a `queryset` key and a
`serializer_class` key, as below:
querylist = [
{'queryset': MyModelA.objects.all(), 'serializer_class': MyModelASerializer},
{'queryset': MyModelB.objects.all(), 'serializer_class': MyModelBSerializer},
{'queryset': MyModelC.objects.all(), 'serializer_class': MyModelCSerializer},
.....
]
This mixin returns a list of serialized data merged together in a single list, eg:
[
{ 'id': 1, 'type': 'myModelA', 'title': 'some_object' },
{ 'id': 4, 'type': 'myModelB', 'title': 'some_other_object' },
{ 'id': 8, 'type': 'myModelA', 'title': 'anotherother_object' },
...
]
"""
# Optional keyword to sort flat lasts by given attribute
# note that the attribute must by shared by ALL models
sorting_field = None
sorting_fields = None
# A mapping, similar to Django's `OrderingFilter`. In the following format: {parameter name: result field name}
# If request query param contains sorting parameter (by default - 'o'), result will be sorted by this parameter.
# Django-like model lookups are supported via '__', but you have to be sure that all querysets will return results
# with corresponding structure.
sorting_fields_map = {}
sorting_parameter_name = 'o'
# Flag to append the particular django model being used to the data
add_model_type = True
result_type = list
_list_attribute_error = 'Invalid sorting field. Corresponding data item is a list: {}'
def initial(self, request, *args, **kwargs):
"""
Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view.
Protected property is required in order to support overriding of `sorting_field` via `@property`; we do this
after the original `initial` has run in order to make sure that the view has all its properties set up.
"""
super(FlatMultipleModelMixin, self).initial(request, *args, **kwargs)
assert not (self.sorting_field and self.sorting_fields), \
'{} should either define ``sorting_field`` or ``sorting_fields`` property, not both.' \
.format(self.__class__.__name__)
if self.sorting_field:
warnings.warn(
'``sorting_field`` property is pending its deprecation. Use ``sorting_fields`` instead.',
DeprecationWarning
)
self.sorting_fields = [self.sorting_field]
self._sorting_fields = self.sorting_fields
def get_label(self, queryset, query_data):
"""
Gets option label for each datum. Can be used for type identification
of individual serialized objects
"""
if query_data.get('label', False):
return query_data['label']
elif self.add_model_type:
try:
return queryset.model.__name__
except AttributeError:
return query_data['queryset'].model.__name__
def add_to_results(self, data, label, results):
"""
Adds the label to the results, as needed, then appends the data
to the running results tally
"""
for datum in data:
if label is not None:
datum.update({'type': label})
results.append(datum)
return results
def format_results(self, results, request):
"""
Prepares sorting parameters, and sorts results, if(as) necessary
"""
self.prepare_sorting_fields()
if self._sorting_fields:
results = self.sort_results(results)
if request.accepted_renderer.format == 'html':
# Makes the results available to the template context by transforming to a dict
results = {'data': results}
return results
def _sort_by(self, datum, param, path=None):
"""
Key function that is used for results sorting. This is passed as argument to `sorted()`
"""
if not path:
path = []
try:
if '__' in param:
root, new_param = param.split('__')
path.append(root)
return self._sort_by(datum[root], param=new_param, path=path)
else:
path.append(param)
data = datum[param]
if isinstance(data, list):
raise ValidationError(self._list_attribute_error.format(param))
return data
except TypeError:
raise ValidationError(self._list_attribute_error.format('.'.join(path)))
except KeyError:
raise ValidationError('Invalid sorting field: {}'.format('.'.join(path)))
def prepare_sorting_fields(self):
"""
Determine sorting direction and sorting field based on request query parameters and sorting options
of self
"""
if self.sorting_parameter_name in self.request.query_params:
# Extract sorting parameter from query string
self._sorting_fields = [
_.strip() for _ in self.request.query_params.get(self.sorting_parameter_name).split(',')
]
if self._sorting_fields:
# Create a list of sorting parameters. Each parameter is a tuple: (field:str, descending:bool)
self._sorting_fields = [
(self.sorting_fields_map.get(field.lstrip('-'), field.lstrip('-')), field[0] == '-')
for field in self._sorting_fields
]
def sort_results(self, results):
for field, descending in reversed(self._sorting_fields):
results = sorted(
results,
reverse=descending,
key=lambda x: self._sort_by(x, field)
)
return results
class ObjectMultipleModelMixin(BaseMultipleModelMixin):
"""
Create a Dictionary of objects from multiple models/serializers.
Mixin is expecting the view will have a querylist variable, which is
a list/tuple of dicts containing, at minimum, a `queryset` key and a
`serializer_class` key, as below:
querylist = [
{'queryset': MyModelA.objects.all(), 'serializer_class': MyModelASerializer},
{'queryset': MyModelB.objects.all(), 'serializer_class': MyModelBSerializer},
{'queryset': MyModelC.objects.all(), 'serializer_class': MyModelCSerializer},
...
]
This mixin returns a dictionary of serialized data separated by object type, e.g.:
{
'MyModelA': [
{ 'id': 1, 'type': 'myModelA', 'title': 'some_object' },
{ 'id': 8, 'type': 'myModelA', 'title': 'anotherother_object' },
...
],
'MyModelB': [
{ 'id': 4, 'type': 'myModelB', 'title': 'some_other_object' },
...
]
...
}
"""
result_type = dict
def add_to_results(self, data, label, results):
results[label] = data
return results
def get_label(self, queryset, query_data):
"""
Gets option label for each datum. Can be used for type identification
of individual serialized objects
"""
if query_data.get('label', False):
return query_data['label']
try:
return queryset.model.__name__
except AttributeError:
return query_data['queryset'].model.__name__
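# A minimal view sketch tying a mixin to a DRF generic view (assumptions:
# `Play`/`Poem` models and serializers exist; GenericAPIView supplies the
# filter_queryset/paginate_queryset/get_serializer_context hooks used above):
#
#     from rest_framework.generics import GenericAPIView
#
#     class TextAPIView(ObjectMultipleModelMixin, GenericAPIView):
#         querylist = [
#             {'queryset': Play.objects.all(), 'serializer_class': PlaySerializer},
#             {'queryset': Poem.objects.all(), 'serializer_class': PoemSerializer},
#         ]
#
#         def get(self, request, *args, **kwargs):
#             return self.list(request, *args, **kwargs)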
| mit |
knossos-project/PythonQt | examples/NicePyConsole/pygments/formatters/svg.py | 76 | 5840 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.replace('&', '&'). \
replace('<', '<'). \
replace('>', '>'). \
replace('"', '"'). \
replace("'", ''')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add a XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` else. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` else.
`spacehack`
Convert spaces in the source to `` ``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'):
fs = fs[:-2].strip()
try:
int_fs = int(fs)
except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', ' ')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n<text x="%s" y="%s" '
'xml:space="preserve">' % (x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
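# A minimal usage sketch via the standard pygments API (the lexer choice is
# illustrative):
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#
#     svg_doc = highlight('print("hi")', PythonLexer(), SvgFormatter(fontsize='16px'))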
| lgpl-2.1 |
ansible/ansible | test/units/executor/test_task_queue_manager_callbacks.py | 68 | 4502 | # (c) 2016, Steve Kuznetsov <skuznets@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from units.compat import unittest
from units.compat.mock import MagicMock
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.plugins.callback import CallbackBase
from ansible.utils import context_objects as co
__metaclass__ = type
class TestTaskQueueManagerCallbacks(unittest.TestCase):
def setUp(self):
inventory = MagicMock()
variable_manager = MagicMock()
loader = MagicMock()
passwords = []
# Reset the stored command line args
co.GlobalCLIArgs._Singleton__instance = None
self._tqm = TaskQueueManager(inventory, variable_manager, loader, passwords)
self._playbook = Playbook(loader)
# we use a MagicMock to register the result of the call we
# expect to `v2_playbook_on_call`. We don't mock out the
# method since we're testing code that uses `inspect` to
# look at that method's argspec and we want to ensure this
# test is easy to reason about.
self._register = MagicMock()
def tearDown(self):
# Reset the stored command line args
co.GlobalCLIArgs._Singleton__instance = None
def test_task_queue_manager_callbacks_v2_playbook_on_start(self):
"""
Assert that no exceptions are raised when sending a Playbook
start callback to a current callback module plugin.
"""
register = self._register
class CallbackModule(CallbackBase):
"""
This is a callback module with the current
method signature for `v2_playbook_on_start`.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'current_module'
def v2_playbook_on_start(self, playbook):
register(self, playbook)
callback_module = CallbackModule()
self._tqm._callback_plugins.append(callback_module)
self._tqm.send_callback('v2_playbook_on_start', self._playbook)
register.assert_called_once_with(callback_module, self._playbook)
def test_task_queue_manager_callbacks_v2_playbook_on_start_wrapped(self):
"""
Assert that no exceptions are raised when sending a Playbook
start callback to a wrapped current callback module plugin.
"""
register = self._register
def wrap_callback(func):
"""
This wrapper changes the exposed argument
names for a method from the original names
to (*args, **kwargs). This is used in order
to validate that wrappers which change
parameter names do not break the TQM callback
system.
:param func: function to decorate
:return: decorated function
"""
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class WrappedCallbackModule(CallbackBase):
"""
This is a callback module with the current
method signature for `v2_playbook_on_start`
wrapped in order to change the signature.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'current_module'
@wrap_callback
def v2_playbook_on_start(self, playbook):
register(self, playbook)
callback_module = WrappedCallbackModule()
self._tqm._callback_plugins.append(callback_module)
self._tqm.send_callback('v2_playbook_on_start', self._playbook)
register.assert_called_once_with(callback_module, self._playbook)
| gpl-3.0 |
agermanidis/autosub | setup.py | 1 | 1496 | #!/usr/bin/env python
from __future__ import unicode_literals
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
long_description = (
'Autosub is a utility for automatic speech recognition and subtitle generation. '
'It takes a video or an audio file as input, performs voice activity detection '
'to find speech regions, makes parallel requests to Google Web Speech API to '
'generate transcriptions for those regions, (optionally) translates them to a '
'different language, and finally saves the resulting subtitles to disk. '
'It supports a variety of input and output languages (to see which, run the '
'utility with --list-src-languages and --list-dst-languages as arguments '
'respectively) and can currently produce subtitles in either the SRT format or '
'simple JSON.'
)
setup(
name='autosub',
version='0.4.0',
description='Auto-generates subtitles for any video or audio file',
long_description=long_description,
author='Anastasis Germanidis',
author_email='agermanidis@gmail.com',
url='https://github.com/agermanidis/autosub',
packages=['autosub'],
entry_points={
'console_scripts': [
'autosub = autosub:main',
],
},
install_requires=[
'google-api-python-client>=1.4.2',
'requests>=2.3.0',
'pysrt>=1.0.1',
'progressbar2>=3.34.3',
'six>=1.11.0',
],
license=open("LICENSE").read()
)
| mit |
deepakselvaraj/federated-horizon | openstack_dashboard/dashboards/project/overview/views.py | 2 | 2120 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import capfirst # noqa
from django.template.defaultfilters import floatformat # noqa
from django.utils.translation import ugettext as _ # noqa
from django.views.generic import TemplateView # noqa
from openstack_dashboard import usage
from openstack_dashboard.usage import base
class ProjectUsageCsvRenderer(base.BaseCsvResponse):
columns = [_("Instance Name"), _("VCPUs"), _("Ram (MB)"),
_("Disk (GB)"), _("Usage (Hours)"),
_("Uptime(Seconds)"), _("State")]
def get_row_data(self):
for inst in self.context['usage'].get_instances():
yield (inst['name'],
inst['vcpus'],
inst['memory_mb'],
inst['local_gb'],
floatformat(inst['hours'], 2),
inst['uptime'],
capfirst(inst['state']))
class ProjectOverview(usage.UsageView):
table_class = usage.ProjectUsageTable
usage_class = usage.ProjectUsage
template_name = 'project/overview/usage.html'
csv_response_class = ProjectUsageCsvRenderer
def get_data(self):
super(ProjectOverview, self).get_data()
return self.usage.get_instances()
class WarningView(TemplateView):
template_name = "project/_warning.html"
| apache-2.0 |
rapilabs/django | tests/template_tests/templatetags/custom.py | 161 | 5408 | import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
from django.utils.html import escape, format_html
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args))
)
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def escape_naive(context):
"""A tag that doesn't even think about escaping issues"""
return "Hello {0}!".format(context['name'])
@register.simple_tag(takes_context=True)
def escape_explicit(context):
"""A tag that uses escape explicitly"""
return escape("Hello {0}!".format(context['name']))
@register.simple_tag(takes_context=True)
def escape_format_html(context):
"""A tag that uses format_html"""
return format_html("Hello {0}!", context['name'])
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
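# A template-side usage sketch (assumption: this library is registered under a
# tag-library name such as 'custom' in the test settings):
#
#     {% load custom %}
#     {{ "foobar"|trim:3 }}   {# -> "foo" #}
#     {% minusone 5 %}        {# -> 4 #}
#     {% minustwo 5 %}        {# -> 3 #}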
| bsd-3-clause |
bristy/login_app_flask | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 304 | 4273 | from collections import namedtuple
from ..exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementations does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
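# A small sketch of the Url properties on a parsed result (parse_url's
# docstring already covers the parsing itself):
#
#     url = parse_url('http://user:pw@example.com:8080/index?a=1#frag')
#     url.netloc       # 'example.com:8080'
#     url.request_uri  # '/index?a=1'
#     get_host('http://example.com/')  # ('http', 'example.com', None)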
| mit |
aifil/odoo | addons/event/models/event_mail.py | 33 | 5792 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models, tools
_INTERVALS = {
'hours': lambda interval: relativedelta(hours=interval),
'days': lambda interval: relativedelta(days=interval),
'weeks': lambda interval: relativedelta(days=7*interval),
'months': lambda interval: relativedelta(months=interval),
'now': lambda interval: relativedelta(hours=0),
}
class EventMailScheduler(models.Model):
""" Event automated mailing. This model replaces all existing fields and
configuration allowing to send emails on events since Odoo 9. A cron exists
that periodically checks for mailing to run. """
_name = 'event.mail'
event_id = fields.Many2one('event.event', string='Event', required=True, ondelete='cascade')
sequence = fields.Integer('Display order')
interval_nbr = fields.Integer('Interval', default=1)
interval_unit = fields.Selection([
('now', 'Immediately'),
('hours', 'Hour(s)'), ('days', 'Day(s)'),
('weeks', 'Week(s)'), ('months', 'Month(s)')],
string='Unit', default='hours', required=True)
interval_type = fields.Selection([
('after_sub', 'After each subscription'),
('before_event', 'Before the event'),
('after_event', 'After the event')],
string='When to Run', default="before_event", required=True)
template_id = fields.Many2one(
'mail.template', string='Email to Send',
domain=[('model', '=', 'event.registration')], required=True, ondelete='restrict',
help='This field contains the template of the mail that will be automatically sent')
scheduled_date = fields.Datetime('Scheduled Sent Mail', compute='_compute_scheduled_date', store=True)
mail_registration_ids = fields.One2many('event.mail.registration', 'scheduler_id')
mail_sent = fields.Boolean('Mail Sent on Event')
done = fields.Boolean('Sent', compute='_compute_done', store=True)
@api.one
@api.depends('mail_sent', 'interval_type', 'event_id.registration_ids', 'mail_registration_ids')
def _compute_done(self):
if self.interval_type in ['before_event', 'after_event']:
self.done = self.mail_sent
else:
self.done = len(self.mail_registration_ids) == len(self.event_id.registration_ids) and all(line.mail_sent for line in self.mail_registration_ids)
@api.one
@api.depends('event_id.state', 'event_id.date_begin', 'interval_type', 'interval_unit', 'interval_nbr')
def _compute_scheduled_date(self):
if self.event_id.state not in ['confirm', 'done']:
self.scheduled_date = False
else:
if self.interval_type == 'after_sub':
date, sign = self.event_id.create_date, 1
elif self.interval_type == 'before_event':
date, sign = self.event_id.date_begin, -1
else:
date, sign = self.event_id.date_end, 1
self.scheduled_date = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT) + _INTERVALS[self.interval_unit](sign * self.interval_nbr)
@api.one
def execute(self):
if self.interval_type == 'after_sub':
# update registration lines
lines = []
for registration in filter(lambda item: item not in [mail_reg.registration_id for mail_reg in self.mail_registration_ids], self.event_id.registration_ids):
lines.append((0, 0, {'registration_id': registration.id}))
if lines:
self.write({'mail_registration_ids': lines})
# execute scheduler on registrations
self.mail_registration_ids.filtered(lambda reg: reg.scheduled_date and reg.scheduled_date <= datetime.strftime(fields.datetime.now(), tools.DEFAULT_SERVER_DATETIME_FORMAT)).execute()
else:
if not self.mail_sent:
self.event_id.mail_attendees(self.template_id.id)
self.write({'mail_sent': True})
return True
@api.model
def run(self, autocommit=False):
schedulers = self.search([('done', '=', False), ('scheduled_date', '<=', datetime.strftime(fields.datetime.now(), tools.DEFAULT_SERVER_DATETIME_FORMAT))])
for scheduler in schedulers:
scheduler.execute()
if autocommit:
self.env.cr.commit()
return True
class EventMailRegistration(models.Model):
_name = 'event.mail.registration'
_description = 'Registration Mail Scheduler'
_rec_name = 'scheduler_id'
_order = 'scheduled_date DESC'
scheduler_id = fields.Many2one('event.mail', 'Mail Scheduler', required=True, ondelete='cascade')
registration_id = fields.Many2one('event.registration', 'Attendee', required=True, ondelete='cascade')
scheduled_date = fields.Datetime('Scheduled Time', compute='_compute_scheduled_date', store=True)
mail_sent = fields.Boolean('Mail Sent')
@api.one
def execute(self):
if self.registration_id.state in ['open', 'done'] and not self.mail_sent:
self.scheduler_id.template_id.send_mail(self.registration_id.id)
self.write({'mail_sent': True})
@api.one
@api.depends('registration_id', 'scheduler_id.interval_unit', 'scheduler_id.interval_type')
def _compute_scheduled_date(self):
if self.registration_id:
date_open = self.registration_id.date_open
date_open_datetime = date_open and datetime.strptime(date_open, tools.DEFAULT_SERVER_DATETIME_FORMAT) or fields.datetime.now()
self.scheduled_date = date_open_datetime + _INTERVALS[self.scheduler_id.interval_unit](self.scheduler_id.interval_nbr)
else:
self.scheduled_date = False
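# A small sketch of how the _INTERVALS table drives scheduling (mirrors
# _compute_scheduled_date; the dates are illustrative):
#
#     from datetime import datetime
#     date_begin = datetime(2016, 6, 1, 9, 0)
#     # 'before_event' counts backwards from the event start (sign = -1):
#     scheduled = date_begin + _INTERVALS['days'](-1 * 2)
#     # -> datetime(2016, 5, 30, 9, 0), i.e. two days before the event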
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.